text (string, 78 to 104k chars) | score (float64, 0 to 0.18)
---|---|
def build_list_type_validator(item_validator):
"""Return a function which validates that the value is a list of items
which are validated using item_validator.
"""
def validate_list_of_type(value):
return [item_validator(item) for item in validate_list(value)]
return validate_list_of_type | 0.003155 |
def prepare_untran(feat_type, tgt_dir, untran_dir):
""" Preprocesses untranscribed audio."""
org_dir = str(untran_dir)
wav_dir = os.path.join(str(tgt_dir), "wav", "untranscribed")
feat_dir = os.path.join(str(tgt_dir), "feat", "untranscribed")
if not os.path.isdir(wav_dir):
os.makedirs(wav_dir)
if not os.path.isdir(feat_dir):
os.makedirs(feat_dir)
# Standardize into wav files
for fn in os.listdir(org_dir):
in_path = os.path.join(org_dir, fn)
prefix, _ = os.path.splitext(fn)
mono16k_wav_path = os.path.join(wav_dir, "%s.wav" % prefix)
if not os.path.isfile(mono16k_wav_path):
feat_extract.convert_wav(Path(in_path), Path(mono16k_wav_path))
# Split up the wavs and write prefixes to prefix file.
wav_fns = os.listdir(wav_dir)
with (tgt_dir / "untranscribed_prefixes.txt").open("w") as prefix_f:
for fn in wav_fns:
in_fn = os.path.join(wav_dir, fn)
prefix, _ = os.path.splitext(fn)
# Split into sub-wavs and perform feat extraction.
split_id = 0
start, end = 0, 10 #in seconds
length = utils.wav_length(in_fn)
while True:
sub_wav_prefix = "{}.{}".format(prefix, split_id)
print(sub_wav_prefix, file=prefix_f)
out_fn = os.path.join(feat_dir, "{}.wav".format(sub_wav_prefix))
start_time = start * ureg.seconds
end_time = end * ureg.seconds
if not Path(out_fn).is_file():
wav.trim_wav_ms(Path(in_fn), Path(out_fn),
start_time.to(ureg.milliseconds).magnitude,
end_time.to(ureg.milliseconds).magnitude)
if end > length:
break
start += 10
end += 10
split_id += 1
# Do feat extraction.
feat_extract.from_dir(Path(os.path.join(feat_dir)), feat_type=feat_type) | 0.001969 |
def randomString(size: int = 20,
chars: str = string.ascii_letters + string.digits) -> str:
"""
Generate a random string of the specified size.
Ensure that the size is not greater than the length of chars, as this function uses
random.sample, which draws characters without replacement.
:param size: size of the random string to generate
:param chars: the set of characters to use to generate the random string. Uses alphanumerics by default.
:return: the random string generated
"""
return ''.join(sample(chars, size)) | 0.005263 |
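A quick sketch of the size constraint noted above: random.sample draws without replacement, so asking for more characters than the pool contains raises ValueError.
import string
from random import sample
pool = string.ascii_letters + string.digits      # 62 characters
print(''.join(sample(pool, 20)))                 # fine: 20 <= 62
try:
    ''.join(sample(pool, 100))                   # 100 > 62
except ValueError as err:
    print(err)                                   # "Sample larger than population ..."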
def remove(self, id_option_vip):
"""Remove Option VIP from by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:return: None
:raise InvalidParameterError: Option VIP identifier is null or invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise OptionVipError: Option VIP associated with environment vip.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(
u'The identifier of Option VIP is invalid or was not informed.')
url = 'optionvip/' + str(id_option_vip) + '/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml) | 0.005495 |
def get_stats(self):
"""Return a string describing the stats"""
ostr = ''
errtotal = self.deletions['total']+self.insertions['total']+self.mismatches
ostr += "ALIGNMENT_COUNT\t"+str(self.alignment_count)+"\n"
ostr += "ALIGNMENT_BASES\t"+str(self.alignment_length)+"\n"
ostr += "ANY_ERROR\t"+str(errtotal)+"\n"
ostr += "MISMATCHES\t"+str(self.mismatches)+"\n"
ostr += "ANY_DELETION\t"+str(self.deletions['total'])+"\n"
ostr += "COMPLETE_DELETION\t"+str(self.deletions['specific'])+"\n"
ostr += "HOMOPOLYMER_DELETION\t"+str(self.deletions['homopolymer'])+"\n"
ostr += "ANY_INSERTION\t"+str(self.insertions['total'])+"\n"
ostr += "COMPLETE_INSERTION\t"+str(self.insertions['specific'])+"\n"
ostr += "HOMOPOLYMER_INSERTION\t"+str(self.insertions['homopolymer'])+"\n"
return ostr | 0.001205 |
def _process_current(self, handle, op, dest_path=None, dest_name=None):
"""Process current member with 'op' operation."""
unrarlib.RARProcessFileW(handle, op, dest_path, dest_name) | 0.010204 |
def attach(self, lun_or_snap, skip_hlu_0=False):
""" Attaches lun, snap or member snap of cg snap to host.
Don't pass cg snapshot in as `lun_or_snap`.
:param lun_or_snap: the lun, snap, or a member snap of cg snap
:param skip_hlu_0: whether to skip hlu 0
:return: the hlu number
"""
# The `UnityResourceAlreadyAttachedError` check was removed because the
# Cinder driver keeps a host cache. If the lun was already attached to
# the host and its info was stored in the cache, the wrong hlu would be
# returned.
# Attaching a lun to a host twice succeeds, so if a Cinder retry
# triggers another attachment of the same lun to the host, the cost is
# only one more REST request (`modifyLun`) plus one host instance
# query.
try:
return self._attach_with_retry(lun_or_snap, skip_hlu_0)
except ex.SystemAPINotSupported:
# Attaching a snap to a host is not supported before version 4.1.
raise
except ex.UnityAttachExceedLimitError:
# The number of luns exceeds system limit
raise
except: # noqa
# other attach error, remove this lun if already attached
self.detach(lun_or_snap)
raise | 0.001534 |
def document(schema):
"""Print a documented teleport version of the schema."""
teleport_schema = from_val(schema)
return json.dumps(teleport_schema, sort_keys=True, indent=2) | 0.005376 |
def symbolic_run_get_cons(trace):
'''
Execute a symbolic run that follows a concrete run; return constraints generated
and the stdin data produced
'''
m2 = Manticore.linux(prog, workspace_url='mem:')
f = Follower(trace)
m2.verbosity(VERBOSITY)
m2.register_plugin(f)
def on_term_testcase(mcore, state, stateid, err):
with m2.locked_context() as ctx:
readdata = []
for name, fd, data in state.platform.syscall_trace:
if name in ('_receive', '_read') and fd == 0:
readdata.append(data)
ctx['readdata'] = readdata
ctx['constraints'] = list(state.constraints.constraints)
m2.subscribe('will_terminate_state', on_term_testcase)
m2.run()
constraints = m2.context['constraints']
datas = m2.context['readdata']
return constraints, datas | 0.00227 |
def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.
:param next_all_info: Dictionary of all next brains and corresponding BrainInfo.
:param take_action_outputs: The outputs of the Policy's get_action method.
"""
self.trainer_metrics.start_experience_collection_timer()
if take_action_outputs:
self.stats['Policy/Value Estimate'].append(take_action_outputs['value'].mean())
self.stats['Policy/Entropy'].append(take_action_outputs['entropy'].mean())
self.stats['Policy/Learning Rate'].append(take_action_outputs['learning_rate'])
curr_info = curr_all_info[self.brain_name]
next_info = next_all_info[self.brain_name]
for agent_id in curr_info.agents:
self.training_buffer[agent_id].last_brain_info = curr_info
self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs
if curr_info.agents != next_info.agents:
curr_to_use = self.construct_curr_info(next_info)
else:
curr_to_use = curr_info
intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)
for agent_id in next_info.agents:
stored_info = self.training_buffer[agent_id].last_brain_info
stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs
if stored_info is not None:
idx = stored_info.agents.index(agent_id)
next_idx = next_info.agents.index(agent_id)
if not stored_info.local_done[idx]:
for i, _ in enumerate(stored_info.visual_observations):
self.training_buffer[agent_id]['visual_obs%d' % i].append(
stored_info.visual_observations[i][idx])
self.training_buffer[agent_id]['next_visual_obs%d' % i].append(
next_info.visual_observations[i][next_idx])
if self.policy.use_vec_obs:
self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])
self.training_buffer[agent_id]['next_vector_in'].append(
next_info.vector_observations[next_idx])
if self.policy.use_recurrent:
if stored_info.memories.shape[1] == 0:
stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))
self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])
actions = stored_take_action_outputs['action']
if self.policy.use_continuous_act:
actions_pre = stored_take_action_outputs['pre_action']
self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])
epsilons = stored_take_action_outputs['random_normal_epsilon']
self.training_buffer[agent_id]['random_normal_epsilon'].append(
epsilons[idx])
else:
self.training_buffer[agent_id]['action_mask'].append(
stored_info.action_masks[idx], padding_value=1)
a_dist = stored_take_action_outputs['log_probs']
value = stored_take_action_outputs['value']
self.training_buffer[agent_id]['actions'].append(actions[idx])
self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])
self.training_buffer[agent_id]['masks'].append(1.0)
if self.use_curiosity:
self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +
intrinsic_rewards[next_idx])
else:
self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])
self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])
self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]
if self.use_curiosity:
if agent_id not in self.intrinsic_rewards:
self.intrinsic_rewards[agent_id] = 0
self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]
if not next_info.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1
self.trainer_metrics.end_experience_collection_timer() | 0.00573 |
def get_map_values(self, lons, lats, ibin=None):
"""Return the map values corresponding to a set of coordinates.
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
coords outside of map
"""
pix_idxs = self.get_pixel_indices(lons, lats, ibin)
idxs = copy.copy(pix_idxs)
m = np.empty_like(idxs[0], dtype=bool)
m.fill(True)
for i, p in enumerate(pix_idxs):
m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])
idxs[i][~m] = 0
vals = self.counts.T[idxs]
vals[~m] = np.nan
return vals | 0.003064 |
def free(host, port, timeout=float('Inf')):
"""
Wait for the specified port to become free (dropping or rejecting
requests). Return when the port is free or raise a Timeout if timeout has
elapsed.
Timeout may be specified in seconds or as a timedelta.
If timeout is None or ∞, the routine will run indefinitely.
>>> free('localhost', find_available_local_port())
"""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
timer = timing.Timer(timeout)
while not timer.expired():
try:
# Expect a free port, so use a small timeout
Checker(timeout=0.1).assert_free(host, port)
return
except PortNotFree:
# Politely wait.
time.sleep(0.1)
raise Timeout("Port {port} not free on {host}.".format(**locals())) | 0.027632 |
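A rough standalone equivalent of the polling loop above, using only the standard library instead of the project's Checker/Timer helpers: the port is treated as free once nothing accepts connections on it.
import socket
import time
def wait_until_free(host, port, timeout=30.0, poll=0.1):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(0.25)
            try:
                s.connect((host, port))          # something answered: port still in use
            except (socket.timeout, OSError):
                return                           # refused or timed out: treat as free
        time.sleep(poll)
    raise TimeoutError("Port %s not free on %s." % (port, host))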
def status(self, vm_name=None):
'''
Return the results of a `vagrant status` call as a list of one or more
Status objects. A Status contains the following attributes:
- name: The VM name in a multi-vm environment. 'default' otherwise.
- state: The state of the underlying guest machine (i.e. VM).
- provider: the name of the VM provider, e.g. 'virtualbox'. None
if no provider is output by vagrant.
Example return values for a multi-VM environment:
[Status(name='web', state='not created', provider='virtualbox'),
Status(name='db', state='not created', provider='virtualbox')]
And for a single-VM environment:
[Status(name='default', state='not created', provider='virtualbox')]
Possible states include, but are not limited to (since new states are
being added as Vagrant evolves):
- 'not_created' if the vm is destroyed
- 'running' if the vm is up
- 'poweroff' if the vm is halted
- 'saved' if the vm is suspended
- 'aborted' if the vm is aborted
Implementation Details:
This command uses the `--machine-readable` flag added in
Vagrant 1.5, mapping the target name, state, and provider-name
to a Status object.
Example with no VM name and multi-vm Vagrantfile:
$ vagrant status --machine-readable
1424098924,web,provider-name,virtualbox
1424098924,web,state,running
1424098924,web,state-human-short,running
1424098924,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
1424098924,db,provider-name,virtualbox
1424098924,db,state,not_created
1424098924,db,state-human-short,not created
1424098924,db,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Example with VM name:
$ vagrant status --machine-readable web
1424099027,web,provider-name,virtualbox
1424099027,web,state,running
1424099027,web,state-human-short,running
1424099027,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
Example with no VM name and single-vm Vagrantfile:
$ vagrant status --machine-readable
1424100021,default,provider-name,virtualbox
1424100021,default,state,not_created
1424100021,default,state-human-short,not created
1424100021,default,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Error example with incorrect VM name:
$ vagrant status --machine-readable api
1424099042,,error-exit,Vagrant::Errors::MachineNotFound,The machine with the name 'api' was not found configured for\nthis Vagrant environment.
Error example with missing Vagrantfile:
$ vagrant status --machine-readable
1424099094,,error-exit,Vagrant::Errors::NoEnvironmentError,A Vagrant environment or target machine is required to run this\ncommand. Run `vagrant init` to create a new Vagrant environment. Or%!(VAGRANT_COMMA)\nget an ID of a target machine from `vagrant global-status` to run\nthis command on. A final option is to change to a directory with a\nVagrantfile and to try again.
'''
# machine-readable output are CSV lines
output = self._run_vagrant_command(['status', '--machine-readable', vm_name])
return self._parse_status(output) | 0.002233 |
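A simplified sketch of what _parse_status might do with the machine-readable rows documented above (the project's real parser may differ): collect the state and provider-name values per target and build Status tuples.
import collections
Status = collections.namedtuple('Status', ['name', 'state', 'provider'])
def parse_status(output):
    per_target = collections.defaultdict(dict)
    for line in output.splitlines():
        parts = line.split(',', 3)
        if len(parts) < 4:
            continue
        _timestamp, target, key, value = parts
        if target:                               # error-exit rows have an empty target
            per_target[target][key] = value
    return [Status(name, info.get('state'), info.get('provider-name'))
            for name, info in per_target.items()]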
def prepare_blacklist(src, dst, duration=3600, src_port1=None,
src_port2=None, src_proto='predefined_tcp',
dst_port1=None, dst_port2=None,
dst_proto='predefined_tcp'):
"""
Create a blacklist entry.
A blacklist can be added directly from the engine node, or from
the system context. If submitting from the system context, it becomes
a global blacklist. This will return the properly formatted json
to submit.
:param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any'
:param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any'
:param int duration: length of time to blacklist
Both the system and engine context blacklist allow kw to be passed
to provide additional functionality such as adding source and destination
ports or port ranges and specifying the protocol. The following parameters
define the ``kw`` that can be passed.
The following example shows creating an engine context blacklist
using additional kw::
engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600,
src_port1=1000, src_port2=1500, src_proto='predefined_udp',
dst_port1=3, dst_port2=3000, dst_proto='predefined_udp')
:param int src_port1: start source port to limit blacklist
:param int src_port2: end source port to limit blacklist
:param str src_proto: source protocol. Either 'predefined_tcp'
or 'predefined_udp'. (default: 'predefined_tcp')
:param int dst_port1: start dst port to limit blacklist
:param int dst_port2: end dst port to limit blacklist
:param str dst_proto: dst protocol. Either 'predefined_tcp'
or 'predefined_udp'. (default: 'predefined_tcp')
.. note:: if blocking a range of ports, use both src_port1 and
src_port2, otherwise providing only src_port1 is adequate. The
same applies to dst_port1 / dst_port2. In addition, if you provide
src_portX but not dst_portX (or vice versa), the undefined port
side definition will default to all ports.
"""
json = {}
directions = {src: 'end_point1', dst: 'end_point2'}
for direction, key in directions.items():
json[key] = {'address_mode': 'any'} if \
'any' in direction.lower() else {'address_mode': 'address', 'ip_network': direction}
if src_port1:
json.setdefault('end_point1').update(
port1=src_port1,
port2=src_port2 or src_port1,
port_mode=src_proto)
if dst_port1:
json.setdefault('end_point2').update(
port1=dst_port1,
port2=dst_port2 or dst_port1,
port_mode=dst_proto)
json.update(duration=duration)
return json | 0.004982 |
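For reference, tracing the function above with the keyword arguments from the docstring example yields a payload of this shape (written out by hand, not captured from a live run):
payload = prepare_blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600,
                            src_port1=1000, src_port2=1500, src_proto='predefined_udp',
                            dst_port1=3, dst_port2=3000, dst_proto='predefined_udp')
# payload ==
# {'end_point1': {'address_mode': 'address', 'ip_network': '1.1.1.1/32',
#                 'port1': 1000, 'port2': 1500, 'port_mode': 'predefined_udp'},
#  'end_point2': {'address_mode': 'address', 'ip_network': '2.2.2.2/32',
#                 'port1': 3, 'port2': 3000, 'port_mode': 'predefined_udp'},
#  'duration': 3600}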
async def set(
self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _cas_token=None, _conn=None
):
"""
Stores the value in the given key with ttl if specified
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need milliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if the value was set
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
dumps = dumps_fn or self._serializer.dumps
ns_key = self.build_key(key, namespace=namespace)
res = await self._set(
ns_key, dumps(value), ttl=self._get_ttl(ttl), _cas_token=_cas_token, _conn=_conn
)
logger.debug("SET %s %d (%.4f)s", ns_key, True, time.monotonic() - start)
return res | 0.005742 |
def get_assignment_group(self, course_id, assignment_group_id, grading_period_id=None, include=None, override_assignment_dates=None):
"""
Get an Assignment Group.
Returns the assignment group with the given id.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - assignment_group_id
"""ID"""
path["assignment_group_id"] = assignment_group_id
# OPTIONAL - include
"""Associations to include with the group. "discussion_topic" and "assignment_visibility" and "submission"
are only valid if "assignments" is also included. The "assignment_visibility" option additionally
requires that the Differentiated Assignments course feature be turned on."""
if include is not None:
self._validate_enum(include, ["assignments", "discussion_topic", "assignment_visibility", "submission"])
params["include"] = include
# OPTIONAL - override_assignment_dates
"""Apply assignment overrides for each assignment, defaults to true."""
if override_assignment_dates is not None:
params["override_assignment_dates"] = override_assignment_dates
# OPTIONAL - grading_period_id
"""The id of the grading period in which assignment groups are being requested
(Requires the Multiple Grading Periods account feature turned on)"""
if grading_period_id is not None:
params["grading_period_id"] = grading_period_id
self.logger.debug("GET /api/v1/courses/{course_id}/assignment_groups/{assignment_group_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/assignment_groups/{assignment_group_id}".format(**path), data=data, params=params, single_item=True) | 0.004975 |
def flp_nonlinear_mselect(I,J,d,M,f,c,K):
"""flp_nonlinear_mselect -- use multiple selection model
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- piecewise linear version with multiple selection")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,z = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],z[j] = mult_selection(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * z[j][k] for k in range(K)),\
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model | 0.019282 |
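A small sketch of the breakpoint construction used above, outside the model: the concave cost f*sqrt(X) on [0, M] is sampled at K+1 evenly spaced points, and the multiple-selection constraints interpolate linearly between them.
import math
f_j, M_j, K = 10.0, 100.0, 4
width = M_j / float(K)
a = [k * width for k in range(K + 1)]            # x-breakpoints: [0, 25, 50, 75, 100]
b = [f_j * math.sqrt(v) for v in a]              # cost at each breakpoint
print([(x, round(y, 1)) for x, y in zip(a, b)])
# [(0.0, 0.0), (25.0, 50.0), (50.0, 70.7), (75.0, 86.6), (100.0, 100.0)]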
def parse(self, instrs):
"""Parse an IR instruction.
"""
instrs_reil = []
try:
for instr in instrs:
instr_lower = instr.lower()
# If the instruction to parsed is not in the cache,
# parse it and add it to the cache.
if instr_lower not in self._cache:
self._cache[instr_lower] = instruction.parseString(
instr_lower)[0]
# Retrieve parsed instruction from the cache and clone
# it.
instrs_reil += [copy.deepcopy(self._cache[instr_lower])]
except:
error_msg = "Failed to parse instruction: %s"
logger.error(error_msg, instr, exc_info=True)
return instrs_reil | 0.00375 |
def name_insert_prefix(records, prefix):
"""
Given a set of sequences, insert a prefix for each sequence's name.
"""
logging.info('Applying _name_insert_prefix generator: '
'Inserting prefix ' + prefix + ' for all '
'sequence IDs.')
for record in records:
new_id = prefix + record.id
_update_id(record, new_id)
yield record | 0.002494 |
def createSQL(self, sql, args=()):
"""
For use with auto-committing statements such as CREATE TABLE or CREATE
INDEX.
"""
before = time.time()
self._execSQL(sql, args)
after = time.time()
if after - before > 2.0:
log.msg('Extremely long CREATE: %s' % (after - before,))
log.msg(sql) | 0.00542 |
def _CreateTaskStorageWriter(self, path, task):
"""Creates a task storage writer.
Args:
path (str): path to the storage file.
task (Task): task.
Returns:
SQLiteStorageFileWriter: storage writer.
"""
return SQLiteStorageFileWriter(
self._session, path,
storage_type=definitions.STORAGE_TYPE_TASK, task=task) | 0.002762 |
def unpack(s):
"""Unpack a MXImageRecord to string.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
Returns
-------
header : IRHeader
Header of the image record.
s : str
Unpacked string.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, s = mx.recordio.unpack(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
"""
header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE]))
s = s[_IR_SIZE:]
if header.flag > 0:
header = header._replace(label=np.frombuffer(s, np.float32, header.flag))
s = s[header.flag*4:]
return header, s | 0.002703 |
def start_centroid_distance(item_a, item_b, max_value):
"""
Distance between the centroids of the first step in each object.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
start_a = item_a.center_of_mass(item_a.times[0])
start_b = item_b.center_of_mass(item_b.times[0])
start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2)
return np.minimum(start_distance, max_value) / float(max_value) | 0.004412 |
def asnumpy(self):
"""Returns a ``numpy.ndarray`` object with value copied from this array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.asnumpy()
>>> type(y)
<type 'numpy.ndarray'>
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> z = mx.nd.ones((2,3), dtype='int32')
>>> z.asnumpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
"""
data = np.empty(self.shape, dtype=self.dtype)
check_call(_LIB.MXNDArraySyncCopyToCPU(
self.handle,
data.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(data.size)))
return data | 0.00411 |
def stage_signature(vcs, signature):
"""Add `signature` to the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
signature (basestring)
Raises:
AlreadyStagedError
"""
evidence_path = _get_staged_history_path(vcs)
staged = get_staged_signatures(vcs)
if signature in staged:
raise AlreadyStagedError
staged.append(signature)
string = '\n'.join(staged)
with open(evidence_path, 'w') as f:
f.write(string) | 0.002037 |
def stop(self):
"""
Stop ZMQ tools.
:return: self
"""
LOGGER.debug("zeromq.Driver.stop")
for publisher in self.publishers_registry:
publisher.stop()
self.publishers_registry.clear()
for subscriber in self.subscribers_registry:
if subscriber.is_started:
subscriber.stop()
self.subscribers_registry.clear()
# pykka.ActorRegistry.stop_all()
return self | 0.004193 |
def get_smtp_mail(self):
"""
Returns the SMTP formatted email, as it may be passed to sendmail.
:rtype: string
:return: The SMTP formatted mail.
"""
header = self.get_smtp_header()
body = self.get_body().replace('\n', '\r\n')
return header + '\r\n' + body + '\r\n' | 0.006061 |
def from_string(cls, s):
"""Instantiate Relations from a relations string."""
tables = []
seen = set()
current_table = None
lines = list(reversed(s.splitlines())) # to pop() in right order
while lines:
line = lines.pop().strip()
table_m = re.match(r'^(?P<table>\w.+):$', line)
field_m = re.match(r'\s*(?P<name>\S+)'
r'(\s+(?P<attrs>[^#]+))?'
r'(\s*#\s*(?P<comment>.*)$)?',
line)
if table_m is not None:
table_name = table_m.group('table')
if table_name in seen:
raise ItsdbError(
'Table {} already defined.'.format(table_name)
)
current_table = (table_name, [])
tables.append(current_table)
seen.add(table_name)
elif field_m is not None and current_table is not None:
name = field_m.group('name')
attrs = field_m.group('attrs').split()
datatype = attrs.pop(0)
key = ':key' in attrs
partial = ':partial' in attrs
comment = field_m.group('comment')
current_table[1].append(
Field(name, datatype, key, partial, comment)
)
elif line != '':
raise ItsdbError('Invalid line: ' + line)
return cls(tables) | 0.001311 |
def parse_netmhcpan28_stdout(
stdout,
prediction_method_name="netmhcpan",
sequence_key_mapping=None):
"""
# Affinity Threshold for Strong binding peptides 50.000',
# Affinity Threshold for Weak binding peptides 500.000',
# Rank Threshold for Strong binding peptides 0.500',
# Rank Threshold for Weak binding peptides 2.000',
----------------------------------------------------------------------------
pos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel
----------------------------------------------------------------------------
0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00
1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00
2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00
3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00
4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00
5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00
6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00
7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00
8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00
9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00
10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00
11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB
"""
check_stdout_error(stdout, "NetMHCpan-2.8")
return parse_stdout(
stdout=stdout,
prediction_method_name=prediction_method_name,
sequence_key_mapping=sequence_key_mapping,
key_index=3,
offset_index=0,
peptide_index=2,
allele_index=1,
ic50_index=5,
rank_index=6,
log_ic50_index=4) | 0.001056 |
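As a sanity check on the column indices passed above, splitting one data row of the table on whitespace lines the fields up with the *_index arguments (0 position, 1 allele, 2 peptide, 3 identity key, 4 1-log50k, 5 affinity in nM, 6 %rank).
row = "11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB".split()
print(row[:7])
# ['11', 'HLA-A*02:03', 'HIIIASSSL', 'id0', '0.515', '189.74', '4.00']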
def get_transactions_filtered(self, asset_id, operation=None):
"""Get a list of transactions filtered on some criteria
"""
txids = backend.query.get_txids_filtered(self.connection, asset_id,
operation)
for txid in txids:
yield self.get_transaction(txid) | 0.00578 |
def list_available_devices():
"""
List all available devices for the respective backend
returns: devices: a list of dictionaries with the keys 'identifier' and 'instance': \
[ {'identifier': 'usb://0x04f9:0x2015_C5Z315686', 'instance': usb.core.Device()}, ]
The 'identifier' is of the format idVendor:idProduct_iSerialNumber.
"""
class find_class(object):
def __init__(self, class_):
self._class = class_
def __call__(self, device):
# first, let's check the device
if device.bDeviceClass == self._class:
return True
# ok, traverse the device's configurations to find an interface that matches our class
for cfg in device:
# find_descriptor: what's it?
intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
if intf is not None:
return True
return False
# only Brother printers
printers = usb.core.find(find_all=1, custom_match=find_class(7), idVendor=0x04f9)
def identifier(dev):
try:
serial = usb.util.get_string(dev, 256, dev.iSerialNumber)
return 'usb://0x{:04x}:0x{:04x}_{}'.format(dev.idVendor, dev.idProduct, serial)
except:
return 'usb://0x{:04x}:0x{:04x}'.format(dev.idVendor, dev.idProduct)
return [{'identifier': identifier(printer), 'instance': printer} for printer in printers] | 0.007488 |
def default_instruction_to_svg_dict(self, instruction):
"""Returns an xml-dictionary with the same content as
:meth:`default_instruction_to_svg`
If no file ``default.svg`` was loaded, an empty svg-dict is returned.
"""
instruction_type = instruction.type
default_type = "default"
rep_str = "{instruction.type}"
if default_type not in self._instruction_type_to_file_content:
return {"svg": ""}
default_svg = self._instruction_type_to_file_content[default_type]
default_svg = default_svg.replace(rep_str, instruction_type)
colored_svg = self._set_fills_in_color_layer(default_svg,
instruction.hex_color)
return colored_svg | 0.002558 |
def make_flow_labels(graph, flow, capac):
"""Generate arc labels for a flow in a graph with capacities.
:param graph: adjacency list or adjacency dictionary
:param flow: flow matrix or adjacency dictionary
:param capac: capacity matrix or adjacency dictionary
:returns: listdic graph representation, with the arc label strings
"""
V = range(len(graph))
arc_label = [{v:"" for v in graph[u]} for u in V]
for u in V:
for v in graph[u]:
if flow[u][v] >= 0:
arc_label[u][v] = "%s/%s" % (flow[u][v], capac[u][v])
else:
arc_label[u][v] = None # do not show negative flow arcs
return arc_label | 0.002882 |
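A tiny usage sketch with a three-vertex path graph given as an adjacency list, plus matching flow and capacity matrices.
graph = [[1], [2], []]                           # 0 -> 1 -> 2
flow = [[0, 2, 0], [0, 0, 2], [0, 0, 0]]
capac = [[0, 3, 0], [0, 0, 5], [0, 0, 0]]
print(make_flow_labels(graph, flow, capac))      # [{1: '2/3'}, {2: '2/5'}, {}]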
def _handle_double_click(self, event):
""" Double click with left mouse button focuses the state and toggles the collapse status"""
if event.get_button()[1] == 1: # Left mouse button
path_info = self.tree_view.get_path_at_pos(int(event.x), int(event.y))
if path_info: # Valid entry was clicked on
path = path_info[0]
iter = self.tree_store.get_iter(path)
state_model = self.tree_store.get_value(iter, self.MODEL_STORAGE_ID)
# Set focus to StateModel
selection = self._selected_sm_model.selection
selection.focus = state_model
# Toggle collapse status if applicable for this kind of state
if self.view.row_expanded(path):
self.view.collapse_row(path)
else:
if isinstance(state_model, ContainerStateModel) or \
isinstance(state_model, LibraryStateModel) and self.show_content(state_model):
self.view.expand_to_path(path) | 0.005425 |
def quote_identifier(identifier: str,
mixed: Union[SQLCompiler, Engine, Dialect]) -> str:
"""
Converts an SQL identifier to a quoted version, via the SQL dialect in
use.
Args:
identifier: the identifier to be quoted
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns:
the quoted identifier
"""
# See also http://sqlalchemy-utils.readthedocs.io/en/latest/_modules/sqlalchemy_utils/functions/orm.html # noqa
return get_preparer(mixed).quote(identifier) | 0.001704 |
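A usage sketch with SQLAlchemy (quote characters differ per dialect, and only identifiers that need quoting, such as reserved words, are actually quoted):
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
print(quote_identifier('select', engine))        # '"select"' on ANSI-quoting dialects
print(quote_identifier('plain_name', engine))    # 'plain_name' is left untouched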
def write(self, content):
"""Save content on disk"""
with io.open(self.target, 'w', encoding='utf-8') as fp:
fp.write(content)
if not content.endswith(u'\n'):
fp.write(u'\n') | 0.008696 |
def run(cmd, background=False):
"""
Executes the given command
If background flag is True the command will run in background
and this method will return a :class:`Popen` object
If background is False (default) the command will run in this thread
and this method will return stdout.
A CommandException will be raised if command fails
"""
logger.debug('Running command: %s' % cmd)
if background:
return subprocess.Popen(cmd, shell=True, close_fds=True)
else:
(status, output) = commands.getstatusoutput(cmd)
if status != 0:
logger.error("Command failed: %s" % cmd)
if output:
logger.debug('OUTPUT:\n' + output)
if status != 0:
raise CommandException(status, output)
return output | 0.00123 |
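A usage sketch of the two modes described above (note that the foreground path relies on the legacy Python 2 commands module):
out = run('echo hello')                          # foreground: returns captured stdout, 'hello'
proc = run('sleep 2', background=True)           # background: returns a Popen handle
proc.wait()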
def rm_corr(data=None, x=None, y=None, subject=None, tail='two-sided'):
"""Repeated measures correlation.
Parameters
----------
data : pd.DataFrame
Dataframe.
x, y : string
Name of columns in ``data`` containing the two dependent variables.
subject : string
Name of column in ``data`` containing the subject indicator.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'r' : Repeated measures correlation coefficient
'dof' : Degrees of freedom
'pval' : one or two tailed p-value
'CI95' : 95% parametric confidence intervals
'power' : achieved power of the test (= 1 - type II error).
Notes
-----
Repeated measures correlation (rmcorr) is a statistical technique
for determining the common within-individual association for paired
measures assessed on two or more occasions for multiple individuals.
From Bakdash and Marusich (2017):
"Rmcorr accounts for non-independence among observations using analysis
of covariance (ANCOVA) to statistically adjust for inter-individual
variability. By removing measured variance between-participants,
rmcorr provides the best linear fit for each participant using parallel
regression lines (the same slope) with varying intercepts.
Like a Pearson correlation coefficient, the rmcorr coefficient
is bounded by − 1 to 1 and represents the strength of the linear
association between two variables."
Results have been tested against the `rmcorr` R package.
Please note that NaN are automatically removed from the dataframe
(listwise deletion).
References
----------
.. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation.
Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456
.. [2] Bland, J. M., & Altman, D. G. (1995). Statistics notes: Calculating
correlation coefficients with repeated observations:
Part 1—correlation within subjects. Bmj, 310(6977), 446.
.. [3] https://github.com/cran/rmcorr
Examples
--------
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
r dof pval CI95% power
rm_corr -0.507 38 0.000847 [-0.71, -0.23] 0.93
"""
from pingouin import ancova, power_corr
# Safety checks
assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame'
assert x in data, 'The %s column is not in data.' % x
assert y in data, 'The %s column is not in data.' % y
assert subject in data, 'The %s column is not in data.' % subject
if data[subject].nunique() < 3:
raise ValueError('rm_corr requires at least 3 unique subjects.')
# Remove missing values
data = data[[x, y, subject]].dropna(axis=0)
# Using PINGOUIN
aov, bw = ancova(dv=y, covar=x, between=subject, data=data,
return_bw=True)
sign = np.sign(bw)
dof = int(aov.loc[2, 'DF'])
n = dof + 2
ssfactor = aov.loc[1, 'SS']
sserror = aov.loc[2, 'SS']
rm = sign * np.sqrt(ssfactor / (ssfactor + sserror))
pval = aov.loc[1, 'p-unc']
pval *= 0.5 if tail == 'one-sided' else 1
ci = compute_esci(stat=rm, nx=n, eftype='pearson').tolist()
pwr = power_corr(r=rm, n=n, tail=tail)
# Convert to Dataframe
stats = pd.DataFrame({"r": round(rm, 3), "dof": int(dof),
"pval": pval, "CI95%": str(ci),
"power": round(pwr, 3)}, index=["rm_corr"])
return stats | 0.000267 |
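The heart of the computation above, isolated with made-up sums of squares: once the ANCOVA yields the covariate sum of squares (ssfactor) and the error sum of squares (sserror), the rmcorr coefficient is a signed square root of their ratio, with the sign taken from the within-subject slope.
import numpy as np
def rm_coefficient(ss_factor, ss_error, slope_sign):
    return np.sign(slope_sign) * np.sqrt(ss_factor / (ss_factor + ss_error))
print(rm_coefficient(4.0, 12.0, -1.0))           # -0.5 with these illustrative values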
def confirm(pid, record, template, **kwargs):
"""Confirm email address."""
recid = int(pid.pid_value)
token = request.view_args['token']
# Validate token
data = EmailConfirmationSerializer.compat_validate_token(token)
if data is None:
flash(_("Invalid confirmation link."), category='danger')
return redirect(url_for("invenio_records_ui.recid", pid_value=recid))
# Validate request exists.
r = AccessRequest.query.get(data['id'])
if not r:
abort(404)
# Confirm email address.
if r.status != RequestStatus.EMAIL_VALIDATION:
abort(404)
r.confirm_email()
db.session.commit()
flash(_("Email validated and access request submitted."), category='info')
return redirect(url_for("invenio_records_ui.recid", pid_value=recid)) | 0.001229 |
def get_col_rgba(color, transparency=None, opacity=None):
"""This class converts a Gdk.Color into its r, g, b parts and adds an alpha according to needs
If both transparency and opacity is None, alpha is set to 1 => opaque
:param Gdk.Color color: Color to extract r, g and b from
:param float | None transparency: Value between 0 (opaque) and 1 (transparent) or None if opacity is to be used
:param float | None opacity: Value between 0 (transparent) and 1 (opaque) or None if transparency is to be used
:return: Red, Green, Blue and Alpha value (all between 0.0 - 1.0)
"""
r, g, b = color.red, color.green, color.blue
# Convert from 0-65535 to 0-1
r /= 65535.
g /= 65535.
b /= 65535.
if transparency is not None or opacity is None:
transparency = 0 if transparency is None else transparency # default value
if transparency < 0 or transparency > 1:
raise ValueError("Transparency must be between 0 and 1")
alpha = 1 - transparency
else:
if opacity < 0 or opacity > 1:
raise ValueError("Opacity must be between 0 and 1")
alpha = opacity
return r, g, b, alpha | 0.004216 |
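A quick check of the alpha handling with a lightweight stand-in for Gdk.Color (anything exposing 16-bit red, green and blue attributes works here):
from collections import namedtuple
FakeColor = namedtuple('FakeColor', ['red', 'green', 'blue'])
white = FakeColor(65535, 65535, 65535)
print(get_col_rgba(white))                       # (1.0, 1.0, 1.0, 1.0) -> opaque by default
print(get_col_rgba(white, transparency=0.25))    # alpha = 1 - 0.25 = 0.75
print(get_col_rgba(white, opacity=0.4))          # alpha = 0.4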
def ui_clear_clicked_image(self, value):
"""
Setter for **self.__ui_clear_clicked_image** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"ui_clear_clicked_image", value)
assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format(
"ui_clear_clicked_image", value)
self.__ui_clear_clicked_image = value | 0.007055 |
def transformer(self):
"""
Creates the internal transformer that maps the cluster center's high
dimensional space to its two dimensional space.
"""
ttype = self.embedding.lower() # transformer method type
if ttype == 'mds':
return MDS(n_components=2, random_state=self.random_state)
if ttype == 'tsne':
return TSNE(n_components=2, random_state=self.random_state)
raise YellowbrickValueError("unknown embedding '{}'".format(ttype)) | 0.005758 |
def get_uri_obj(uri, storage_args={}):
"""
Retrieve the underlying storage object based on the URI (i.e., scheme).
:param str uri: URI to get storage object for
:param dict storage_args: Keyword arguments to pass to the underlying storage object
"""
if isinstance(uri, BaseURI): return uri
uri_obj = None
o = urlparse(uri)
for storage in STORAGES:
uri_obj = storage.parse_uri(o, storage_args=storage_args)
if uri_obj is not None:
break
#end for
if uri_obj is None:
raise TypeError('<{}> is an unsupported URI.'.format(uri))
return uri_obj | 0.00639 |
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu | 0.002915 |
def magic_session(db_session=None, url=None):
"""Either does nothing with the session you already have or
makes one that commits and closes no matter what happens
"""
if db_session is not None:
yield db_session
else:
session = get_session(url, expire_on_commit=False)
try:
try:
yield session
finally:
session.commit()
finally:
session.close() | 0.00216 |
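A usage sketch, assuming the original module wraps magic_session with contextlib.contextmanager and that get_session() returns a SQLAlchemy-style session (record is a hypothetical mapped object):
# no session passed in: one is created, committed and closed automatically
with magic_session(url='sqlite://') as session:
    session.add(record)
# an existing session is reused and left for the caller to commit and close
existing = get_session('sqlite://', expire_on_commit=False)
with magic_session(existing) as session:
    session.add(record)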
def calc_pvalues(query, gene_sets, background=20000, **kwargs):
""" calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
:param set background: total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
In Scipy:
for args in scipy.hypergeom.sf(k, M, n, N, loc=0):
M: the total number of objects,
n: the total number of Type I objects.
k: the random variate represents the number of Type I objects in N drawn
without replacement from the total population.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
"""
# number of genes in your query data
k = len(query)
query = set(query)
vals = []
# background should be all genes in annotated database
# such as GO, KEGG, etc.
if isinstance(background, set):
bg = len(background) # total number in your annotated database
# filter genes that not found in annotated database
query = query.intersection(background)
elif isinstance(background, int):
bg = background
else:
raise ValueError("background should be set or int object")
# pval
subsets = sorted(gene_sets.keys())
for s in subsets:
category = gene_sets.get(s)
m = len(category)
hits = query.intersection(set(category))
x = len(hits)
if x < 1 : continue
# pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal)
# p(X >= hitCounts)
vals.append((s, hypergeom.sf(x-1, bg, m, k), x, m, hits))
return zip(*vals) | 0.008445 |
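A tiny numeric sketch of the R/SciPy correspondence spelled out in the docstring, with made-up counts (x overlapping genes, a gene set of size m, a query of size k, and bg background genes):
from scipy.stats import hypergeom
bg, m, k, x = 20000, 40, 100, 10
# P(X >= x); equivalent to R's phyper(x - 1, m, bg - m, k, lower.tail=FALSE)
print(hypergeom.sf(x - 1, bg, m, k))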
def handle_button(self, event, event_type):
"""Convert the button information from quartz into evdev format."""
# 0 for left
# 1 for right
# 2 for middle/center
# 3 for side
mouse_button_number = self._get_mouse_button_number(event)
# Identify buttons 3,4,5
if event_type in (25, 26):
event_type = event_type + (mouse_button_number * 0.1)
# Add buttons to events
event_type_string, event_code, value, scan = self.codes[event_type]
if event_type_string == "Key":
scan_event, key_event = self.emulate_press(
event_code, scan, value, self.timeval)
self.events.append(scan_event)
self.events.append(key_event)
# doubleclick/n-click of button
click_state = self._get_click_state(event)
repeat = self.emulate_repeat(click_state, self.timeval)
self.events.append(repeat) | 0.002103 |
def list_lbaas_pools(self, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_pools for a project."""
return self.list('pools', self.lbaas_pools_path,
retrieve_all, **_params) | 0.008811 |
def appendSpacePadding(str, blocksize=AES_blocksize):
'Pad with spaces'
pad_len = paddingLength(len(str), blocksize)
padding = ' '*pad_len  # pad with spaces, matching the function name and docstring
return str + padding | 0.039326 |
def submitted_projects(raw_df):
"""
Return all submitted projects.
"""
df = raw_df.astype({'PRONAC': str, 'CgcCpf': str})
submitted_projects = df.groupby('CgcCpf')[
'PRONAC'
].agg(['unique', 'nunique'])
submitted_projects.columns = ['pronac_list', 'num_pronacs']
return submitted_projects | 0.00303 |
def example_delete_topics(a, topics):
""" delete topics """
# Call delete_topics to asynchronously delete topics, a future is returned.
# By default this operation on the broker returns immediately while
# topics are deleted in the background. But here we give it some time (30s)
# to propagate in the cluster before returning.
#
# Returns a dict of <topic,future>.
fs = a.delete_topics(topics, operation_timeout=30)
# Wait for operation to finish.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} deleted".format(topic))
except Exception as e:
print("Failed to delete topic {}: {}".format(topic, e)) | 0.001362 |
def exists_uda(self, name, database=None):
"""
Checks if a given UDAF exists within a specified database
Parameters
----------
name : string, UDAF name
database : string, database name
Returns
-------
if_exists : boolean
"""
return len(self.list_udas(database=database, like=name)) > 0 | 0.005333 |
def get_global_config_dir():
"""Returns global config location. E.g. ~/.config/dvc/config.
Returns:
str: path to the global config directory.
"""
from appdirs import user_config_dir
return user_config_dir(
appname=Config.APPNAME, appauthor=Config.APPAUTHOR
) | 0.006024 |
def _or_join(self, close_group=False):
"""Combine terms with OR.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example:
If the current query is "(term1"
.or(close_group=True) => "(term1) OR("
.or(close_group=False) => "(term1 OR "
Returns:
SearchHelper: Self
"""
if not self.initialized:
raise ValueError("You must add a search term before adding an operator.")
else:
self._operator("OR", close_group=close_group)
return self | 0.005031 |
def _authorization_header(cls, credentials):
"""
Creates authorization headers if the provider supports it. See:
http://en.wikipedia.org/wiki/Basic_access_authentication.
:param credentials:
:class:`.Credentials`
:returns:
Headers as :class:`dict`.
"""
if cls._x_use_authorization_header:
res = ':'.join(
(credentials.consumer_key,
credentials.consumer_secret))
res = base64.b64encode(six.b(res)).decode()
return {'Authorization': 'Basic {0}'.format(res)}
else:
return {} | 0.003106 |
def add_directory(self, relativePath, description=None, clean=False,
raiseError=True, ntrials=3):
"""
Add a directory in the repository and creates its attribute in the
Repository with utc timestamp. It ensures adding all the missing
directories in the path.
:Parameters:
#. relativePath (string): The relative to the repository path to
where directory must be added.
#. description (None, string): Any random description about the
added directory.
#. clean (boolean): Whether to remove existing non repository
tracked files and folders in all created directory chain tree.
#. raiseError (boolean): Whether to raise encountered error instead
of returning failure.
#. ntrials (int): After acquiring all locks, ntrials is the maximum
number of trials allowed before failing.
In rare cases, when multiple processes
are accessing the same repository components, different processes
can alter repository components between successive lock releases
of some other process. A bigger number of trials lowers the
likelihood of failure due to simultaneous alteration by multiple
processes.
:Returns:
#. success (boolean): Whether adding the directory was successful.
#. message (None, string): Reason why directory was not added or
random information.
"""
assert isinstance(raiseError, bool), "raiseError must be boolean"
assert isinstance(relativePath, basestring), "relativePath must be a string"
if description is not None:
assert isinstance(description, basestring), "description must be None or a string"
assert isinstance(ntrials, int), "ntrials must be integer"
assert ntrials>0, "ntrials must be >0"
# normalise path
path = self.to_repo_relative_path(path=relativePath, split=False)
# whether to replace
if self.is_repository_directory(path):
return True, "Directory is already tracked in repository"
# check whether name is allowed
allowed, reason = self.is_name_allowed(path)
if not allowed:
if raiseError:
raise Exception(reason)
return False, reason
# lock repository and get __repo updated from disk
LR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))
acquired, code = LR.acquire_lock()
if not acquired:
m = "code %s. Unable to aquire the lock to add directory. You may try again!"%(code,)
if raiseError:
raise Exception(m)
return False,m
# load repository info
for _trial in range(ntrials):
try:
repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))
self.__repo['walk_repo'] = repo['walk_repo']
except Exception as err:
error = str(err)
if self.DEBUG_PRINT_FAILED_TRIALS: print("Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute"%(_trial, inspect.stack()[1][3], str(error)))
else:
error = None
break
if error is not None:
_ = LR.release_lock()
assert not raiseError, Exception(error)
return False, error
# create directories
error = None
posList = self.__repo['walk_repo']
dirPath = self.__path
spath = path.split(os.sep)
for idx, name in enumerate(spath):
# create and acquire lock.
LD = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(dirPath, self.__dirLock))
acquired, code = LD.acquire_lock()
if not acquired:
error = "Code %s. Unable to aquire the lock when adding '%s'. All prior directories were added. You may try again, to finish adding directory"%(code,dirPath)
break
# add to directory
for _trial in range(ntrials):
try:
dirPath = os.path.join(dirPath, name)
riPath = os.path.join(dirPath, self.__dirInfo)
dList = [d for d in posList if isinstance(d, dict)]
dList = [d for d in dList if name in d]
# clean directory
if not len(dList) and clean and os.path.exists(dirPath):
try:
shutil.rmtree( dirPath, ignore_errors=True )
except Exception as err:
error = "Unable to clean directory '%s' (%s)"%(dirPath, err)
break
# create directory
if not os.path.exists(dirPath):
try:
os.mkdir(dirPath)
except Exception as err:
error = "Unable to create directory '%s' (%s)"%(dirPath, err)
break
# create and dump dirinfo
self.__save_dirinfo(description=[None, description][idx==len(spath)-1],
dirInfoPath=riPath, create=True)
# update directory list
if not len(dList):
rsd = {name:[]}
posList.append(rsd)
posList = rsd[name]
else:
assert len(dList) == 1, "Same directory name dict is found twice. This shouldn't have happened. Report the issue"
posList = dList[0][name]
except Exception as err:
LD.release_lock()
error = "Unable to create directory '%s' info file (%s)"%(dirPath, str(err))
if self.DEBUG_PRINT_FAILED_TRIALS: print("Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute"%(_trial, inspect.stack()[1][3], str(error)))
else:
LD.release_lock()
break
if error is not None:
break
# save __repo
if error is None:
try:
_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)
except Exception as err:
error = str(err)
pass
try:
LD.release_lock()
except:
pass
try:
LR.release_lock()
except:
pass
# check and return
assert error is None or not raiseError, error
return error is None, error | 0.006277 |
def run_task(self, task_name, **options):
""" Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
"""
task_config = self.project_config.get_task(task_name)
class_path = task_config.class_path
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, task_config)
return self._run_task(task_class, task_config) | 0.0075 |
def _onCompletionListItemSelected(self, index):
"""Item selected. Insert completion to editor
"""
model = self._widget.model()
selectedWord = model.words[index]
textToInsert = selectedWord[len(model.typedText()):]
self._qpart.textCursor().insertText(textToInsert)
self._closeCompletion() | 0.005831 |
def conf_as_dict(conf_filename, encoding=None, case_sensitive=False):
"""
Read an ini configuration file and return a dict built from its contents.
:param:
* conf_filename: (string) full filename of the ini configuration file to read
* encoding: (string) file encoding
* case_sensitive: (bool) whether keys are case sensitive, default is False
:return:
* flag: (bool) whether the configuration file was read successfully; True on success, False on error
* d: (dict) dict with the configuration file contents when reading succeeds, in the same order as the file
* count: (int) number of keys (sections) read from the configuration file
Example::
print('--- conf_as_dict demo---')
# define the configuration file name
conf_filename = 'test_conf.ini'
# read the configuration file
ds = conf_as_dict(conf_filename)
ds1 = conf_as_dict(conf_filename, case_sensitive=True)
# show whether it succeeded, the whole dict contents and the number of keys
print('flag:', ds[0])
print('dict:', ds[1])
print('length:', ds[2])
d = ds[1]
d1 = ds1[1]
# show all contents of one section
print('section show_opt:', d['show_opt'])
# show all contents of one section, case sensitive
print('section show_opt:', d1['show_opt'])
# show the value of one key in a section
print('section show_opt, key short_opt:', d['show_opt']['short_opt'])
# read a more complex section: first read the count, then iterate over each key's value
i = int(d['get_extra_rules']['erule_count'])
print('section get_extra_rules, key erule_count:', i)
for j in range(i):
print('section get_extra_rules, key erule_type:', d['get_extra_rules']['erule_'+str(j)])
print('---')
Output::
--- conf_as_dict demo---
flag: True
dict: (omit)
length: 7
section show_opt: {'short_opt': 'b:d:v:p:f:', 'long_opt': 'region=,prov=,mer_id=,mer_short_name=,web_status='}
section show_opt: {'Short_Opt': 'b:d:v:p:f:', 'Long_Opt': 'region=,prov=,mer_id=,mer_short_name=,web_status='}
section show_opt, key short_opt: b:d:v:p:f:
section get_extra_rules, key erule_count: 2
section get_extra_rules, key erule_type: extra_rule_1
section get_extra_rules, key erule_type: extra_rule_2
---
"""
flag = False
# check that the file exists
if not pathlib.Path(conf_filename).is_file():
return flag,
# decide whether to be case sensitive
cf = configparser.ConfigParser() if not case_sensitive else MyConfigParser()
# read the config file
try:
if sys.version > '3':
cf.read(conf_filename, encoding=encoding)
else:
cf.read(conf_filename)
except:
flag = False
return flag,
d = OrderedDict(cf._sections)
for k in d:
d[k] = OrderedDict(cf._defaults, **d[k])
d[k].pop('__name__', None)
flag = True
# count the number of keys
count = len(d.keys())
return flag, d, count | 0.004772 |
def getAtomic(rates, ver, lamb, br, reactfn):
""" prompt atomic emissions (nm)
844.6 777.4
"""
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br) | 0.006787 |
def extract(self, msg):
"""Yield an ordered dictionary if msg['type'] is in keys_by_type."""
def normal(key):
v = msg.get(key)
if v is None:
return v
normalizer = self.normalizers.get(key, lambda x: x)
return normalizer(v)
def odict(keys):
return collections.OrderedDict((k, normal(k)) for k in keys)
def match(m):
return (msg.get(k) in v for k, v in m.items()) if m else ()
accept = all(match(self.accept))
reject = any(match(self.reject))
if reject or not accept:
keys = ()
elif self.keys_by_type is None:
keys = [k for k in msg.keys() if k not in self.omit]
else:
keys = self.keys_by_type.get(msg.get('type'))
return odict(keys) | 0.002384 |
def dvds_new_releases(self, **kwargs):
"""Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('dvds_new_releases')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | 0.003442 |
def run_epoch(self, epoch_info: EpochInfo, source: 'vel.api.Source'):
""" Run full epoch of learning """
epoch_info.on_epoch_begin()
lr = epoch_info.optimizer.param_groups[-1]['lr']
print("|-------- Epoch {:06} Lr={:.6f} ----------|".format(epoch_info.global_epoch_idx, lr))
self.train_epoch(epoch_info, source)
epoch_info.result_accumulator.freeze_results('train')
self.validation_epoch(epoch_info, source)
epoch_info.result_accumulator.freeze_results('val')
epoch_info.on_epoch_end() | 0.005348 |
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
value = to_string(value)
if allow_token:
token_chars = HEADER_TOKEN_CHARS | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"') | 0.001799 |
def _get_flat_db_sources(self, model):
""" Return a flattened representation of the individual ``sources`` lists. """
sources = []
for source in self.sources:
for sub_source in self.expand_source(source):
target_field = self.resolve_source(model, sub_source)
if target_field:
sources.append(sub_source)
return sources | 0.007264 |
def create_stash(self, payload, path=None):
"""
Create a stash. (JSON document)
"""
if path:
self._request('POST', '/stashes/{}'.format(path),
json=payload)
else:
self._request('POST', '/stashes', json=payload)
return True | 0.00625 |
def all_agents(stmts):
"""Return a list of all of the agents from a list of statements.
Only agents that are not None and have a TEXT entry are returned.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
agents : list of :py:class:`indra.statements.Agent`
List of agents that appear in the input list of indra statements.
"""
agents = []
for stmt in stmts:
for agent in stmt.agent_list():
# Agents don't always have a TEXT db_refs entry (for instance
# in the case of Statements from databases) so we check for this.
if agent is not None and agent.db_refs.get('TEXT') is not None:
agents.append(agent)
return agents | 0.001287 |
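A hedged usage sketch with two INDRA statements; the agent names and TEXT db_refs are made up, and the constructors are assumed to follow INDRA's documented API::

    from indra.statements import Agent, Phosphorylation

    mek = Agent('MAP2K1', db_refs={'TEXT': 'MEK1'})
    erk = Agent('MAPK1', db_refs={'TEXT': 'ERK2'})
    stmts = [Phosphorylation(mek, erk), Phosphorylation(None, erk)]
    # the None enzyme (and any agent lacking a TEXT entry) is filtered out
    print([a.name for a in all_agents(stmts)])   # ['MAP2K1', 'MAPK1', 'MAPK1']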
def save(self, info):
""" Handles saving the current model to the last file.
"""
        save_file = self.save_file
        if not isfile(save_file):
            self.save_as(info)
        else:
            # write the model's DOT representation as text (str, not bytes)
            with open(save_file, "w") as fd:
                fd.write(str(self.model)) | 0.004367
def resolve_include(self, t):
"""Resolve a tuple-ized #include line.
This handles recursive expansion of values without "" or <>
surrounding the name until an initial " or < is found, to handle
#include FILE
where FILE is a #define somewhere else."""
s = t[1]
while not s[0] in '<"':
#print("s =", s)
try:
s = self.cpp_namespace[s]
except KeyError:
m = function_name.search(s)
s = self.cpp_namespace[m.group(1)]
if callable(s):
args = function_arg_separator.split(m.group(2))
s = s(*args)
if not s:
return None
return (t[0], s[0], s[1:-1]) | 0.003831 |
def attach_volume(self, xml_bytes):
"""Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device
"""
root = XML(xml_bytes)
status = root.findtext("status")
attach_time = root.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
return {"status": status, "attach_time": attach_time} | 0.003401 |
def _AsList(arg):
"""Encapsulates an argument in a list, if it's not already iterable."""
  if (isinstance(arg, string_types) or
      not isinstance(arg, collections.abc.Iterable)):
return [arg]
else:
return list(arg) | 0.016807 |
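Illustrative behaviour of the helper above (note that strings are deliberately not unpacked)::

    print(_AsList('abc'))        # ['abc']
    print(_AsList((1, 2, 3)))    # [1, 2, 3]
    print(_AsList(42))           # [42]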
def iter_parsed_values(self, field: Field) -> Iterable[Tuple[str, Any]]:
"""
        Walk the dictionary of parsers and yield a (key, value) pair for every parser whose result is truthy.
"""
for key, func in self.parsers.items():
value = func(field)
if not value:
continue
yield key, value | 0.006154 |
def query_tissue_specificity():
"""
Returns list of tissue specificity by query parameters
---
tags:
- Query functions
parameters:
- name: comment
in: query
type: string
required: false
description: Comment to tissue specificity
default: '%APP695%'
- name: entry_name
in: query
type: string
required: false
description: reference identifier
default: A4_HUMAN
- name: limit
in: query
type: integer
required: false
description: limit of results numbers
default: 10
"""
args = get_args(
request_args=request.args,
allowed_str_args=['comment', 'entry_name'],
allowed_int_args=['limit']
)
return jsonify(query.tissue_specificity(**args)) | 0.001186 |
def _parse_resource(resource):
""" Parses and completes resource information """
resource = resource.strip() if resource else resource
if resource in {ME_RESOURCE, USERS_RESOURCE}:
return resource
elif '@' in resource and not resource.startswith(USERS_RESOURCE):
# when for example accessing a shared mailbox the
# resource is set to the email address. we have to prefix
# the email with the resource 'users/' so --> 'users/email_address'
return '{}/{}'.format(USERS_RESOURCE, resource)
else:
return resource | 0.003226 |
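Assuming the module constants are ME_RESOURCE = 'me' and USERS_RESOURCE = 'users' (illustrative values only), the normalization behaves roughly like this::

    print(_parse_resource('me'))                           # 'me'
    print(_parse_resource(' jane.doe@contoso.com '))       # 'users/jane.doe@contoso.com'
    print(_parse_resource('users/jane.doe@contoso.com'))   # returned unchanged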
def load_json(json_file, **kwargs):
"""
Open and load data from a JSON file
.. code:: python
reusables.load_json("example.json")
# {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}}
:param json_file: Path to JSON file as string
:param kwargs: Additional arguments for the json.load command
:return: Dictionary
"""
with open(json_file) as f:
return json.load(f, **kwargs) | 0.002283 |
def GetSummary(self):
"""Gets a client summary object.
Returns:
rdf_client.ClientSummary
Raises:
ValueError: on bad cloud type
"""
summary = rdf_client.ClientSummary()
summary.client_id = self.client_id
summary.timestamp = self.timestamp
summary.system_info.release = self.os_release
summary.system_info.version = str(self.os_version or "")
summary.system_info.kernel = self.kernel
summary.system_info.machine = self.arch
summary.system_info.install_date = self.install_time
kb = self.knowledge_base
if kb:
summary.system_info.fqdn = kb.fqdn
summary.system_info.system = kb.os
summary.users = kb.users
summary.interfaces = self.interfaces
summary.client_info = self.startup_info.client_info
if kb.os_release:
summary.system_info.release = kb.os_release
if kb.os_major_version:
summary.system_info.version = "%d.%d" % (kb.os_major_version,
kb.os_minor_version)
hwi = self.hardware_info
if hwi:
summary.serial_number = hwi.serial_number
summary.system_manufacturer = hwi.system_manufacturer
summary.system_uuid = hwi.system_uuid
cloud_instance = self.cloud_instance
if cloud_instance:
summary.cloud_type = cloud_instance.cloud_type
if cloud_instance.cloud_type == "GOOGLE":
summary.cloud_instance_id = cloud_instance.google.unique_id
elif cloud_instance.cloud_type == "AMAZON":
summary.cloud_instance_id = cloud_instance.amazon.instance_id
else:
raise ValueError("Bad cloud type: %s" % cloud_instance.cloud_type)
return summary | 0.008844 |
def create_framework(
bundles,
properties=None,
auto_start=False,
wait_for_stop=False,
auto_delete=False,
):
# type: (Union[list, tuple], dict, bool, bool, bool) -> Framework
"""
Creates a Pelix framework, installs the given bundles and returns its
instance reference.
If *auto_start* is True, the framework will be started once all bundles
will have been installed
If *wait_for_stop* is True, the method will return only when the framework
will have stopped. This requires *auto_start* to be True.
If *auto_delete* is True, the framework will be deleted once it has
stopped, and the method will return None.
This requires *wait_for_stop* and *auto_start* to be True.
:param bundles: Bundles to initially install (shouldn't be empty if
*wait_for_stop* is True)
:param properties: Optional framework properties
:param auto_start: If True, the framework will be started immediately
:param wait_for_stop: If True, the method will return only when the
framework will have stopped
:param auto_delete: If True, deletes the framework once it stopped.
:return: The framework instance
:raise ValueError: Only one framework can run at a time
"""
# Test if a framework already exists
if FrameworkFactory.is_framework_running(None):
raise ValueError("A framework is already running")
# Create the framework
framework = FrameworkFactory.get_framework(properties)
# Install bundles
context = framework.get_bundle_context()
for bundle in bundles:
context.install_bundle(bundle)
if auto_start:
# Automatically start the framework
framework.start()
if wait_for_stop:
# Wait for the framework to stop
try:
framework.wait_for_stop(None)
except KeyboardInterrupt:
# Stop keyboard interruptions
if framework.get_state() == Bundle.ACTIVE:
framework.stop()
if auto_delete:
# Delete the framework
FrameworkFactory.delete_framework(framework)
framework = None
return framework | 0.000446 |
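A hedged usage sketch; the bundle names follow the usual iPOPO distribution but should be treated as illustrative::

    fw = create_framework(('pelix.ipopo.core', 'pelix.shell.core'),
                          properties={'example.prop': 'value'},
                          auto_start=True)
    # ... interact with fw.get_bundle_context() ...
    fw.stop()
    FrameworkFactory.delete_framework(fw)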
def exponential(x, y, xscale, yscale):
"""
Two-dimensional oriented exponential decay pattern.
"""
if xscale==0.0 or yscale==0.0:
return x*0.0
with float_error_ignore():
x_w = np.divide(x,xscale)
y_h = np.divide(y,yscale)
return np.exp(-np.sqrt(x_w*x_w+y_h*y_h)) | 0.015873 |
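A quick numerical check of the fall-off on a small grid (the scales below are arbitrary; the float_error_ignore guard in the function above only matters when a scale is zero)::

    import numpy as np

    y, x = np.mgrid[-1:1:5j, -1:1:5j]                      # 5x5 coordinate grid
    pattern = np.exp(-np.sqrt((x / 0.5) ** 2 + (y / 0.25) ** 2))
    print(pattern[2, 2], pattern.max())                    # 1.0 at the origin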
def do_photometry(self):
"""
Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data
in each orientation. This function is called by other functions and generally the user will not need
to interact with it directly.
"""
std_f = np.zeros(4)
data_save = np.zeros_like(self.postcard)
self.obs_flux = np.zeros_like(self.reference_flux)
for i in range(4):
g = np.where(self.qs == i)[0]
wh = np.where(self.times[g] > 54947)
data_save[g] = np.roll(self.postcard[g], int(self.roll_best[i,0]), axis=1)
data_save[g] = np.roll(data_save[g], int(self.roll_best[i,1]), axis=2)
self.target_flux_pixels = data_save[:,self.targets == 1]
self.target_flux = np.sum(self.target_flux_pixels, axis=1)
self.obs_flux[g] = self.target_flux[g] / self.reference_flux[g]
self.obs_flux[g] /= np.median(self.obs_flux[g[wh]])
fitline = np.polyfit(self.times[g][wh], self.obs_flux[g][wh], 1)
std_f[i] = np.max([np.std(self.obs_flux[g][wh]/(fitline[0]*self.times[g][wh]+fitline[1])), 0.001])
self.flux_uncert = std_f | 0.011728 |
def get_subgraphs_by_annotation(graph, annotation, sentinel=None):
"""Stratify the given graph into sub-graphs based on the values for edges' annotations.
:param pybel.BELGraph graph: A BEL graph
:param str annotation: The annotation to group by
    :param Optional[str] sentinel: The value under which to group unannotated edges. If None, unannotated edges are not kept.
:rtype: dict[str,pybel.BELGraph]
"""
if sentinel is not None:
subgraphs = _get_subgraphs_by_annotation_keep_undefined(graph, annotation, sentinel)
else:
subgraphs = _get_subgraphs_by_annotation_disregard_undefined(graph, annotation)
cleanup(graph, subgraphs)
return subgraphs | 0.007267 |
def add_factors(self, *factors):
"""
Associate a factor to the graph.
See factors class for the order of potential values
Parameters
----------
        *factors: pgmpy.factors.factors objects
A factor object on any subset of the variables of the model which
is to be associated with the model.
Returns
-------
None
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> student.add_node(('Alice', 'Bob'))
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[3, 2],
... values=np.random.rand(6))
>>> student.add_factors(factor)
"""
for factor in factors:
factor_scope = set(factor.scope())
nodes = [set(node) for node in self.nodes()]
if factor_scope not in nodes:
                raise ValueError('Factors defined on clusters of variables not '
                                 'present in the model')
self.factors.append(factor) | 0.00172 |
def write_path(target, path, value, separator='/'):
"""Write a value deep into a dict building any intermediate keys.
:param target: a dict to write data to
:param path: a key or path to a key (path is delimited by `separator`)
:param value: the value to write to the key
:keyword separator: the separator used in the path (ex. Could be "." for a
json/mongodb type of value)
"""
parts = path.split(separator)
current = target
for part in parts[:-1]:
if part not in current:
current[part] = current = {}
else:
current = current[part]
current[parts[-1]] = value | 0.001543 |
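Usage sketch for write_path, assuming the function above is in scope; the keys and values are illustrative::

    cfg = {}
    write_path(cfg, 'db/credentials/user', 'admin')
    write_path(cfg, 'db.credentials.port', 5432, separator='.')
    print(cfg)   # {'db': {'credentials': {'user': 'admin', 'port': 5432}}}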
def update_user(self, user_is_artist="", artist_level="", artist_specialty="", real_name="", tagline="", countryid="", website="", bio=""):
"""Update the users profile information
:param user_is_artist: Is the user an artist?
:param artist_level: If the user is an artist, what level are they
:param artist_specialty: If the user is an artist, what is their specialty
        :param real_name: The user's real name
        :param tagline: The user's tagline
        :param countryid: The user's location
        :param website: The user's personal website
        :param bio: The user's bio
"""
        if self.standard_grant_type != "authorization_code":
            raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
post_data = {}
if user_is_artist:
post_data["user_is_artist"] = user_is_artist
if artist_level:
post_data["artist_level"] = artist_level
if artist_specialty:
post_data["artist_specialty"] = artist_specialty
if real_name:
post_data["real_name"] = real_name
if tagline:
post_data["tagline"] = tagline
if countryid:
post_data["countryid"] = countryid
if website:
post_data["website"] = website
if bio:
post_data["bio"] = bio
response = self._req('/user/profile/update', post_data=post_data)
return response['success'] | 0.003868 |
def evaluate_model(self,
accuracy,
num_steps,
feed_vars=(),
feed_data=None,
summary_tag=None,
print_every=0):
"""Evaluates the given model.
Args:
accuracy: The metric that is being evaluated or a tuple of metrics.
num_steps: The number of steps to run in the evaluator.
feed_vars: A list or tuple of the variables that will be fed.
feed_data: A generator that produces tuples of the same length as
feed_vars.
summary_tag: If provided, the final result of running the model will be
published to this tag.
print_every: Print a summary every so many steps, use 0 to disable.
Returns:
The accuracy.
Raises:
ValueError: If the wrong number of summary tags are provided or previously
running QueueRunners haven't been stopped.
"""
if not hasattr(self, '_saver'):
raise ValueError('Before evaluating, you must initialize the model with '
'load_from_checkpoint, prepare or saver.')
self._run_init_test_vars_op()
    if (not isinstance(accuracy, collections.abc.Sequence) or
isinstance(accuracy, six.string_types)):
accuracy = (accuracy,)
if summary_tag:
summary_tag = (summary_tag,)
if summary_tag and len(summary_tag) != len(accuracy):
raise ValueError(
'If summaries are requested, there must be a tag per accuracy node.')
result = self.run_model(accuracy,
num_steps,
feed_vars=feed_vars,
feed_data=feed_data,
print_every=print_every,
allow_initialize=False)
assert len(result) == len(accuracy) + 1, (
'results is wrong length, was %s but should be 1 longer than %s' %
(result, accuracy))
if summary_tag:
self.add_summaries(result[0], *zip(summary_tag, result[1:]))
return result[1:] | 0.006809 |
def descendents(self, method=None, state=None):
"""
Find descendant tasks, optionally filtered by method and/or state.
:param method: (optional) filter for tasks, eg. "buildArch".
:param state: (optional) filter for tasks, eg. task_states.OPEN.
:returns: deferred that when fired returns a list of Tasks.
"""
subtasks = yield self.connection.getTaskDescendents(self.id)
if method:
subtasks = [t for t in subtasks if t.method == method]
if state:
subtasks = [t for t in subtasks if t.state == state]
defer.returnValue(subtasks) | 0.00317 |
def write_word(self, offset, word):
"""
.. _write_word:
        Writes one word to a device;
        see read_word_.
"""
self._lock = True
if(offset > self.current_max_offset):
raise BUSError("Offset({}) exceeds address space of BUS({})".format(offset, self.current_max_offset))
self.writes += 1
self.truncate.setvalue(word)
for addresspace, device in self.index.items():
if(offset in addresspace):
device.write(offset - self.start_addresses[device], self.truncate.getvalue()) | 0.040241 |
def find_message_handler(self, handler_name, handler_type='primary'):
"""Returns the MessageHandler given its name and type for this class."""
ret = lib.EnvFindDefmessageHandler(
self._env, self._cls, handler_name.encode(), handler_type.encode())
if ret == 0:
raise CLIPSError(self._env)
return MessageHandler(self._env, self._cls, ret) | 0.007634 |
def evaluated_variants(self, case_id):
"""Returns variants that has been evaluated
Return all variants, snvs/indels and svs from case case_id
which have a entry for 'acmg_classification', 'manual_rank', 'dismiss_variant'
or if they are commented.
Args:
case_id(str)
Returns:
variants(iterable(Variant))
"""
# Get all variants that have been evaluated in some way for a case
query = {
'$and': [
{'case_id': case_id},
{
'$or': [
{'acmg_classification': {'$exists': True}},
{'manual_rank': {'$exists': True}},
{'dismiss_variant': {'$exists': True}},
]
}
],
}
# Collect the result in a dictionary
variants = {}
for var in self.variant_collection.find(query):
variants[var['variant_id']] = self.add_gene_info(var)
# Collect all variant comments from the case
event_query = {
'$and': [
{'case': case_id},
{'category': 'variant'},
{'verb': 'comment'},
]
}
# Get all variantids for commented variants
comment_variants = {event['variant_id'] for event in self.event_collection.find(event_query)}
# Get the variant objects for commented variants, if they exist
for var_id in comment_variants:
# Skip if we already added the variant
if var_id in variants:
continue
# Get the variant with variant_id (not _id!)
variant_obj = self.variant(var_id, case_id=case_id)
# There could be cases with comments that refers to non existing variants
# if a case has been reanalysed
if not variant_obj:
continue
variant_obj['is_commented'] = True
variants[var_id] = variant_obj
# Return a list with the variant objects
return variants.values() | 0.002342 |
def join(self, _id):
""" Join a room """
        if self._gcls() + _id not in SockJSRoomHandler._room:
            SockJSRoomHandler._room[self._gcls() + _id] = set()
        SockJSRoomHandler._room[self._gcls() + _id].add(self) | 0.012397
def simulate():
'''instantiate and execute network simulation'''
#separate model execution from parameters for safe import from other files
nest.ResetKernel()
'''
Configuration of the simulation kernel by the previously defined time
resolution used in the simulation. Setting "print_time" to True prints
the already processed simulation time as well as its percentage of the
total simulation time.
'''
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
'''
Configuration of the model `iaf_psc_alpha` and `poisson_generator`
using SetDefaults(). This function expects the model to be the
inserted as a string and the parameter to be specified in a
dictionary. All instances of theses models created after this point
will have the properties specified in the dictionary by default.
'''
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
'''
Creation of the nodes using `Create`. We store the returned handles in
variables for later reference. Here the excitatory and inhibitory, as
well as the poisson generator and two spike detectors. The spike
detectors will later be used to record excitatory and inhibitory
spikes.
'''
nodes_ex = nest.Create("iaf_psc_alpha",NE)
nodes_in = nest.Create("iaf_psc_alpha",NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
print("first exc node: {}".format(nodes_ex[0]))
print("first inh node: {}".format(nodes_in[0]))
'''
distribute membrane potentials
'''
nest.SetStatus(nodes_ex, "V_m",
random.rand(len(nodes_ex))*neuron_params["V_th"])
nest.SetStatus(nodes_in, "V_m",
random.rand(len(nodes_in))*neuron_params["V_th"])
'''
Configuration of the spike detectors recording excitatory and
inhibitory spikes using `SetStatus`, which expects a list of node
handles and a list of parameter dictionaries. Setting the variable
"to_file" to True ensures that the spikes will be recorded in a .gdf
file starting with the string assigned to label. Setting "withtime"
and "withgid" to True ensures that each spike is saved to file by
stating the gid of the spiking neuron and the spike time in one line.
'''
nest.SetStatus(espikes,[{
"label": os.path.join(spike_output_path, label + "-EX"),
"withtime": True,
"withgid": True,
"to_file": True,
}])
nest.SetStatus(ispikes,[{
"label": os.path.join(spike_output_path, label + "-IN"),
"withtime": True,
"withgid": True,
"to_file": True,}])
print("Connecting devices")
'''
Definition of a synapse using `CopyModel`, which expects the model
name of a pre-defined synapse, the name of the customary synapse and
an optional parameter dictionary. The parameters defined in the
dictionary will be the default parameter for the customary
synapse. Here we define one synapse for the excitatory and one for the
inhibitory connections giving the previously defined weights and equal
delays.
'''
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
'''
Connecting the previously defined poisson generator to the excitatory
and inhibitory neurons using the excitatory synapse. Since the poisson
generator is connected to all neurons in the population the default
rule ('all_to_all') of Connect() is used. The synaptic properties are
inserted via syn_spec which expects a dictionary when defining
multiple variables or a string when simply using a pre-defined
synapse.
'''
if Poisson:
nest.Connect(noise,nodes_ex, 'all_to_all', "excitatory")
nest.Connect(noise,nodes_in,'all_to_all', "excitatory")
'''
Connecting the first N_neurons nodes of the excitatory and inhibitory
population to the associated spike detectors using excitatory
synapses. Here the same shortcut for the specification of the synapse
as defined above is used.
'''
nest.Connect(nodes_ex,espikes, 'all_to_all', "excitatory")
nest.Connect(nodes_in,ispikes, 'all_to_all', "excitatory")
print("Connecting network")
print("Excitatory connections")
'''
Connecting the excitatory population to all neurons using the
pre-defined excitatory synapse. Beforehand, the connection parameter
are defined in a dictionary. Here we use the connection rule
'fixed_indegree', which requires the definition of the indegree. Since
the synapse specification is reduced to assigning the pre-defined
excitatory synapse it suffices to insert a string.
'''
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
'''
Connecting the inhibitory population to all neurons using the
    pre-defined inhibitory synapse. The connection parameters as well as
    the synapse parameters are defined analogously to the connection from
    the excitatory population defined above.
'''
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory")
'''
Storage of the time point after the buildup of the network in a
variable.
'''
endbuild=time.time()
'''
Simulation of the network.
'''
print("Simulating")
nest.Simulate(simtime)
'''
Storage of the time point after the simulation of the network in a
variable.
'''
endsimulate= time.time()
'''
Reading out the total number of spikes received from the spike
detector connected to the excitatory population and the inhibitory
population.
'''
events_ex = nest.GetStatus(espikes,"n_events")[0]
events_in = nest.GetStatus(ispikes,"n_events")[0]
'''
Calculation of the average firing rate of the excitatory and the
inhibitory neurons by dividing the total number of recorded spikes by
the number of neurons recorded from and the simulation time. The
multiplication by 1000.0 converts the unit 1/ms to 1/s=Hz.
'''
rate_ex = events_ex/simtime*1000.0/N_neurons
rate_in = events_in/simtime*1000.0/N_neurons
'''
Reading out the number of connections established using the excitatory
and inhibitory synapse model. The numbers are summed up resulting in
the total number of synapses.
'''
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
nest.GetDefaults("inhibitory")["num_connections"]
'''
Establishing the time it took to build and simulate the network by
taking the difference of the pre-defined time variables.
'''
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
'''
Printing the network properties, firing rates and building times.
'''
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
'''
Plot a raster of the excitatory neurons and a histogram.
'''
if False:
nest.raster_plot.from_device(espikes, hist=True)
nest.raster_plot.from_device(ispikes, hist=True)
nest.raster_plot.show() | 0.010114 |
def add_subsegment(self, subsegment):
"""
Add input subsegment as a child subsegment.
"""
self._check_ended()
subsegment.parent_id = self.id
self.subsegments.append(subsegment) | 0.008929 |
def license():
''' Print the Bokeh license to the console.
Returns:
None
'''
from os.path import join
with open(join(__path__[0], 'LICENSE.txt')) as lic:
print(lic.read()) | 0.004785 |
def datasets_create_new(self, dataset_new_request, **kwargs): # noqa: E501
"""Create a new dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_new(dataset_new_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501
else:
(data) = self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501
return data | 0.002039 |
def apply_to(self, A):
"""Apply the coordinate transformation to points in A. """
if A.ndim == 1:
A = np.expand_dims(A, axis=0)
rows, cols = A.shape
A_new = np.hstack([A, np.ones((rows, 1))])
A_new = np.transpose(self.T.dot(np.transpose(A_new)))
return A_new[:, 0:cols] | 0.006061 |
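The same homogeneous-coordinate trick in isolation, with an illustrative 3x3 matrix standing in for self.T::

    import numpy as np

    T = np.array([[1.0, 0.0, 2.0],   # translate x by 2
                  [0.0, 1.0, 3.0],   # translate y by 3
                  [0.0, 0.0, 1.0]])
    A = np.array([[0.0, 0.0], [1.0, 1.0]])
    A_h = np.hstack([A, np.ones((A.shape[0], 1))])   # append the homogeneous 1s
    print((T @ A_h.T).T[:, :2])                      # [[2. 3.] [3. 4.]]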
def update_from_stats(self, stats):
"""Update columns based on partition statistics"""
sd = dict(stats)
for c in self.columns:
if c not in sd:
continue
stat = sd[c]
if stat.size and stat.size > c.size:
c.size = stat.size
c.lom = stat.lom | 0.005764 |
def save_cursor(self):
"""Push the current cursor position onto the stack."""
self.savepoints.append(Savepoint(copy.copy(self.cursor),
self.g0_charset,
self.g1_charset,
self.charset,
mo.DECOM in self.mode,
mo.DECAWM in self.mode)) | 0.004435 |
def get_channel_access_token(self, channel):
"""Return the token and sig for the given channel
:param channel: the channel or channel name to get the access token for
:type channel: :class:`channel` | :class:`str`
:returns: The token and sig for the given channel
:rtype: (:class:`unicode`, :class:`unicode`)
:raises: None
"""
if isinstance(channel, models.Channel):
channel = channel.name
r = self.oldapi_request(
'GET', 'channels/%s/access_token' % channel).json()
return r['token'], r['sig'] | 0.003339 |
def authenticated(f):
"""Decorator that authenticates to Keystone automatically."""
@wraps(f)
def new_f(self, *args, **kwargs):
if not self.nova_client.client.auth_token:
self.authenticate()
return f(self, *args, **kwargs)
return new_f | 0.003584 |
def convert_errno(e):
"""
Convert an errno value (as from an ``OSError`` or ``IOError``) into a
standard SFTP result code. This is a convenience function for trapping
exceptions in server code and returning an appropriate result.
:param int e: an errno code, as from ``OSError.errno``.
:return: an `int` SFTP error code like ``SFTP_NO_SUCH_FILE``.
"""
if e == errno.EACCES:
# permission denied
return SFTP_PERMISSION_DENIED
elif (e == errno.ENOENT) or (e == errno.ENOTDIR):
# no such file
return SFTP_NO_SUCH_FILE
else:
return SFTP_FAILURE | 0.002937 |
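Typical use inside a server-side handler, assuming convert_errno and the SFTP_* constants from the same module are in scope::

    import errno

    try:
        open('/definitely/not/there')
    except OSError as e:
        assert e.errno == errno.ENOENT
        status = convert_errno(e.errno)   # -> SFTP_NO_SUCH_FILE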
def create_router(self, name, ext_network=None, admin_state_up=True):
'''
Creates a new router
'''
body = {'name': name,
'admin_state_up': admin_state_up}
if ext_network:
net_id = self._find_network_id(ext_network)
body['external_gateway_info'] = {'network_id': net_id}
return self.network_conn.create_router(body={'router': body}) | 0.004773 |