text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def deep_copy(numpy_dict):
"""
Returns a copy of a dictionary whose values are numpy arrays.
Copies their values rather than copying references to them.
"""
out = {}
for key in numpy_dict:
out[key] = numpy_dict[key].copy()
return out | 0.01992 |
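A minimal usage sketch (the dictionary contents are made up for illustration): mutating the copy's arrays leaves the originals untouched, which a plain `dict(numpy_dict)` would not guarantee.

```python
import numpy as np

original = {"weights": np.zeros(3), "bias": np.ones(1)}   # hypothetical data
copied = deep_copy(original)
copied["weights"][0] = 99.0           # mutate the copy's array in place
assert original["weights"][0] == 0.0  # the original array is unaffected
```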
def write(bar, offset, data):
"""Write data to PCI board.
Parameters
----------
bar : BaseAddressRegister
BAR to write.
offset : int
Address offset in BAR to write.
data : bytes
Data to write.
Returns
-------
None
Examples
--------
>>> b = pypci.lspci(vendor=0x1147, device=3214)
>>> pypci.write(b[0].bar[2], 0x04, b'\x01')
>>> data = struct.pack('<I', 1234567)
>>> pypci.write(b[0].bar[2], 0x00, data)
"""
if type(data) not in [bytes, bytearray]:
msg = 'data should be bytes or bytearray type'
raise TypeError(msg)
size = len(data)
verify_access_range(bar, offset, size)
if bar.type == 'io': return io_write(bar, offset, data)
if bar.type == 'mem': return mem_write(bar, offset, data)
return | 0.012731 |
def transformer_tall_pretrain_lm_tpu_adafactor_large():
"""Hparams for transformer on LM pretraining on TPU, large model."""
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2
hparams.batch_size = 4
hparams.multiproblem_mixing_schedule = "constant"
# Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
return hparams | 0.019749 |
def addPendingResult( self, ps, jobid ):
"""Add a "pending" result that we expect to get results for.
:param ps: the parameters for the result
:param jobid: an identifier for the pending result"""
k = self._parametersAsIndex(ps)
# retrieve or create the result list
if k in self._results.keys():
rs = self._results[k]
else:
rs = []
self._results[k] = rs

# record the pending result's jobid (inserted at the front of the list)
rs.insert(0, jobid)
# map job id to parameters to which it refers
self._pending[jobid] = k | 0.006579 |
def set_classifier_mask(self, v, base_mask=True):
"""Computes the mask used to create the training and validation set"""
base = self._base
v = tonparray(v)
a = np.unique(v)
if a[0] != -1 or a[1] != 1:
raise RuntimeError("The labels must be -1 and 1 (%s)" % a)
mask = np.zeros_like(v)
cnt = min([(v == x).sum() for x in a]) * base._tr_fraction
cnt = int(round(cnt))
for i in a:
index = np.where((v == i) & base_mask)[0]
np.random.shuffle(index)
mask[index[:cnt]] = True
base._mask = SparseArray.fromlist(mask)
return SparseArray.fromlist(v) | 0.002963 |
def addSuccess(self, test, capt):
"""
After test completion, we want to record testcase run information.
"""
self.__insert_test_result(constants.State.PASS, test) | 0.010309 |
def _design_poll(self, name, mode, oldres, timeout=5, use_devmode=False):
"""
Poll for an 'async' action to be complete.
:param string name: The name of the design document
:param string mode: One of ``add`` or ``del`` to indicate whether
we should check for addition or deletion of the document
:param oldres: The old result from the document's previous state, if
any
:param float timeout: How long to poll for. If this is 0 then this
function returns immediately
:type oldres: :class:`~couchbase.result.HttpResult`
"""
if not timeout:
return True
if timeout < 0:
raise ArgumentError.pyexc("Interval must not be negative")
t_end = time.time() + timeout
old_rev = None
if oldres:
old_rev = self._doc_rev(oldres)
while time.time() < t_end:
try:
cur_resp = self.design_get(name, use_devmode=use_devmode)
if old_rev and self._doc_rev(cur_resp) == old_rev:
continue
try:
if not self._poll_vq_single(
name, use_devmode, cur_resp.value):
continue
return True
except CouchbaseError:
continue
except CouchbaseError:
if mode == 'del':
# Deleted, whopee!
return True
raise exceptions.TimeoutError.pyexc(
"Wait time for design action completion exceeded") | 0.001225 |
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64') | 0.003945 |
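The core of the method is the masked bincount over group ids. A small self-contained sketch with made-up ids and values (plain numpy, using `np.isnan` in place of `isna`) shows how missing values and unassigned rows (`id == -1`) are excluded:

```python
import numpy as np

ids = np.array([0, 0, 1, -1, 1])               # group id per row; -1 = not in any group
val = np.array([1.0, np.nan, 2.0, 3.0, 4.0])   # NaN marks a missing value
mask = (ids != -1) & ~np.isnan(val)            # keep grouped, non-missing rows
counts = np.bincount(ids[mask], minlength=2)   # per-group counts
assert counts.tolist() == [1, 2]
```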
def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle the case where stock data starts on the first
# day; in that case, use the open-to-close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) | 0.001418 |
def check_drives(drivename, drivestatus):
""" check the drive status """
return DISK_STATES[int(drivestatus)]["icingastatus"], "Drive '{}': {}".format(
drivename, DISK_STATES[int(drivestatus)]["result"]) | 0.012987 |
def add_edge_fun(graph):
"""
Returns a function that adds an edge to the `graph` checking only the out
node.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
succ, pred, node = graph._succ, graph._pred, graph._node
def add_edge(u, v, **attr):
if v not in succ: # Add nodes.
succ[v], pred[v], node[v] = {}, {}, {}
succ[u][v] = pred[v][u] = attr # Add the edge.
return add_edge | 0.001637 |
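A short usage sketch, assuming a networkx 2.x `DiGraph` (whose internal `_succ`/`_pred`/`_node` dicts the closure writes into): the returned function only checks, and creates, the target node, so the source node must already exist.

```python
import networkx as nx

g = nx.DiGraph()
g.add_node("a")                    # the source node must already exist
add_edge = add_edge_fun(g)
add_edge("a", "b", weight=1.0)     # "b" is created on the fly
assert g.has_edge("a", "b") and g["a"]["b"]["weight"] == 1.0
```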
def needed_inputs(self):
""" List all the needed inputs of a configured engine
>>> engine = Engine("op1", "op2")
>>> engine.op1.setup(in_name="in", out_name="middle", required=False)
>>> engine.op2.setup(in_name="middle", out_name="out")
>>> engine.op1.append(lambda x:x+2)
>>> engine.op2.append(lambda x:x*2)
>>> engine.op1.select('<lambda>')
>>> list(engine.needed_inputs())
['in']
But now if we deactivate the first component:
>>> engine.op1.clear_selections()
>>> list(engine.needed_inputs())
['middle']
More complex example:
>>> engine = Engine("op1", "op2")
>>> engine.op1.setup(in_name="in", out_name="middle")
>>> engine.op2.setup(in_name=["middle", "in2"], out_name="out")
>>> engine.op1.append(lambda x:x+2)
>>> engine.op2.append(lambda x, y:x*y)
>>> engine.needed_inputs() == {'in', 'in2'}
True
Note that by default the needed input is 'input':
>>> engine = Engine("op1", "op2")
>>> engine.op1.append(lambda x:x+2)
>>> engine.op2.append(lambda x:x*2)
>>> list(engine.needed_inputs())
['input']
"""
needed = set()
available = set() # set of available data
for bnum, block in enumerate(self):
if not block.selected(): # if the block will not be used
continue
if block.in_name is not None:
for in_name in block.in_name:
if in_name not in available:
needed.add(in_name)
elif bnum == 0:
# if the first block
needed.add(Engine.DEFAULT_IN_NAME)
# register the output
available.add(block.out_name)
return needed | 0.002148 |
def wcs_update(self, wcs_text, fb=None):
"""
parses the wcs_text and populates the fields
of a coord_tran instance.
we start from the coord_tran of the input
frame buffer, if any
"""
if (fb):
ct = fb.ct
else:
ct = coord_tran()
if (not ct.valid):
ct.zt = W_UNITARY
# read wcs_text
data = string.split(wcs_text, '\n')
ct.imtitle = data[0]
# we are expecting 8 floats and 1 int
try:
(ct.a, ct.b, ct.c, ct.d,
ct.tx, ct.ty, ct.z1, ct.z2,
ct.zt) = string.split(data[1])
ct.a = float(ct.a)
ct.b = float(ct.b)
ct.c = float(ct.c)
ct.d = float(ct.d)
ct.tx = float(ct.tx)
ct.ty = float(ct.ty)
ct.z1 = float(ct.z1)
ct.z2 = float(ct.z2)
ct.zt = int(ct.zt)
except Exception:
ct.imtitle = "[NO WCS]"
ct.a = 1
ct.d = 1
ct.b = 0
ct.c = 0
ct.tx = 0
ct.ty = 0
ct.zt = W_UNITARY
ct.valid += 1
# determine the best format for WCS output
if (ct.valid and ct.zt == W_LINEAR):
z1 = ct.z1
z2 = ct.z2
zrange = abs(z1 - z2)
zavg = (abs(z1) + abs(z2)) / 2.0
if (zrange < 100.0 and zavg < 200.0):
ct.format = " %7.2f %7.2f %7.3f%c"
elif (zrange > 99999.0 or zavg > 99999.0):
ct.format = " %7.2f %7.2f %7.3g%c"
else:
ct.format = W_DEFFORMAT
else:
ct.format = " %7.2f %7.2f %7.0f%c"
# add_mapping, if we can
if (len(data) < 4):
return(ct)
# we are expecting 1 string, 2 floats, and 6 int
try:
print("updating WCS: %s" % str(data[2]))
(ct.region, ct.sx, ct.sy, ct.snx,
ct.sny, ct.dx, ct.dy, ct.dnx,
ct.dny) = string.split(data[2])
ct.sx = float(ct.sx)
ct.sy = float(ct.sy)
ct.snx = int(ct.snx)
ct.sny = int(ct.sny)
# dx, dy: offset into frame where actual data starts
ct.dx = int(ct.dx)
ct.dy = int(ct.dy)
# dnx, dny: length of actual data in frame from offsets
ct.dnx = int(ct.dnx)
ct.dny = int(ct.dny)
ct.ref = string.strip(data[3])
# if this works, we also have the real size of the image
fb.img_width = ct.dnx + 1 # for some reason, the width is always 1 pixel smaller...
fb.img_height = ct.dny
except Exception:
ct.region = 'none'
ct.sx = 1.0
ct.sy = 1.0
ct.snx = fb.width
ct.sny = fb.height
ct.dx = 1
ct.dy = 1
ct.dnx = fb.width
ct.dny = fb.height
ct.ref = 'none'
return (ct) | 0.000902 |
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
connection_auth=None, **kwargs):
'''
.. versionadded:: 2019.2.0
Ensure a route exists within a route table.
:param name:
Name of the route.
:param address_prefix:
The destination CIDR to which the route applies.
:param next_hop_type:
The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
'Internet', 'VirtualAppliance', and 'None'.
:param next_hop_ip_address:
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
type is 'VirtualAppliance'.
:param route_table:
The name of the existing route table which will contain the route.
:param resource_group:
The resource group assigned to the route table.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure route exists:
azurearm_network.route_present:
- name: rt1_route2
- route_table: rt1
- resource_group: group1
- address_prefix: '192.168.0.0/16'
- next_hop_type: vnetlocal
- connection_auth: {{ profile }}
- require:
- azurearm_network: Ensure route table exists
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
route = __salt__['azurearm_network.route_get'](
name,
route_table,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' not in route:
if address_prefix != route.get('address_prefix'):
ret['changes']['address_prefix'] = {
'old': route.get('address_prefix'),
'new': address_prefix
}
if next_hop_type.lower() != route.get('next_hop_type', '').lower():
ret['changes']['next_hop_type'] = {
'old': route.get('next_hop_type'),
'new': next_hop_type
}
if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
ret['changes']['next_hop_ip_address'] = {
'old': route.get('next_hop_ip_address'),
'new': next_hop_ip_address
}
if not ret['changes']:
ret['result'] = True
ret['comment'] = 'Route {0} is already present.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Route {0} would be updated.'.format(name)
return ret
else:
ret['changes'] = {
'old': {},
'new': {
'name': name,
'address_prefix': address_prefix,
'next_hop_type': next_hop_type,
'next_hop_ip_address': next_hop_ip_address
}
}
if __opts__['test']:
ret['comment'] = 'Route {0} would be created.'.format(name)
ret['result'] = None
return ret
route_kwargs = kwargs.copy()
route_kwargs.update(connection_auth)
route = __salt__['azurearm_network.route_create_or_update'](
name=name,
route_table=route_table,
resource_group=resource_group,
address_prefix=address_prefix,
next_hop_type=next_hop_type,
next_hop_ip_address=next_hop_ip_address,
**route_kwargs
)
if 'error' not in route:
ret['result'] = True
ret['comment'] = 'Route {0} has been created.'.format(name)
return ret
ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
return ret | 0.001913 |
def store_equal(self):
"""
Takes a tetrad class object and populates array with random
quartets sampled equally among splits of the tree so that
deep splits are not overrepresented relative to rare splits,
like those near the tips.
"""
with h5py.File(self.database.input, 'a') as io5:
fillsets = io5["quartets"]
## require guidetree
if not os.path.exists(self.files.tree):
raise IPyradWarningExit(
"To use sampling method 'equal' requires a guidetree")
tre = ete3.Tree(self.files.tree)
tre.unroot()
tre.resolve_polytomy(recursive=True)
## randomly sample internals splits
splits = [([self.samples.index(z.name) for z in i],
[self.samples.index(z.name) for z in j]) \
for (i, j) in tre.get_edges()]
## only keep internal splits, not single tip edges
splits = [i for i in splits if all([len(j) > 1 for j in i])]
## how many quartets, at minimum, should be equally sampled from each split
squarts = self.params.nquartets // len(splits)
## keep track of how many iterators are saturable.
saturable = 0
## turn each into an iterable split sampler
## if the nquartets for that split is small, then sample all,
## if it is big then make it a random sampler for that split.
qiters = []
## iterate over splits sampling quartets evenly
for idx, split in enumerate(splits):
## if small number at this split then sample all possible sets
## we will exhaust this quickly and then switch to random for
## the larger splits.
total = n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2)
if total < squarts*2:
qiter = (i+j for (i, j) in itertools.product(
itertools.combinations(split[0], 2),
itertools.combinations(split[1], 2)))
saturable += 1
## else create random sampler across that split, this is slower
## because it can propose the same split repeatedly and so we
## have to check it against the 'sampled' set.
else:
qiter = (random_product(split[0], split[1]) for _ \
in xrange(self.params.nquartets))
## store all iterators into a list
qiters.append((idx, qiter))
## create infinite cycler of qiters
qitercycle = itertools.cycle(qiters)
## store visited quartets
sampled = set()
## fill chunksize at a time
i = 0
empty = set()
edge_targeted = 0
random_targeted = 0
## keep filling quartets until nquartets are sampled.
while i < self.params.nquartets:
## grab the next iterator
cycle, qiter = qitercycle.next()
## sample from iterators, store sorted set.
try:
qrtsamp = tuple(sorted(qiter.next()))
if qrtsamp not in sampled:
sampled.add(qrtsamp)
edge_targeted += 1
i += 1
## print progress bar update to engine stdout
if not i % self._chunksize:
print(min(i, self.params.nquartets))
except StopIteration:
empty.add(cycle)
if len(empty) == saturable:
break
## if array is not full then add random samples
while i <= self.params.nquartets:
newset = tuple(sorted(np.random.choice(
range(len(self.samples)), 4, replace=False)))
if newset not in sampled:
sampled.add(newset)
random_targeted += 1
i += 1
## print progress bar update to engine stdout
if not i % self._chunksize:
print(min(i, self.params.nquartets))
## store into database
print(self.params.nquartets)
fillsets[:] = np.array(tuple(sampled))
del sampled | 0.009506 |
async def edit_message_caption(self, chat_id: typing.Union[base.Integer, base.String, None] = None,
message_id: typing.Union[base.Integer, None] = None,
inline_message_id: typing.Union[base.String, None] = None,
caption: typing.Union[base.String, None] = None,
parse_mode: typing.Union[base.String, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup,
None] = None) -> types.Message or base.Boolean:
"""
Use this method to edit captions of messages sent by the bot or via the bot (for inline bots).
Source: https://core.telegram.org/bots/api#editmessagecaption
:param chat_id: Required if inline_message_id is not specified
Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String, None]`
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:type message_id: :obj:`typing.Union[base.Integer, None]`
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:type inline_message_id: :obj:`typing.Union[base.String, None]`
:param caption: New caption of the message
:type caption: :obj:`typing.Union[base.String, None]`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
fixed-width text or inline URLs in your bot's message.
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param reply_markup: A JSON-serialized object for an inline keyboard
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`
:return: On success, if edited message is sent by the bot, the edited Message is returned,
otherwise True is returned.
:rtype: :obj:`typing.Union[types.Message, base.Boolean]`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals())
if self.parse_mode:
payload.setdefault('parse_mode', self.parse_mode)
result = await self.request(api.Methods.EDIT_MESSAGE_CAPTION, payload)
if isinstance(result, bool):
return result
return types.Message(**result) | 0.00833 |
def to_cdms2(dataarray, copy=True):
"""Convert a DataArray into a cdms2 variable
"""
# we don't want cdms2 to be a hard dependency
import cdms2
def set_cdms2_attrs(var, attrs):
for k, v in attrs.items():
setattr(var, k, v)
# 1D axes
axes = []
for dim in dataarray.dims:
coord = encode(dataarray.coords[dim])
axis = cdms2.createAxis(coord.values, id=dim)
set_cdms2_attrs(axis, coord.attrs)
axes.append(axis)
# Data
var = encode(dataarray)
cdms2_var = cdms2.createVariable(var.values, axes=axes, id=dataarray.name,
mask=pd.isnull(var.values), copy=copy)
# Attributes
set_cdms2_attrs(cdms2_var, var.attrs)
# Curvilinear and unstructured grids
if dataarray.name not in dataarray.coords:
cdms2_axes = OrderedDict()
for coord_name in set(dataarray.coords.keys()) - set(dataarray.dims):
coord_array = dataarray.coords[coord_name].to_cdms2()
cdms2_axis_cls = (cdms2.coord.TransientAxis2D
if coord_array.ndim else
cdms2.auxcoord.TransientAuxAxis1D)
cdms2_axis = cdms2_axis_cls(coord_array)
if cdms2_axis.isLongitude():
cdms2_axes['lon'] = cdms2_axis
elif cdms2_axis.isLatitude():
cdms2_axes['lat'] = cdms2_axis
if 'lon' in cdms2_axes and 'lat' in cdms2_axes:
if len(cdms2_axes['lon'].shape) == 2:
cdms2_grid = cdms2.hgrid.TransientCurveGrid(
cdms2_axes['lat'], cdms2_axes['lon'])
else:
cdms2_grid = cdms2.gengrid.AbstractGenericGrid(
cdms2_axes['lat'], cdms2_axes['lon'])
for axis in cdms2_grid.getAxisList():
cdms2_var.setAxis(cdms2_var.getAxisIds().index(axis.id), axis)
cdms2_var.setGrid(cdms2_grid)
return cdms2_var | 0.000504 |
def update_short(self, **kwargs):
"""
Update the short optional arguments (those with one leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the short
argument names
Examples
--------
Setting::
>>> parser.update_short(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', short='s')
>>> parser.update_arg('something_else', short='se')
which in turn is basically comparable to::
>>> parser.add_argument('-s', '--something', ...)
>>> parser.add_argument('-se', '--something_else', ...)
See Also
--------
update_shortf, update_long"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, short=val) | 0.001776 |
def dispatch_event(self, event_, **kwargs):
"""
Dispatch section event.
Notes:
You MUST NOT call event.trigger() directly because
it will circumvent the section settings as well
as ignore the section tree.
If hooks are disabled somewhere up in the tree, and enabled
down below, events will still be dispatched down below because
that's where they originate.
"""
if self.settings.hooks_enabled:
result = self.hooks.dispatch_event(event_, **kwargs)
if result is not None:
return result
# Must also dispatch the event in parent section
if self.section:
return self.section.dispatch_event(event_, **kwargs)
elif self.section:
# Settings only apply to one section, so must still
# dispatch the event in parent sections recursively.
self.section.dispatch_event(event_, **kwargs) | 0.001976 |
def persistent_id(self, obj):
"""
Provide a persistent ID for "saving" GLC objects by reference. Return
None for all non GLC objects.
Parameters
----------
obj: Name of the object whose persistent ID is extracted.
Returns
--------
None if the object is not a GLC object. (ClassName, relative path)
if the object is a GLC object.
Notes
-----
Borrowed from pickle docs (https://docs.python.org/2/library/_pickle.html)
For the benefit of object persistence, the pickle module supports the
notion of a reference to an object outside the pickled data stream.
To pickle objects that have an external persistent id, the pickler must
have a custom persistent_id() method that takes an object as an argument and
returns either None or the persistent id for that object.
For GLC objects, the persistent_id is merely a relative file path (within
the ZIP archive) to the GLC archive where the GLC object is saved. For
example:
(SFrame, 'sframe-save-path')
(SGraph, 'sgraph-save-path')
(Model, 'model-save-path')
"""
# Get the class of the object (if it can be done)
obj_class = None if not hasattr(obj, '__class__') else obj.__class__
if obj_class is None:
return None
# If the object is a GLC class.
if _is_not_pickle_safe_gl_class(obj_class):
if (id(obj) in self.gl_object_memo):
# has already been pickled
return (None, None, id(obj))
else:
# Save the location of the GLC object's archive to the pickle file.
relative_filename = str(_uuid.uuid4())
filename = _os.path.join(self.gl_temp_storage_path, relative_filename)
self.mark_for_delete -= set([filename])
# Save the GLC object
obj.save(filename)
# Memoize.
self.gl_object_memo.add(id(obj))
# Return the tuple (class_name, relative_filename) in archive.
return (_get_gl_class_type(obj.__class__), relative_filename, id(obj))
# Not a GLC object. Default to cloud pickle
else:
return None | 0.003398 |
def correlation_decomp(P, obs1, obs2=None, times=[1], k=None):
r"""Time-correlation for equilibrium experiment - via decomposition.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvalues and eigenvectors to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
if obs2 is None:
obs2 = obs1
R, D, L = rdl_decomposition(P, k=k)
"""Stationary vector"""
mu = L[0, :]
"""Extract eigenvalues"""
ev = np.diagonal(D)
"""Amplitudes"""
amplitudes = np.dot(mu * obs1, R) * np.dot(L, obs2)
"""Propagate eigenvalues"""
times = np.asarray(times)
ev_t = ev[np.newaxis, :] ** times[:, np.newaxis]
"""Compute result"""
res = np.dot(ev_t, amplitudes)
"""Truncate imaginary part - should be zero anyways"""
res = res.real
return res | 0.000834 |
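In formula form (a restatement of the code above, with $r_k$ and $l_k$ the right and left eigenvectors returned by `rdl_decomposition`, $\lambda_k$ the eigenvalues and $\mu$ the stationary vector), the function evaluates the spectral expansion of the equilibrium time-correlation:

$$
\operatorname{corr}(t) \;=\; \sum_{k} \lambda_k^{\,t}\,\bigl\langle \mu \odot o_1,\; r_k \bigr\rangle\,\bigl\langle l_k,\; o_2 \bigr\rangle ,
$$

where $\odot$ denotes element-wise multiplication; `amplitudes` holds the paired inner products and `ev_t` holds $\lambda_k^{\,t}$ for each requested lag $t$.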
def _get_all_unmapped_reads(self, fout):
'''Writes all unmapped reads to fout'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(until_eof=True):
if read.is_unmapped:
print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout) | 0.009091 |
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
"""
raw = kwargs.pop('raw',False)
if raw:
for obj in objs:
publish_latex(obj)
else:
display(*objs, include=['text/plain','text/latex']) | 0.005405 |
def write(self, data, multithread=True, **kwargs):
'''
:param data: Data to be written
:type data: str or mmap object
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean
Writes the data *data* to the file.
.. note::
Writing to remote files is append-only. Using :meth:`seek`
does not affect where the next :meth:`write` will occur.
'''
if USING_PYTHON2:
self._write2(data, multithread=multithread, **kwargs)
else:
# In python3, the underlying system methods use the 'bytes' type, not 'string'
#
# This is, hopefully, a temporary hack. It is not a good idea for two reasons:
# 1) Performance, we need to make a pass on the data, and need to allocate
# another buffer of similar size
# 2) The types are wrong. The "bytes" type should be visible to the caller
# of the write method, instead of being hidden.
# Should we throw an exception if the file is opened in binary mode,
# and the data is unicode/text?
if isinstance(data, str):
bt = data.encode("utf-8")
elif isinstance(data, bytearray):
bt = bytes(data)
elif isinstance(data, bytes):
bt = data
elif isinstance(data, mmap.mmap):
bt = bytes(data)
else:
raise DXFileError("Invalid type {} for write data argument".format(type(data)))
assert(isinstance(bt, bytes))
self._write2(bt, multithread=multithread, **kwargs) | 0.005248 |
def dry_run_scan(self, scan_id, targets):
""" Dry runs a scan. """
os.setsid()
for _, target in enumerate(targets):
host = resolve_hostname(target[0])
if host is None:
logger.info("Couldn't resolve %s.", target[0])
continue
port = self.get_scan_ports(scan_id, target=target[0])
logger.info("%s:%s: Dry run mode.", host, port)
self.add_scan_log(scan_id, name='', host=host,
value='Dry run result')
self.finish_scan(scan_id) | 0.003466 |
def _populate_user_from_dn_regex_negation(self):
"""
Populate the user's flags from USER_FLAGS_BY_DN_REGEX_NEGATION: each flag is
set to False when the user's DN matches the configured regex, and to True
otherwise.
"""
for field, regex in self.settings.USER_FLAGS_BY_DN_REGEX_NEGATION.items():
field_value = True
if re.search(regex, self._get_user_dn(), re.IGNORECASE):
field_value = False
setattr(self._user, field, field_value) | 0.008299 |
def initialize_path(self, path_num=None):
"""
initialize consumer for next path
"""
self.state = copy(self.initial_state)
return self.state | 0.011173 |
def bundle_biomass_components(model, reaction):
"""
Return bundle biomass component reactions if it is not one lumped reaction.
There are two basic ways of specifying the biomass composition. The most
common is a single lumped reaction containing all biomass precursors.
Alternatively, the biomass equation can be split into several reactions
each focusing on a different macromolecular component for instance
a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+
d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) +
h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi.
This function aims to identify if the given biomass reaction 'reaction',
is a lumped all-in-one reaction, or whether it is just the final
composing reaction of all macromolecular components. It is important to
identify which other reaction belong to a given biomass reaction to be
able to identify universal biomass components or calculate detailed
precursor stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
list
One or more reactions that qualify as THE biomass equation together.
Notes
-----
Counting H2O, ADP, Pi, H, and ATP, the number of metabolites in a split
reaction is comparatively low:
Any reaction with 15 or fewer metabolites can
probably be counted as a split reaction containing Ash, Phospholipids,
Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA,
DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more
than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP)
+ 4 Deoxy-Nucleotides) can be considered a lumped reaction.
Anything in between will be treated conservatively as a lumped reaction.
For split reactions, after removing any of the metabolites associated with
growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the
only remaining metabolites should be generalized macromolecule precursors
e.g. Protein, Phospholipids etc. Each of these have their own composing
reactions. Hence we include the reactions of these metabolites in the
set that ultimately makes up the returned list of reactions that together
make up the biomass equation.
"""
if len(reaction.metabolites) >= 16:
return [reaction]
id_of_main_compartment = helpers.find_compartment_id_in_model(model,
'c')
gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", 'MNXM9']
try:
gam = set([helpers.find_met_in_model(
model, met, id_of_main_compartment)[0] for met in gam_mets])
except RuntimeError:
gam = set()
regex = re.compile('^{}(_[a-zA-Z]+?)*?$'.format('biomass'),
re.IGNORECASE)
biomass_metabolite = set(model.metabolites.query(regex))
macromolecules = set(reaction.metabolites) - gam - biomass_metabolite
bundled_reactions = set()
for met in macromolecules:
bundled_reactions = bundled_reactions | set(met.reactions)
return list(bundled_reactions) | 0.000298 |
async def finish_pairing(self, pin):
"""Finish pairing process."""
self.srp.step1(pin)
pub_key, proof = self.srp.step2(self._atv_pub_key, self._atv_salt)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x03',
tlv8.TLV_PUBLIC_KEY: pub_key,
tlv8.TLV_PROOF: proof})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
atv_proof = pairing_data[tlv8.TLV_PROOF]
log_binary(_LOGGER, 'Device', Proof=atv_proof)
encrypted_data = self.srp.step3()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x05',
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
encrypted_data = pairing_data[tlv8.TLV_ENCRYPTED_DATA]
return self.srp.step4(encrypted_data) | 0.001972 |
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
"""Prepares the Fabric and configures the device.
This routine calls the fabric class to prepare the fabric when
a firewall is created. It also calls the device manager to
configure the device. It updates the database with the final
result.
"""
is_fw_virt = self.is_device_virtual()
ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
fw_constants.RESULT_FW_CREATE_INIT)
if not ret:
LOG.error("Prepare Fabric failed")
return
else:
self.update_fw_db_final_result(fw_dict.get('fw_id'), (
fw_constants.RESULT_FW_CREATE_DONE))
ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
self.fwid_attr[tenant_id].fw_drvr_created(True)
self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS')
LOG.info("FW device create returned success for tenant %s",
tenant_id)
else:
LOG.error("FW device create returned failure for tenant %s",
tenant_id) | 0.001577 |
async def check_authorized(self, identity):
"""
Works like :func:`Security.identity`, but raises :func:`UnauthorizedError`
when the check fails.
:param identity: Claim
:return: The checked claim
:raise: :func:`UnauthorizedError`
"""
identify = await self.identify(identity)
if identify is None:
raise UnauthorizedError()
return identify | 0.004376 |
def render_stmt_graph(statements, reduce=True, english=False, rankdir=None,
agent_style=None):
"""Render the statement hierarchy as a pygraphviz graph.
Parameters
----------
statements : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
reduce : bool
Whether to perform a transitive reduction of the edges in the graph.
Default is True.
english : bool
If True, the statements in the graph are represented by their
English-assembled equivalent; otherwise they are represented as
text-formatted Statements.
rankdir : str or None
Argument to pass through to the pygraphviz `AGraph` constructor
specifying graph layout direction. In particular, a value of 'LR'
specifies a left-to-right direction. If None, the pygraphviz default
is used.
agent_style : dict or None
Dict of attributes specifying the visual properties of nodes. If None,
the following default attributes are used::
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
Returns
-------
pygraphviz.AGraph
Pygraphviz graph with nodes representing statements and edges pointing
from supported statements to supported_by statements.
Examples
--------
Pattern for getting statements and rendering as a Graphviz graph:
>>> from indra.preassembler.hierarchy_manager import hierarchies
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(hierarchies, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> graph = render_stmt_graph(pa.related_stmts)
>>> graph.write('example_graph.dot') # To make the DOT file
>>> graph.draw('example_graph.png', prog='dot') # To make an image
Resulting graph:
.. image:: /images/example_graph.png
:align: center
:alt: Example statement graph rendered by Graphviz
"""
from indra.assemblers.english import EnglishAssembler
# Set the default agent formatting properties
if agent_style is None:
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
# Sets to store all of the nodes and edges as we recursively process all
# of the statements
nodes = set([])
edges = set([])
stmt_dict = {}
# Recursive function for processing all statements
def process_stmt(stmt):
nodes.add(str(stmt.matches_key()))
stmt_dict[str(stmt.matches_key())] = stmt
for sby_ix, sby_stmt in enumerate(stmt.supported_by):
edges.add((str(stmt.matches_key()), str(sby_stmt.matches_key())))
process_stmt(sby_stmt)
# Process all of the top-level statements, getting the supporting statements
# recursively
for stmt in statements:
process_stmt(stmt)
# Create a networkx graph from the nodes
nx_graph = nx.DiGraph()
nx_graph.add_edges_from(edges)
# Perform transitive reduction if desired
if reduce:
nx_graph = nx.algorithms.dag.transitive_reduction(nx_graph)
# Create a pygraphviz graph from the nx graph
try:
pgv_graph = pgv.AGraph(name='statements', directed=True,
rankdir=rankdir)
except NameError:
logger.error('Cannot generate graph because '
'pygraphviz could not be imported.')
return None
for node in nx_graph.nodes():
stmt = stmt_dict[node]
if english:
ea = EnglishAssembler([stmt])
stmt_str = ea.make_model()
else:
stmt_str = str(stmt)
pgv_graph.add_node(node,
label='%s (%d)' % (stmt_str, len(stmt.evidence)),
**agent_style)
pgv_graph.add_edges_from(nx_graph.edges())
return pgv_graph | 0.000475 |
def _get_context_name(self, app=None):
"""Generate the name of the context variable for this component & app.
Because we store the ``context`` in a Local so the component
can be used across multiple apps, we cannot store the context on the
instance itself. This function will generate a unique and predictable
key in which to store the context.
Returns:
str: The name of the context variable to set and get the context
from.
"""
elements = [
self.__class__.__name__,
'context',
text_type(id(self)),
]
if app:
elements.append(text_type(id(app)))
else:
try:
elements.append(text_type(id(self.app)))
except RuntimeError:
pass
return '_'.join(elements) | 0.002273 |
def GetReportDescriptor(cls):
"""Returns plugins' metadata in ApiReportDescriptor."""
if cls.TYPE is None:
  raise ValueError("%s.TYPE is uninitialized." % cls)
if cls.TITLE is None:
  raise ValueError("%s.TITLE is uninitialized." % cls)
if cls.SUMMARY is None:
  raise ValueError("%s.SUMMARY is uninitialized." % cls)
return rdf_report_plugins.ApiReportDescriptor(
type=cls.TYPE,
name=cls.__name__,
title=cls.TITLE,
summary=cls.SUMMARY,
requires_time_range=cls.REQUIRES_TIME_RANGE) | 0.00722 |
def by_pdb(self, pdb_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
'''Returns a list of all PDB files which contain protein sequences similar to the protein sequences of pdb_id.
Only protein chains are considered in the matching so e.g. some results may have DNA or RNA chains or ligands
while some may not.
'''
self.log('BLASTing {0}'.format(pdb_id), silent, colortext.pcyan)
# Preamble
matrix = matrix or self.matrix
cut_off = cut_off or self.cut_off
sequence_identity_cut_off = sequence_identity_cut_off or self.sequence_identity_cut_off
# Parse PDB file
p = self.bio_cache.get_pdb_object(pdb_id)
chain_ids = sorted(p.seqres_sequences.keys())
assert(chain_ids)
# Run BLAST over all chains
hits = set(self.blast_by_pdb_chain(pdb_id, chain_ids[0], cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile, silent = silent))
for chain_id in chain_ids[1:]:
chain_hits = self.blast_by_pdb_chain(pdb_id, chain_id, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile)
if chain_hits != None:
# None suggests that the chain was not a protein chain whereas an empty list suggest a protein chain with no hits
hits = hits.intersection(set(chain_hits))
return sorted(hits) | 0.023914 |
def gradient_compression_params(args: argparse.Namespace) -> Optional[Dict[str, Any]]:
"""
:param args: Arguments as returned by argparse.
:return: Gradient compression parameters or None.
"""
if args.gradient_compression_type is None:
return None
else:
return {'type': args.gradient_compression_type, 'threshold': args.gradient_compression_threshold} | 0.007673 |
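A quick illustration with a hand-built `argparse.Namespace` (the flag values are hypothetical) showing the two return paths:

```python
import argparse

off = argparse.Namespace(gradient_compression_type=None,
                         gradient_compression_threshold=0.5)
on = argparse.Namespace(gradient_compression_type="threshold",
                        gradient_compression_threshold=0.5)

assert gradient_compression_params(off) is None
assert gradient_compression_params(on) == {"type": "threshold", "threshold": 0.5}
```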
def in1d_sorted(ar1, ar2):
"""
Does the same as np.in1d, but exploits the fact that ar1 and ar2 are sorted and is therefore much faster.
"""
if ar1.shape[0] == 0 or ar2.shape[0] == 0: # check for empty arrays to avoid crash
return []
inds = ar2.searchsorted(ar1)
inds[inds == len(ar2)] = 0
return ar2[inds] == ar1 | 0.008696 |
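A small check (arrays made up for illustration) that the sorted-input fast path agrees with `np.in1d`:

```python
import numpy as np

ar1 = np.array([1, 3, 5, 7])   # must be sorted
ar2 = np.array([3, 4, 5, 6])   # must be sorted
fast = in1d_sorted(ar1, ar2)
assert np.array_equal(fast, np.in1d(ar1, ar2))   # [False, True, True, False]
```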
def data(self):
"""Data representation of the datasource sent to the frontend"""
order_by_choices = []
# self.column_names returns sorted column names
for s in self.column_names:
s = str(s or '')
order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))
verbose_map = {'__timestamp': 'Time'}
verbose_map.update({
o.metric_name: o.verbose_name or o.metric_name
for o in self.metrics
})
verbose_map.update({
o.column_name: o.verbose_name or o.column_name
for o in self.columns
})
return {
# simple fields
'id': self.id,
'column_formats': self.column_formats,
'description': self.description,
'database': self.database.data, # pylint: disable=no-member
'default_endpoint': self.default_endpoint,
'filter_select': self.filter_select_enabled, # TODO deprecate
'filter_select_enabled': self.filter_select_enabled,
'name': self.name,
'datasource_name': self.datasource_name,
'type': self.type,
'schema': self.schema,
'offset': self.offset,
'cache_timeout': self.cache_timeout,
'params': self.params,
'perm': self.perm,
'edit_url': self.url,
# sqla-specific
'sql': self.sql,
# one to many
'columns': [o.data for o in self.columns],
'metrics': [o.data for o in self.metrics],
# TODO deprecate, move logic to JS
'order_by_choices': order_by_choices,
'owners': [owner.id for owner in self.owners],
'verbose_map': verbose_map,
'select_star': self.select_star,
} | 0.001042 |
def JoinPath(stem="", *parts):
"""A sane version of os.path.join.
The intention here is to append the stem to the path. The standard module
removes the path if the stem begins with a /.
Args:
stem: The stem to join to.
*parts: parts of the path to join. The first arg is always the root and
directory traversal is not allowed.
Returns:
a normalized path.
"""
# Ensure all path components are unicode
parts = [SmartUnicode(path) for path in parts]
result = (stem + NormalizePath(u"/".join(parts))).replace("//", "/")
result = result.rstrip("/")
return result or "/" | 0.011419 |
def create_chart(self, html_path='index.html', data_path='data.json',
js_path='rickshaw.min.js', css_path='rickshaw.min.css',
html_prefix=''):
'''Save bearcart output to HTML and JSON.
Parameters
----------
html_path: string, default 'index.html'
Path for html output
data_path: string, default 'data.json'
Path for data JSON output
js_path: string, default 'rickshaw.min.js'
If passed, the Rickshaw javascript library will be saved to the
path. The file must be named "rickshaw.min.js"
css_path: string, default 'rickshaw.min.css'
If passed, the Rickshaw css library will be saved to the
path. The file must be named "rickshaw.min.css"
html_prefix: Prefix path to be prepended to all the other paths for file
creation, but not in the generated html file. This is needed if the
html file does not live in the same folder as the running python
script.
Returns
-------
HTML, JSON, JS, and CSS
Example
--------
>>>vis.create_chart(html_path='myvis.html', data_path='visdata.json'),
js_path='rickshaw.min.js',
css_path='rickshaw.min.css')
'''
self.template_vars.update({'data_path': str(data_path),
'js_path': js_path,
'css_path': css_path,
'chart_id': self.chart_id,
'y_axis_id': self.y_axis_id,
'legend_id': self.legend_id,
'slider_id': self.slider_id})
self._build_graph()
html = self.env.get_template('bcart_template.html')
self.HTML = html.render(self.template_vars)
with open(os.path.join(html_prefix, html_path), 'w') as f:
f.write(self.HTML)
with open(os.path.join(html_prefix, data_path), 'w') as f:
json.dump(self.json_data, f, sort_keys=True, indent=4,
separators=(',', ': '))
if js_path:
js = resource_string('bearcart', 'rickshaw.min.js')
with open(os.path.join(html_prefix, js_path), 'w') as f:
f.write(js)
if css_path:
css = resource_string('bearcart', 'rickshaw.min.css')
with open(os.path.join(html_prefix, css_path), 'w') as f:
f.write(css) | 0.001931 |
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_worker_id == worker._id or not other_task:
continue
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task | 0.001229 |
def documentation(default=None, api_version=None, api=None, **kwargs):
"""returns documentation for the current api"""
api_version = default or api_version
if api:
return api.http.documentation(base_url="", api_version=api_version) | 0.003984 |
def tgread_bytes(self):
"""
Reads a Telegram-encoded byte array, without the need of
specifying its length.
"""
first_byte = self.read_byte()
if first_byte == 254:
length = self.read_byte() | (self.read_byte() << 8) | (
self.read_byte() << 16)
padding = length % 4
else:
length = first_byte
padding = (length + 1) % 4
data = self.read(length)
if padding > 0:
padding = 4 - padding
self.read(padding)
return data | 0.003442 |
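For context, a minimal writer counterpart (a sketch, not part of the original reader class) that produces byte strings this method can parse: short payloads get a single length byte, payloads of 254 bytes or more get a 0xFE marker plus a 3-byte little-endian length, and the whole item is zero-padded to a multiple of four bytes.

```python
def tl_write_bytes(data: bytes) -> bytes:
    """Serialize `data` with the length prefix scheme read above (sketch)."""
    if len(data) < 254:
        out = bytes([len(data)]) + data
    else:
        out = bytes([254,
                     len(data) & 0xFF,
                     (len(data) >> 8) & 0xFF,
                     (len(data) >> 16) & 0xFF]) + data
    return out + bytes(-len(out) % 4)   # zero-pad to a 4-byte boundary
```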
def getModelPosterior(self,min):
"""
USES LAPLACE APPROXIMATION TO CALCULATE THE BAYESIAN MODEL POSTERIOR
"""
Sigma = self.getLaplaceCovar(min)
n_params = self.vd.getNumberScales()
ModCompl = 0.5*n_params*SP.log(2*SP.pi)+0.5*SP.log(SP.linalg.det(Sigma))
RV = min['LML']+ModCompl
return RV | 0.011364 |
def _add_details(self, info):
"""
The 'id' and 'claim_id' attributes are not supplied directly, but
included as part of the 'href' value.
"""
super(QueueMessage, self)._add_details(info)
if self.href is None:
return
parsed = urllib.parse.urlparse(self.href)
self.id = parsed.path.rsplit("/", 1)[-1]
query = parsed.query
if query:
self.claim_id = query.split("claim_id=")[-1] | 0.004184 |
def parse_dereplicated_uc(dereplicated_uc_lines):
""" Return dict of seq ID:dereplicated seq IDs from dereplicated .uc lines
dereplicated_uc_lines: list of lines of .uc file from dereplicated seqs from
usearch61 (i.e. open file of abundance sorted .uc data)
"""
dereplicated_clusters = {}
seed_hit_ix = 0
seq_id_ix = 8
seed_id_ix = 9
for line in dereplicated_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
dereplicated_clusters[curr_line[seq_id_ix]] = []
if curr_line[seed_hit_ix] == "H":
curr_seq_id = curr_line[seq_id_ix]
dereplicated_clusters[curr_line[seed_id_ix]].append(curr_seq_id)
return dereplicated_clusters | 0.002389 |
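A toy example with two fabricated .uc lines (tab-separated; the first field is the record type, the ninth the query id, the tenth the seed id) showing the expected seed-to-members mapping:

```python
uc_lines = [
    "S\t0\t120\t*\t*\t*\t*\t*\tseqA\t*",        # seed record for cluster seqA
    "H\t0\t120\t99.2\t+\t0\t0\t*\tseqB\tseqA",  # seqB dereplicated into seqA
]
assert parse_dereplicated_uc(uc_lines) == {"seqA": ["seqB"]}
```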
def srv_event(token, hits, url=RBA_URL):
"""Serve event to RainbowAlga"""
if url is None:
log.error("Please provide a valid RainbowAlga URL.")
return
ws_url = url + '/message'
if isinstance(hits, pd.core.frame.DataFrame):
pos = [tuple(x) for x in hits[['x', 'y', 'z']].values]
time = list(hits['time'])
tot = list(hits['tot'])
elif isinstance(hits, Table):
pos = list(zip(hits.pos_x, hits.pos_y, hits.pos_z))
time = list(hits.time)
tot = list(hits.tot)
else:
log.error(
"No calibration information found in hits (type: {0})".format(
type(hits)
)
)
return
event = {
"hits": {
'pos': pos,
'time': time,
'tot': tot,
}
}
srv_data(ws_url, token, event, 'event') | 0.00114 |
def difference_update(self, other):
"""Update self to include only the difference with other."""
other = set(other)
indices_to_delete = set()
for i, elem in enumerate(self):
if elem in other:
indices_to_delete.add(i)
if indices_to_delete:
self._delete_values_by_index(indices_to_delete) | 0.03268 |
def from_shypo(cls, xml, encoding='utf-8'):
"""Constructor from xml element *SHYPO*
:param xml.etree.ElementTree xml: the xml *SHYPO* element
:param string encoding: encoding of the xml
"""
score = float(xml.get('SCORE'))
words = [Word.from_whypo(w_xml, encoding) for w_xml in xml.findall('WHYPO') if w_xml.get('WORD') not in ['<s>', '</s>']]
return cls(words, score) | 0.007059 |
def sync(self, command, arguments, tags=None, id=None):
"""
Same as self.raw, except that it calls response.get() to wait for the command execution to finish and then reads the result
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Result object
"""
response = self.raw(command, arguments, tags=tags, id=id)
result = response.get()
if result.state != 'SUCCESS':
raise ResultError(msg='%s' % result.data, code=result.code)
return result | 0.007335 |
def digit_to_query_time(digit: str) -> List[int]:
"""
Given a digit in the utterance, return a list of the times that it corresponds to.
"""
if len(digit) > 2:
return [int(digit), int(digit) + TWELVE_TO_TWENTY_FOUR]
elif int(digit) % 12 == 0:
return [0, 1200, 2400]
return [int(digit) * HOUR_TO_TWENTY_FOUR,
(int(digit) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY] | 0.006912 |
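A worked illustration of the three branches. The constant values below are an assumption (they are not shown in the snippet); they follow the usual HHMM convention for ATIS-style times.

```python
# Assuming HOUR_TO_TWENTY_FOUR == 100, TWELVE_TO_TWENTY_FOUR == 1200 and
# HOURS_IN_DAY == 2400 (hypothetical values, not shown in the snippet):
digit_to_query_time("1030")  # -> [1030, 2230]    already an HHMM time
digit_to_query_time("12")    # -> [0, 1200, 2400] noon/midnight special case
digit_to_query_time("9")     # -> [900, 2100]     9 o'clock, am or pm
```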
def report(self):
"""
Create reports of the findings
"""
# Initialise a variable to store the results
data = ''
for sample in self.metadata:
if sample[self.analysistype].primers != 'NA':
# Set the name of the strain-specific report
sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir,
'{}_{}.csv'.format(sample.name, self.analysistype))
# Populate the strain-specific string with header, and strain name
strainspecific = 'Strain,{},\n{},'.format(','.join(sorted(sample[self.analysistype].targets)),
sample.name)
# Iterate through all the genes in the organism-specific analysis
for gene in sorted(sample[self.analysistype].targets):
try:
# Extract the percent identity
percentidentity = sample[self.analysistype].blastresults[gene]['percent_identity']
# If the % identity is greater than the cutoff of 50%, the gene is considered to be present
if percentidentity > 50:
strainspecific += '{},'.format(percentidentity)
else:
strainspecific += '-,'
# If there are no BLAST results, then the gene is absent
except KeyError:
strainspecific += '-,'
strainspecific += '\n'
# Open and write the data to the strain-specific report
with open(sample[self.analysistype].report, 'w') as specificreport:
specificreport.write(strainspecific)
# Add all the data from each strain to the cumulative data string
data += strainspecific
# Open and write the cumulative data to the cumulative report
with open(os.path.join(self.reportdir, '{}.csv'.format(self.analysistype)), 'w') as report:
report.write(data) | 0.005489 |
def _translate_special_values(self, obj_to_translate):
"""
Plugin arguments may reference values that are not known before the build,
e.g. the id of the built image or the base image name. This method therefore
translates those reserved placeholder values into their runtime values.
"""
translation_dict = {
'BUILT_IMAGE_ID': self.workflow.builder.image_id,
'BUILD_DOCKERFILE_PATH': self.workflow.builder.source.dockerfile_path,
'BUILD_SOURCE_PATH': self.workflow.builder.source.path,
}
if self.workflow.builder.base_image:
translation_dict['BASE_IMAGE'] = self.workflow.builder.base_image.to_str()
if isinstance(obj_to_translate, dict):
# Recurse into dicts
translated_dict = copy.deepcopy(obj_to_translate)
for key, value in obj_to_translate.items():
translated_dict[key] = self._translate_special_values(value)
return translated_dict
elif isinstance(obj_to_translate, list):
# Iterate over lists
return [self._translate_special_values(elem)
for elem in obj_to_translate]
else:
return translation_dict.get(obj_to_translate, obj_to_translate) | 0.003912 |
def is_searchable(self):
"""A bool value that indicates whether the address is a valid address
to search by."""
return self.raw or (self.is_valid_country and
(not self.state or self.is_valid_state)) | 0.015625 |
def prepare_parser(program):
"""Create and populate an argument parser."""
parser = ArgumentParser(
description=PROG_DESCRIPTION, prog=program,
formatter_class=HelpFormatter,
add_help=False)
parser.add_argument(
"-h", "--help", action=MinimalHelpAction, help=argparse.SUPPRESS)
# Register sub-commands.
submodules = (
"nodes", "machines", "devices", "controllers",
"fabrics", "vlans", "subnets", "spaces",
"files", "tags", "users",
"profiles", "shell",
)
cmd_help.register(parser)
for submodule in submodules:
module = import_module("." + submodule, __name__)
module.register(parser)
# Register global options.
parser.add_argument(
'--debug', action='store_true', default=False,
help=argparse.SUPPRESS)
return parser | 0.001164 |
def from_bytearray(self, stream):
"""
Constructs this frame from input data stream, consuming as many bytes as necessary from
the beginning of the stream.
If stream does not contain enough data to construct a complete modbus frame, an EOFError
is raised and no data is consumed.
:param stream: bytearray to consume data from to construct this frame.
:except EOFError: Not enough data for complete frame; no data consumed.
"""
fmt = '>HHHBB'
size_header = struct.calcsize(fmt)
if len(stream) < size_header:
raise EOFError
(
self.transaction_id,
self.protocol_id,
self.length,
self.unit_id,
self.fcode
) = struct.unpack(fmt, bytes(stream[:size_header]))
size_total = size_header + self.length - 2
if len(stream) < size_total:
raise EOFError
self.data = stream[size_header:size_total]
del stream[:size_total] | 0.003883 |
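A round-trip sketch (the frame class name and field values are illustrative) that packs a Modbus-TCP header with `struct` in the same `'>HHHBB'` layout and feeds it to the parser. Note that `length` counts the unit id, function code and payload, so the parser consumes `8 + length - 2` bytes.

```python
import struct

payload = b"\x00\x01\x00\x02"            # hypothetical register data
header = struct.pack(">HHHBB",
                     1,                  # transaction id
                     0,                  # protocol id (always 0 for Modbus-TCP)
                     2 + len(payload),   # length: unit id + fcode + payload
                     17,                 # unit id
                     3)                  # function code
buf = bytearray(header + payload)

frame = ModbusTCPFrame()                 # assumed name for the frame class shown above
frame.from_bytearray(buf)
assert frame.data == payload and len(buf) == 0   # all bytes were consumed
```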
def join_cwd(self, path=None):
"""
Join the path with the current working directory. If a working
directory is specified for this instance it will be used;
otherwise fall back to the process's current working directory.
"""
if self.working_dir:
logger.debug(
"'%s' instance 'working_dir' set to '%s' for join_cwd",
type(self).__name__, self.working_dir,
)
cwd = self.working_dir
else:
cwd = getcwd()
logger.debug(
"'%s' instance 'working_dir' unset; "
"default to process '%s' for join_cwd",
type(self).__name__, cwd,
)
if path:
return join(cwd, path)
return cwd | 0.002597 |
def workflow(ctx, client):
"""List or manage workflows with subcommands."""
if ctx.invoked_subcommand is None:
from renku.models.refs import LinkReference
names = defaultdict(list)
for ref in LinkReference.iter_items(client, common_path='workflows'):
names[ref.reference.name].append(ref.name)
for path in client.workflow_path.glob('*.cwl'):
click.echo(
'{path}: {names}'.format(
path=path.name,
names=', '.join(
click.style(_deref(name), fg='green')
for name in names[path.name]
),
)
) | 0.00142 |
def is_repository_file(self, relativePath):
"""
Check whether a given relative path is a repository file path
:Parameters:
#. relativePath (string): File relative path
:Returns:
#. isRepoFile (boolean): Whether file is a repository file.
#. isFileOnDisk (boolean): Whether file is found on disk.
#. isFileInfoOnDisk (boolean): Whether file info is found on disk.
#. isFileClassOnDisk (boolean): Whether file class is found on disk.
"""
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
if relativePath == '':
return False, False, False, False
relaDir, name = os.path.split(relativePath)
fileOnDisk = os.path.isfile(os.path.join(self.__path, relativePath))
infoOnDisk = os.path.isfile(os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%name))
classOnDisk = os.path.isfile(os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%name))
cDir = self.__repo['walk_repo']
if len(relaDir):
for dirname in relaDir.split(os.sep):
dList = [d for d in cDir if isinstance(d, dict)]
if not len(dList):
cDir = None
break
cDict = [d for d in dList if dirname in d]
if not len(cDict):
cDir = None
break
cDir = cDict[0][dirname]
if cDir is None:
return False, fileOnDisk, infoOnDisk, classOnDisk
#if name not in cDir:
if str(name) not in [str(i) for i in cDir]:
return False, fileOnDisk, infoOnDisk, classOnDisk
# this is a repository registered file. check whether all is on disk
return True, fileOnDisk, infoOnDisk, classOnDisk | 0.009454 |
def typed_assign_stmt_handle(self, tokens):
"""Process Python 3.6 variable type annotations."""
if len(tokens) == 2:
if self.target_info >= (3, 6):
return tokens[0] + ": " + self.wrap_typedef(tokens[1])
else:
return tokens[0] + " = None" + self.wrap_comment(" type: " + tokens[1])
elif len(tokens) == 3:
if self.target_info >= (3, 6):
return tokens[0] + ": " + self.wrap_typedef(tokens[1]) + " = " + tokens[2]
else:
return tokens[0] + " = " + tokens[2] + self.wrap_comment(" type: " + tokens[1])
else:
raise CoconutInternalException("invalid variable type annotation tokens", tokens) | 0.008119 |
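For reference, these are the two code shapes the handler emits, assuming wrap_typedef and wrap_comment pass their arguments through essentially unchanged (an assumption; both helpers are defined elsewhere):

# tokens = ['x', 'int', '3']
#   targeting Python 3.6+:   x: int = 3
#   targeting older Pythons: x = 3  # type: int
#
# tokens = ['y', 'List[str]']
#   targeting Python 3.6+:   y: List[str]
#   targeting older Pythons: y = None  # type: List[str]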
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, parameters, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
m = _rule_re.match(rule)
if m is None or m.end() < len(rule):
raise ValueError('Error while parsing rule {0}'.format(rule))
data = m.groupdict()
converter = data['converter'] or 'default'
return converter, data['args'] or None, data['variable'] | 0.001869 |
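_rule_re is defined elsewhere in the module; a minimal stand-in pattern with the same named groups shows how the converter, argument, and variable parts are pulled out (the real pattern is more elaborate):

import re

# Hypothetical minimal pattern with the named groups parse_rule() expects.
_rule_re = re.compile(
    r'<(?:(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*)'
    r'(?:\((?P<args>.*?)\))?:)?'
    r'(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*)>'
)

print(parse_rule('<int(fixed_digits=4):year>'))
# -> ('int', 'fixed_digits=4', 'year')
print(parse_rule('<page>'))
# -> ('default', None, 'page')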
async def set_active_client(self, set_active_client_request):
"""Set the active client."""
response = hangouts_pb2.SetActiveClientResponse()
await self._pb_request('clients/setactiveclient',
set_active_client_request, response)
return response | 0.006536 |
def backward_inference(self, variables, evidence=None):
"""
Backward inference method using belief propagation.
Parameters:
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples:
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
array([ 0.66594382, 0.33405618])
"""
variable_dict = defaultdict(list)
for var in variables:
variable_dict[var[1]].append(var)
time_range = max(variable_dict)
interface_nodes_dict = {}
if evidence:
evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
time_range = max(time_range, evid_time_range)
end_bp = BeliefPropagation(self.start_junction_tree)
potential_dict = self.forward_inference(variables, evidence, 'potential')
update_factor = self._shift_factor(potential_dict[time_range], 1)
factor_values = {}
for time_slice in range(time_range, 0, -1):
evidence_time = self._get_evidence(evidence, time_slice, 1)
evidence_prev_time = self._get_evidence(evidence, time_slice - 1, 0)
if evidence_prev_time:
interface_nodes_dict = {k: v for k, v in evidence_prev_time.items() if k in self.interface_nodes_0}
if evidence_time:
evidence_time.update(interface_nodes_dict)
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
self._update_belief(mid_bp, self.in_clique, potential_dict[time_slice - 1])
forward_factor = self._shift_factor(potential_dict[time_slice], 1)
self._update_belief(mid_bp, self.out_clique, forward_factor, update_factor)
if variable_dict[time_slice]:
variable_time = self._shift_nodes(variable_dict[time_slice], 1)
new_values = mid_bp.query(variable_time, evidence=evidence_time, joint=False)
changed_values = {}
for key in new_values.keys():
new_key = (key[0], time_slice)
new_factor = DiscreteFactor([new_key], new_values[key].cardinality, new_values[key].values)
changed_values[new_key] = new_factor
factor_values.update(changed_values)
clique_phi = self._get_factor(mid_bp, evidence_time)
in_clique_phi = self._marginalize_factor(self.interface_nodes_0, clique_phi)
update_factor = self._shift_factor(in_clique_phi, 1)
out_clique_phi = self._shift_factor(update_factor, 0)
self._update_belief(end_bp, self.start_interface_clique, potential_dict[0], out_clique_phi)
evidence_0 = self._get_evidence(evidence, 0, 0)
if variable_dict[0]:
factor_values.update(end_bp.query(variable_dict[0], evidence_0, joint=False))
return factor_values | 0.003209 |
def register_standard (id, source_types, target_types, requirements = []):
""" Creates new instance of the 'generator' class and registers it.
    Returns the created instance.
    Rationale: the instance is returned so that it's possible to first register
    a generator and then call the 'run' method on that generator, bypassing all
generator selection.
"""
g = Generator (id, False, source_types, target_types, requirements)
register (g)
return g | 0.014374 |
def info_gen(self, code, message, compressed=False):
"""Dispatcher for the info generators.
Determines which __info_*_gen() should be used based on the supplied
parameters.
Args:
code: The status code for the command response.
            message: The status message for the command response.
compressed: Force decompression. Useful for xz* commands.
Returns:
An info generator.
"""
if "COMPRESS=GZIP" in message:
return self.__info_gzip_gen()
if compressed:
return self.__info_yenczlib_gen()
return self.__info_plain_gen() | 0.003044 |
def resource(self, api_path=None, base_path='/api/now', chunk_size=None, **kwargs):
"""Creates a new :class:`Resource` object after validating paths
:param api_path: Path to the API to operate on
:param base_path: (optional) Base path override
:param chunk_size: Response stream parser chunk size (in bytes)
:param **kwargs: Pass request.request parameters to the Resource object
:return:
- :class:`Resource` object
:raises:
- InvalidUsage: If a path fails validation
"""
for path in [api_path, base_path]:
URLBuilder.validate_path(path)
return Resource(api_path=api_path,
base_path=base_path,
parameters=self.parameters,
chunk_size=chunk_size or 8192,
session=self.session,
base_url=self.base_url,
**kwargs) | 0.00309 |
def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
sep = os.path.sep
f_excludes = []
for exclude in excludes:
if not os.path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = os.path.join(rootpath, exclude)
if not exclude.endswith(sep):
exclude += sep
f_excludes.append(exclude)
return f_excludes | 0.001799 |
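A small usage example on a POSIX layout (the paths are illustrative):

normalize_excludes('/project', ['build', '/project/dist', 'docs/_static'])
# -> ['/project/build/', '/project/dist/', '/project/docs/_static/']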
def subclass_genesis(self, genesisclass):
"""Subclass the given genesis class and implement all abstract methods
:param genesisclass: the GenesisWin class to subclass
:type genesisclass: :class:`GenesisWin`
:returns: the subclass
:rtype: subclass of :class:`GenesisWin`
:raises: None
"""
class MayaGenesisWin(genesisclass):
"""Implementation of Genesis for maya
"""
def open_shot(self, taskfile):
"""Open the given taskfile
:param taskfile: the taskfile for the shot
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: none
"""
return self.open_file(taskfile)
def save_shot(self, jbfile, tf):
"""Save the shot to the location of jbfile
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: None
"""
self.update_scene_node(tf)
self.save_file(jbfile)
def open_asset(self, taskfile):
"""Open the given taskfile
:param taskfile: the taskfile for the asset
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: None
"""
return self.open_file(taskfile)
def save_asset(self, jbfile, tf):
"""Save the asset to the location of jbfile
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
            :raises: None
"""
self.update_scene_node(tf)
self.save_file(jbfile)
def save_file(self, jbfile):
"""Physically save current scene to jbfile\'s location
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:returns: None
:rtype: None
:raises: None
"""
p = jbfile.get_fullpath()
p = os.path.expanduser(p)
typ = 'mayaBinary'
if jbfile.get_ext() == 'ma':
typ = 'mayaAscii'
cmds.file(rename = p)
cmds.file(save=True, defaultExtensions=False, type=typ)
def open_file(self, taskfile):
"""Open the given jbfile in maya
:param taskfile: the taskfile for the asset
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: None
"""
r = self.check_modified()
if r is False:
return False
cmds.file(taskfile.path, open=True, force=True, ignoreVersion=True)
return True
def get_current_file(self, ):
"""Return the taskfile that is currently open or None if no taskfile is open
:returns: the open taskfile or None if no taskfile is open
:rtype: :class:`djadapter.models.TaskFile` | None
:raises: None
"""
node = jbscene.get_current_scene_node()
if not node:
return
tfid = cmds.getAttr('%s.taskfile_id' % node)
try:
return djadapter.taskfiles.get(id=tfid)
except djadapter.models.TaskFile.DoesNotExist:
log.error("No taskfile with id %s was found. Get current scene failed. Check your jb_sceneNode \'%s\'." % (tfid, node))
return
def get_scene_node(self, ):
"""Return the current scenen node or create one if it does not exist
:returns: Name of the scene node
:rtype: str
:raises: None
"""
scenenodes = cmds.ls(':jb_sceneNode*')
if len(scenenodes) > 1:
cmds.delete(scenenodes)
node = jbscene.get_current_scene_node()
if node is None:
cmds.namespace(set=':')
node = cmds.createNode('jb_sceneNode')
return node
def update_scene_node(self, tf):
"""Update the current scene node
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: None
"""
node = self.get_scene_node()
cmds.setAttr('%s.taskfile_id' % node, lock=False)
cmds.setAttr('%s.taskfile_id' % node, tf.id)
cmds.setAttr('%s.taskfile_id' % node, lock=True)
def check_modified(self, ):
"""Check if the current scene was modified and ask the user to continue
This might save the scene if the user accepts to save before continuing.
:returns: True if the user accepted to continue.
:rtype: bool
:raises: None
"""
if not cmds.file(q=1, modified=1):
return True
curfile = cmds.file(q=1, sceneName=1)
r = cmds.confirmDialog( title='Save Changes', message='Save changes to %s?' % curfile,
button=['Save', 'Don\'t Save' ,'Cancel'],
defaultButton='Save', cancelButton='Cancel',
dismissString='Cancel')
if r == 'Cancel':
return False
if r == 'Save':
cmds.file(save=True, force=True)
return True
MayaGenesisWin.set_filetype(djadapter.FILETYPES['mayamainscene'],)
return MayaGenesisWin | 0.003257 |
def get_attribute_from_indices(self, indices: list, attribute_name: str):
"""Get attribute values for the requested indices.
:param indices: Indices of vertices for which the attribute values are requested.
:param attribute_name: The name of the attribute.
:return: A list of attribute values for the requested indices.
"""
return list(np.array(self.graph.vs[attribute_name])[indices]) | 0.006912 |
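The method relies on numpy fancy indexing over the igraph vertex attribute sequence; the core lookup works like this (standalone illustration, not the graph attribute itself):

import numpy as np

ages = np.array([31, 45, 27, 60])   # stand-in for np.array(graph.vs['age'])
print(list(ages[[0, 2]]))           # -> [31, 27]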
def invoke(tok: str, props: Inputs, opts: InvokeOptions = None) -> Awaitable[Any]:
"""
invoke dynamically invokes the function, tok, which is offered by a provider plugin. The inputs
can be a bag of computed values (Ts or Awaitable[T]s), and the result is a Awaitable[Any] that
resolves when the invoke finishes.
"""
log.debug(f"Invoking function: tok={tok}")
if opts is None:
opts = InvokeOptions()
async def do_invoke():
# If a parent was provided, but no provider was provided, use the parent's provider if one was specified.
if opts.parent is not None and opts.provider is None:
opts.provider = opts.parent.get_provider(tok)
# Construct a provider reference from the given provider, if one was provided to us.
provider_ref = None
if opts.provider is not None:
provider_urn = await opts.provider.urn.future()
provider_id = (await opts.provider.id.future()) or rpc.UNKNOWN
provider_ref = f"{provider_urn}::{provider_id}"
log.debug(f"Invoke using provider {provider_ref}")
monitor = get_monitor()
inputs = await rpc.serialize_properties(props, {})
version = opts.version or ""
log.debug(f"Invoking function prepared: tok={tok}")
req = provider_pb2.InvokeRequest(tok=tok, args=inputs, provider=provider_ref, version=version)
def do_invoke():
try:
return monitor.Invoke(req)
except grpc.RpcError as exn:
# gRPC-python gets creative with their exceptions. grpc.RpcError as a type is useless;
                # the usefulness comes from the fact that it is polymorphically also a grpc.Call and thus has
# the .code() member. Pylint doesn't know this because it's not known statically.
#
# Neither pylint nor I are the only ones who find this confusing:
# https://github.com/grpc/grpc/issues/10885#issuecomment-302581315
# pylint: disable=no-member
if exn.code() == grpc.StatusCode.UNAVAILABLE:
sys.exit(0)
details = exn.details()
raise Exception(details)
resp = await asyncio.get_event_loop().run_in_executor(None, do_invoke)
log.debug(f"Invoking function completed successfully: tok={tok}")
# If the invoke failed, raise an error.
if resp.failures:
raise Exception(f"invoke of {tok} failed: {resp.failures[0].reason} ({resp.failures[0].property})")
# Otherwise, return the output properties.
ret_obj = getattr(resp, 'return')
if ret_obj:
return rpc.deserialize_properties(ret_obj)
return {}
return asyncio.ensure_future(RPC_MANAGER.do_rpc("invoke", do_invoke)()) | 0.004212 |
def create_access_key(name, is_active=True, permitted=[], options={}):
""" Creates a new access key. A master key must be set first.
:param name: the name of the access key to create
:param is_active: Boolean value dictating whether this key is currently active (default True)
:param permitted: list of strings describing which operation types this key will permit
Legal values include "writes", "queries", "saved_queries", "cached_queries",
"datasets", and "schema".
:param options: dictionary containing more details about the key's permitted and restricted
functionality
"""
_initialize_client_from_environment()
return _client.create_access_key(name=name, is_active=is_active,
permitted=permitted, options=options) | 0.005869 |
def plot_fit(self, intervals=True, **kwargs):
""" Plots the fit of the model
Parameters
----------
intervals : Boolean
Whether to plot 95% confidence interval of states
Returns
----------
None (plots data and the fit)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
series_type = kwargs.get('series_type','Smoothed')
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
date_index = copy.deepcopy(self.index)
date_index = date_index[self.integ:self.data_original.shape[0]+1]
if series_type == 'Smoothed':
mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
elif series_type == 'Filtered':
mu, V, _, _, _ = self._model(self.data,self.latent_variables.get_z_values())
else:
mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
mu = mu[0][:-1]
V = V.ravel()
plt.figure(figsize=figsize)
plt.subplot(3, 1, 1)
plt.title(self.data_name + " Raw and " + series_type)
if intervals == True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
plt.fill_between(date_index[2:], mu[2:] + 1.98*np.sqrt(V[:-1][2:]), mu[2:] - 1.98*np.sqrt(V[:-1][2:]), alpha=0.15,label='95% C.I.')
plt.plot(date_index,self.data,label='Data')
plt.plot(date_index,mu,label=series_type,c='black')
plt.legend(loc=2)
plt.subplot(3, 1, 2)
plt.title(self.data_name + " Local Level")
if intervals == True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
plt.fill_between(date_index[2:], mu[2:] + 1.98*np.sqrt(V[:-1][2:]), mu[2:] - 1.98*np.sqrt(V[:-1][2:]), alpha=0.15,label='95% C.I.')
plt.plot(date_index,mu,label='Local Level')
plt.legend(loc=2)
plt.subplot(3, 1, 3)
plt.title("Measurement Noise")
plt.plot(date_index,self.data-mu)
plt.show() | 0.016803 |
def register_index(self, index):
"""Registers a given index:
* Creates and opens an index for it (if it doesn't exist yet)
* Sets some default values on it (unless they're already set)
Args:
index (PonyWhoosh.Index): An instance of PonyWhoosh.Index class
"""
self._indexes[index._name] = index
self.create_index(index)
return index | 0.002674 |
def _inject_selenium(self, test):
"""
Injects a selenium instance into the method.
"""
from django.conf import settings
test_case = get_test_case_class(test)
test_case.selenium_plugin_started = True
# Provide some reasonable default values
sel = selenium(
getattr(settings, "SELENIUM_HOST", "localhost"),
int(getattr(settings, "SELENIUM_PORT", 4444)),
getattr(settings, "SELENIUM_BROWSER_COMMAND", "*chrome"),
getattr(settings, "SELENIUM_URL_ROOT", "http://127.0.0.1:8000/"))
try:
sel.start()
except socket.error:
if getattr(settings, "FORCE_SELENIUM_TESTS", False):
raise
else:
raise SkipTest("Selenium server not available.")
else:
test_case.selenium_started = True
# Only works on method test cases, because we obviously need
# self.
if isinstance(test.test, nose.case.MethodTestCase):
test.test.test.im_self.selenium = sel
elif isinstance(test.test, TestCase):
test.test.run.im_self.selenium = sel
else:
raise SkipTest("Test skipped because it's not a method.") | 0.001548 |
def create(self, equipments):
"""
Method to create equipments
        :param equipments: List of equipments to be created in the database
        :return: The response of the POST request
"""
data = {'equipments': equipments}
return super(ApiEquipment, self).post('api/v3/equipment/', data) | 0.009494 |
def load_config(self):
"""
        Load configuration for the service from the JSON file at
        ``self.config_file`` and return it as a dict.
"""
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle)
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError(
'Configuration is required! Missing: %s' % self.config_file
) | 0.003155 |
def delete_file(self, path, prefixed_path, source_storage):
"""
Override delete_file to skip modified time and exists lookups.
"""
if not self.collectfast_enabled:
return super(Command, self).delete_file(
path, prefixed_path, source_storage)
if not self.dry_run:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
else:
self.log("Pretending to delete '%s'" % path)
return True | 0.003899 |
def generate_module(spec, out):
"""
Given an AMQP spec parsed into an xml.etree.ElemenTree,
and a file-like 'out' object to write to, generate
the skeleton of a Python module.
"""
#
# HACK THE SPEC so that 'access' is handled by 'channel' instead of 'connection'
#
for amqp_class in spec.findall('class'):
if amqp_class.attrib['name'] == 'access':
amqp_class.attrib['handler'] = 'channel'
#
# Build up some helper dictionaries
#
for domain in spec.findall('domain'):
domains[domain.attrib['name']] = domain.attrib['type']
for amqp_class in spec.findall('class'):
for amqp_method in amqp_class.findall('method'):
method_name_map[(amqp_class.attrib['name'], amqp_method.attrib['name'])] = \
(
amqp_class.attrib['index'],
amqp_method.attrib['index'],
amqp_class.attrib['handler'].capitalize() + '.' +
_fixup_method_name(amqp_class, amqp_method),
)
#### Actually generate output
for amqp_class in spec.findall('class'):
if amqp_class.attrib['handler'] == amqp_class.attrib['name']:
generate_class(spec, amqp_class, out)
out.write('_METHOD_MAP = {\n')
for amqp_class in spec.findall('class'):
        print(amqp_class.attrib)
# for chassis in amqp_class.findall('chassis'):
# print ' ', chassis.attrib
for amqp_method in amqp_class.findall('method'):
# print ' ', amqp_method.attrib
# for chassis in amqp_method.findall('chassis'):
# print ' ', chassis.attrib
chassis = [x.attrib['name'] for x in amqp_method.findall('chassis')]
if 'client' in chassis:
out.write(" (%s, %s): (%s, %s._%s),\n" % (
amqp_class.attrib['index'],
amqp_method.attrib['index'],
amqp_class.attrib['handler'].capitalize(),
amqp_class.attrib['handler'].capitalize(),
_fixup_method_name(amqp_class, amqp_method)))
out.write('}\n\n')
out.write('_METHOD_NAME_MAP = {\n')
for amqp_class in spec.findall('class'):
for amqp_method in amqp_class.findall('method'):
out.write(" (%s, %s): '%s.%s',\n" % (
amqp_class.attrib['index'],
amqp_method.attrib['index'],
amqp_class.attrib['handler'].capitalize(),
_fixup_method_name(amqp_class, amqp_method)))
out.write('}\n') | 0.003458 |
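The generator above leans entirely on ElementTree's findall over the AMQP spec; a tiny stand-in spec shows the traversal pattern (the real XML is far larger):

import xml.etree.ElementTree as ElementTree

spec = ElementTree.fromstring("""
<amqp>
  <class name="channel" handler="channel" index="20">
    <method name="open" index="10"><chassis name="server"/></method>
    <method name="open-ok" index="11"><chassis name="client"/></method>
  </class>
</amqp>
""")
for amqp_class in spec.findall('class'):
    for amqp_method in amqp_class.findall('method'):
        chassis = [x.attrib['name'] for x in amqp_method.findall('chassis')]
        print(amqp_class.attrib['index'], amqp_method.attrib['index'], chassis)
# 20 10 ['server']
# 20 11 ['client']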
def update_not_existing_kwargs(to_update, update_from):
"""
    This function updates the keyword arguments in to_update with those from
    update_from, but only for keys that are not already set in to_update.
    This is used to update kwargs from the default dicts.
"""
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
return to_update | 0.009804 |
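Typical use when merging user-supplied plotting kwargs with a default dict:

defaults = {'color': 'blue', 'linewidth': 2}
user_kwargs = {'color': 'red'}
print(update_not_existing_kwargs(user_kwargs, defaults))
# -> {'color': 'red', 'linewidth': 2}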
def _multi_permission_mask(mode):
"""
Support multiple, comma-separated Unix chmod symbolic modes.
>>> _multi_permission_mask('a=r,u+w')(0) == 0o644
True
"""
def compose(f, g):
return lambda *args, **kwargs: g(f(*args, **kwargs))
return functools.reduce(compose, map(_permission_mask, mode.split(','))) | 0.00295 |
def data_storage_dir(self):
"""
Temporary folder used to store intermediate calculation data in case the memory is saturated
"""
if self._data_storage_dir is None:
self._data_storage_dir = tempfile.mkdtemp(prefix = "openfisca_")
log.warn((
"Intermediate results will be stored on disk in {} in case of memory overflow. "
"You should remove this directory once you're done with your simulation."
).format(self._data_storage_dir))
return self._data_storage_dir | 0.012281 |
def update_count(self):
""" updates likes and dislikes count """
node_rating_count = self.node.rating_count
node_rating_count.likes = self.node.vote_set.filter(vote=1).count()
node_rating_count.dislikes = self.node.vote_set.filter(vote=-1).count()
node_rating_count.save() | 0.00641 |
def modify_calendar_resource(self, calres, attrs):
"""
:param calres: a zobjects.CalendarResource
:param attrs: a dictionary of attributes to set ({key:value,...})
"""
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
self.request('ModifyCalendarResource', {
'id': self._get_or_fetch_id(
calres, self.get_calendar_resource),
'a': attrs
}) | 0.004474 |
def ext_language(ext, exts=None):
"""Language of the extension in those extensions
If exts is supplied, then restrict recognition to those exts only
If exts is not supplied, then use all known extensions
>>> ext_language('.py') == 'python'
True
"""
languages = {
'.py': 'python',
'.py2': 'python2',
'.py3': 'python3',
'.sh': 'bash',
'.bash': 'bash',
'.pl': 'perl',
'.js': 'javascript',
'.txt': 'english',
}
ext_languages = {_: languages[_] for _ in exts} if exts else languages
return ext_languages.get(ext) | 0.001631 |
def folderitems(self):
"""TODO: Refactor to non-classic mode
"""
items = super(ReferenceResultsView, self).folderitems()
self.categories.sort()
return items | 0.010204 |
def check_on_curve(self):
"""raise :class:`NoSuchPointError` if the point is not actually on the curve."""
if not self._curve.contains_point(*self):
raise NoSuchPointError('({},{}) is not on the curve {}'.format(self[0], self[1], self._curve)) | 0.01476 |
def minimum_image_dr( self, r1, r2, cutoff=None ):
"""
Calculate the shortest distance between two points in the cell,
accounting for periodic boundary conditions.
Args:
r1 (np.array): fractional coordinates of point r1.
r2 (np.array): fractional coordinates of point r2.
            cutoff (:obj:`float`, optional): if set, return zero if the minimum distance is greater than `cutoff`. Defaults to None.
Returns:
(float): The distance between r1 and r2.
"""
delta_r_vector = self.minimum_image( r1, r2 )
return( self.dr( np.zeros( 3 ), delta_r_vector, cutoff ) ) | 0.02099 |
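minimum_image() itself is defined elsewhere on the class; for a cell in fractional coordinates it amounts to wrapping each component of the separation vector into [-0.5, 0.5), as in this sketch (an assumption about the implementation, kept deliberately simple):

import numpy as np

def minimum_image(r1, r2):
    # Wrap each fractional component of r2 - r1 back into [-0.5, 0.5).
    delta = np.asarray(r2, dtype=float) - np.asarray(r1, dtype=float)
    return delta - np.round(delta)

print(minimum_image([0.95, 0.10, 0.5], [0.05, 0.20, 0.5]))
# -> [0.1 0.1 0. ]   (the periodic image across the cell boundary is closer)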
def create_run(cmd, project, exp, grp):
"""
Create a new 'run' in the database.
This creates a new transaction in the database and creates a new
run in this transaction. Afterwards we return both the transaction as
well as the run itself. The user is responsible for committing it when
the time comes.
Args:
cmd: The command that has been executed.
        project: The project this run belongs to.
        exp: The experiment this run belongs to.
        grp: The run_group (uuid) we belong to.
Returns:
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point.
"""
from benchbuild.utils import schema as s
session = s.Session()
run = s.Run(
command=str(cmd),
project_name=project.name,
project_group=project.group,
experiment_name=exp,
run_group=str(grp),
experiment_group=project.experiment.id)
session.add(run)
session.commit()
return (run, session) | 0.000953 |
def _filter_validate(filepath, location, values, validate):
"""Generator for validate() results called against all given values. On
errors, fields are warned about and ignored, unless strict mode is set in
which case a compiler error is raised.
"""
for value in values:
if not isinstance(value, dict):
warn_invalid(filepath, location, value, '(expected a dict)')
continue
try:
yield validate(**value)
except dbt.exceptions.JSONValidationException as exc:
# we don't want to fail the full run, but we do want to fail
# parsing this file
warn_invalid(filepath, location, value, '- ' + exc.msg)
continue | 0.001374 |
def get_extreme(self, target_prop, maximize=True, min_temp=None,
max_temp=None, min_doping=None, max_doping=None,
isotropy_tolerance=0.05, use_average=True):
"""
This method takes in eigenvalues over a range of carriers,
temperatures, and doping levels, and tells you what is the "best"
value that can be achieved for the given target_property. Note that
this method searches the doping dict only, not the full mu dict.
Args:
target_prop: target property, i.e. "seebeck", "power factor",
"conductivity", "kappa", or "zt"
maximize: True to maximize, False to minimize (e.g. kappa)
min_temp: minimum temperature allowed
max_temp: maximum temperature allowed
min_doping: minimum doping allowed (e.g., 1E18)
max_doping: maximum doping allowed (e.g., 1E20)
isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
use_average: True for avg of eigenval, False for max eigenval
Returns:
A dictionary with keys {"p", "n", "best"} with sub-keys:
{"value", "temperature", "doping", "isotropic"}
"""
def is_isotropic(x, isotropy_tolerance):
"""
Internal method to tell you if 3-vector "x" is isotropic
Args:
x: the vector to determine isotropy for
isotropy_tolerance: tolerance, e.g. 0.05 is 5%
"""
if len(x) != 3:
raise ValueError("Invalid input to is_isotropic!")
st = sorted(x)
return bool(all([st[0], st[1], st[2]]) and \
(abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and \
(abs((st[2] - st[0])) / st[2] <= isotropy_tolerance) and \
(abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))
if target_prop.lower() == "seebeck":
d = self.get_seebeck(output="eigs", doping_levels=True)
elif target_prop.lower() == "power factor":
d = self.get_power_factor(output="eigs", doping_levels=True)
elif target_prop.lower() == "conductivity":
d = self.get_conductivity(output="eigs", doping_levels=True)
elif target_prop.lower() == "kappa":
d = self.get_thermal_conductivity(output="eigs",
doping_levels=True)
elif target_prop.lower() == "zt":
d = self.get_zt(output="eigs", doping_levels=True)
else:
raise ValueError("Target property: {} not recognized!".
format(target_prop))
absval = True # take the absolute value of properties
x_val = None
x_temp = None
x_doping = None
x_isotropic = None
output = {}
min_temp = min_temp or 0
max_temp = max_temp or float('inf')
min_doping = min_doping or 0
max_doping = max_doping or float('inf')
for pn in ('p', 'n'):
for t in d[pn]: # temperatures
if min_temp <= float(t) <= max_temp:
for didx, evs in enumerate(d[pn][t]):
doping_lvl = self.doping[pn][didx]
if min_doping <= doping_lvl <= max_doping:
isotropic = is_isotropic(evs, isotropy_tolerance)
if absval:
evs = [abs(x) for x in evs]
if use_average:
val = float(sum(evs)) / len(evs)
else:
val = max(evs)
if x_val is None or (val > x_val and maximize) \
or (val < x_val and not maximize):
x_val = val
x_temp = t
x_doping = doping_lvl
x_isotropic = isotropic
output[pn] = {'value': x_val, 'temperature': x_temp,
'doping': x_doping, 'isotropic': x_isotropic}
x_val = None
if maximize:
max_type = 'p' if output['p']['value'] >= \
output['n']['value'] else 'n'
else:
max_type = 'p' if output['p']['value'] <= \
output['n']['value'] else 'n'
output['best'] = output[max_type]
output['best']['carrier_type'] = max_type
return output | 0.001938 |
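To see what the isotropy filter accepts, here is a trimmed standalone copy of the inner is_isotropic helper (the original also validates the vector length):

def is_isotropic(x, isotropy_tolerance):
    # Compare sorted eigenvalues pairwise against the relative tolerance.
    st = sorted(x)
    return bool(all(st)
                and abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance
                and abs((st[2] - st[0]) / st[2]) <= isotropy_tolerance
                and abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance)

print(is_isotropic([1.00, 1.03, 0.98], 0.05))  # True: spread within ~5%
print(is_isotropic([1.0, 2.0, 3.0], 0.05))     # False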
def member(self, phlo_id, node_id,
member_id, action,
node_type='conference_bridge'):
"""
:param phlo_id:
:param node_id:
:param member_id:
:param action:
:param node_type: default value `conference_bridge`
:return:
"""
data = {
'member_id': member_id,
'phlo_id': phlo_id,
'node_id': node_id,
'node_type': node_type
}
member = Member(self.client, data)
return getattr(member, action)() | 0.007156 |
def _handle_response(self, response):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise BinanceAPIException(response)
try:
return response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % response.text) | 0.008163 |
def emit(self, signal, value=None, gather=False):
"""Emits a signal, causing all slot methods connected with the signal to be called (optionally w/ related value)
        signal: the name of the signal to emit; must be defined in the class's 'signals' list.
value: the value to pass to all connected slot methods.
gather: if set, causes emit to return a list of all slot results
"""
results = [] if gather else True
if hasattr(self, 'connections') and signal in self.connections:
for condition, values in self.connections[signal].items():
if condition is None or condition == value or (callable(condition) and condition(value)):
for slot, transform in values.items():
if transform is not None:
if callable(transform):
used_value = transform(value)
elif isinstance(transform, str):
used_value = transform.format(value=value)
else:
used_value = transform
else:
used_value = value
if used_value is not None:
if(accept_arguments(slot, 1)):
result = slot(used_value)
elif(accept_arguments(slot, 0)):
result = slot()
else:
result = ''
else:
result = slot()
if gather:
results.append(result)
return results | 0.002809 |
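emit() walks a nested mapping that is presumably built by a connect() helper not shown here; its assumed shape is signal name -> condition -> slot -> transform, for example:

# Hypothetical contents of self.connections as emit() consumes them:
connections = {
    'value_changed': {
        None: {                        # None condition: fire on every emit
            print: '{value} changed',  # str transform -> formatted with value
        },
        0: {                           # fires only when the emitted value == 0
            print: None,               # no transform: pass the value through
        },
    },
}
# emit('value_changed', 5) would call print('5 changed');
# emit('value_changed', 0) would additionally call print(0).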
async def async_send_to_pipe_channel(channel_name,
label,
value):
'Send message asynchronously through pipe to client component'
pcn = _form_pipe_channel_name(channel_name)
channel_layer = get_channel_layer()
await channel_layer.group_send(pcn,
{"type":"pipe.value",
"label":label,
"value":value}) | 0.008081 |
def run_profiler(self):
"""Run profiler"""
if self.main.editor.save():
self.switch_to_plugin()
self.analyze(self.main.editor.get_current_filename()) | 0.010638 |
def write_antenna(page, args, seg_plot=None, grid=False, ipn=False):
"""
Write antenna factors to merkup.page object page and generate John's
detector response plot.
"""
from pylal import antenna
page.h3()
page.add('Antenna factors and sky locations')
page.h3.close()
th = []
td = []
th2 = []
td2 = []
ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]
if ipn:
antenna_ifo = {}
ra = []
dec = []
# FIXME: Remove hardcoding here and show this in all cases
search_file = open('../../../S5IPN_GRB%s_search_180deg.txt'
% args.grb_name)
for line in search_file:
ra.append(line.split()[0])
dec.append(line.split()[1])
for ifo in ifos:
antenna_ifo[ifo] = []
for k, l in zip(ra, dec):
_, _, _, f_q = antenna.response(args.start_time, float(k),
float(l), 0.0, 0.0, 'degree',
ifo)
antenna_ifo[ifo].append(round(f_q,3))
dectKeys = antenna_ifo.keys()
for elements in range(len(antenna_ifo.values()[0])):
newDict={}
for detectors in range(len(antenna_ifo.keys())):
newDict[dectKeys[detectors]] = antenna_ifo[\
dectKeys[detectors]][elements]
for key in newDict.keys():
th.append(key)
td.append(newDict.values())
page = write_table(page, list(set(th)), td)
for ifo in ifos:
_, _, _, f_q = antenna.response(args.start_time, args.ra, args.dec,
0.0, 0.0, 'degree',ifo)
th.append(ifo)
td.append(round(f_q, 3))
#FIXME: Work out a way to make these external calls safely
#cmmnd = 'projectedDetectorTensor --gps-sec %d --ra-deg %f --dec-deg %f' \
# % (args.start_time,args.ra, args.dec)
#for ifo in ifos:
# if ifo == 'H1':
# cmmnd += ' --display-lho'
# elif ifo == 'L1':
# cmmnd += ' --display-llo'
# elif ifo == 'V1':
# cmmnd += ' --display-virgo'
#status = make_external_call(cmmnd)
page = write_table(page, th, td)
# plot = markup.page()
# p = "projtens.png"
# plot.a(href=p, title="Detector response and polarization")
# plot.img(src=p)
# plot.a.close()
# th2 = ['Response Diagram']
# td2 = [plot() ]
# FIXME: Add these in!!
# plot = markup.page()
# p = "ALL_TIMES/plots_clustered/GRB%s_search.png"\
# % args.grb_name
# plot.a(href=p, title="Error Box Search")
# plot.img(src=p)
# plot.a.close()
# th2.append('Error Box Search')
# td2.append(plot())
# plot = markup.page()
# p = "ALL_TIMES/plots_clustered/GRB%s_simulations.png"\
# % args.grb_name
# plot.a(href=p, title="Error Box Simulations")
# plot.img(src=p)
# plot.a.close()
# th2.append('Error Box Simulations')
# td2.append(plot())
if seg_plot is not None:
plot = markup.page()
p = os.path.basename(seg_plot)
plot.a(href=p, title="Science Segments")
plot.img(src=p)
plot.a.close()
th2.append('Science Segments')
td2.append(plot())
plot = markup.page()
p = "ALL_TIMES/plots_clustered/GRB%s_sky_grid.png"\
% args.grb_name
plot.a(href=p, title="Sky Grid")
plot.img(src=p)
plot.a.close()
th2.append('Sky Grid')
td2.append(plot())
# plot = markup.page()
# p = "GRB%s_inspiral_horizon_distance.png"\
# % args.grb_name
# plot.a(href=p, title="Inspiral Horizon Distance")
# plot.img(src=p)
# plot.a.close()
# th2.append('Inspiral Horizon Distance')
# td2.append(plot())
page = write_table(page, th2, td2)
return page | 0.002552 |
def _generate_contents(self, tar):
"""
Adds configuration files to tarfile instance.
:param tar: tarfile instance
:returns: None
"""
text = self.render(files=False)
# create a list with all the packages (and remove empty entries)
vpn_instances = vpn_pattern.split(text)
if '' in vpn_instances:
vpn_instances.remove('')
# create a file for each VPN
for vpn in vpn_instances:
lines = vpn.split('\n')
vpn_name = lines[0]
text_contents = '\n'.join(lines[2:])
# do not end with double new line
if text_contents.endswith('\n\n'):
text_contents = text_contents[0:-1]
self._add_file(tar=tar,
name='{0}{1}'.format(vpn_name, config_suffix),
contents=text_contents) | 0.00223 |
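_add_file() is defined elsewhere on the backend; a minimal sketch of what it presumably does, wrapping the rendered text in a TarInfo record, looks like this (the method name and details are assumptions):

import tarfile
import time
from io import BytesIO

def _add_file(self, tar, name, contents):
    # Hypothetical helper: add an in-memory text payload to the tar archive.
    payload = contents.encode('utf-8')
    info = tarfile.TarInfo(name=name)
    info.size = len(payload)
    info.mtime = int(time.time())
    tar.addfile(info, BytesIO(payload))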
def mongorestore(mongo_user, mongo_password, backup_directory_path, drop_database=False, silent=False):
""" Warning: Setting drop_database to True will drop the ENTIRE
CURRENTLY RUNNING DATABASE before restoring.
    Mongorestore requires a running mongod process; in addition, the provided
    user must have restore permissions for the database. A mongolia superuser
will have more than adequate permissions, but a regular user may not.
"""
if not path.exists(backup_directory_path):
raise Exception("the provided tar directory %s does not exist."
% (backup_directory_path))
if silent:
mongorestore_command = ("mongorestore --quiet -u %s -p %s %s"
% (mongo_user, mongo_password, backup_directory_path))
else:
mongorestore_command = ("mongorestore -v -u %s -p %s %s"
% (mongo_user, mongo_password, backup_directory_path))
if drop_database:
mongorestore_command = mongorestore_command + " --drop"
call(mongorestore_command, silent=silent) | 0.007826 |
def profile_solver(ml, accel=None, **kwargs):
"""Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
ml.solve(b, residuals=residuals, **kwargs)
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals) | 0.000734 |
def is_parent_of_log(self, id_, log_id):
"""Tests if an ``Id`` is a direct parent of a log.
arg: id (osid.id.Id): an ``Id``
arg: log_id (osid.id.Id): the ``Id`` of a log
return: (boolean) - ``true`` if this ``id`` is a parent of
``log_id,`` ``false`` otherwise
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``id`` or ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_parent_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=log_id)
return self._hierarchy_session.is_parent(id_=log_id, parent_id=id_) | 0.002962 |