def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except Error:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except Error:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
async def send_chat_action(self, chat_id: typing.Union[base.Integer, base.String],
action: base.String) -> base.Boolean:
"""
Use this method when you need to tell the user that something is happening on the bot's side.
The status is set for 5 seconds or less
(when a message arrives from your bot, Telegram clients clear its typing status).
We only recommend using this method when a response from the bot will take
a noticeable amount of time to arrive.
Source: https://core.telegram.org/bots/api#sendchataction
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param action: Type of action to broadcast
:type action: :obj:`base.String`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SEND_CHAT_ACTION, payload)
return result
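A minimal usage sketch for this method, assuming an aiogram v2-style Bot; the token and chat id are placeholders, and 'typing' is one of the documented action values.

import asyncio
from aiogram import Bot

async def show_typing():
    # Placeholder token and chat id; real values come from BotFather and your chat.
    bot = Bot(token='<BOT_TOKEN>')
    await bot.send_chat_action(chat_id=123456789, action='typing')
    await bot.close()

asyncio.run(show_typing())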
def proximal_convex_conj_l2(space, lam=1, g=None):
r"""Proximal operator factory of the convex conj of the l2-norm/distance.
Function for the proximal operator of the convex conjugate of the
functional F where F is the l2-norm (or distance to g, if given)::
F(x) = lam ||x - g||_2
with x and g elements in ``space``, scaling factor lam, and given data g.
Parameters
----------
space : `LinearSpace`
Domain of F(x). Needs to be a Hilbert space.
That is, have an inner product (`LinearSpace.inner`).
lam : positive float, optional
Scaling factor or regularization parameter.
g : ``space`` element, optional
An element in ``space``. Default: ``space.zero``.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
Most problems are formulated for the squared norm/distance; in that case
use `proximal_convex_conj_l2_squared` instead.
The :math:`L_2`-norm/distance :math:`F` is given by
.. math::
F(x) = \lambda \|x - g\|_2
The convex conjugate :math:`F^*` of :math:`F` is given by
.. math::
F^*(y) = \begin{cases}
0 & \text{if } \|y-g\|_2 \leq \lambda, \\
\infty & \text{else.}
\end{cases}
For a step size :math:`\sigma`, the proximal operator of
:math:`\sigma F^*` is given by the projection onto the set of :math:`y`
satisfying :math:`\|y-g\|_2 \leq \lambda`, i.e., by
.. math::
\mathrm{prox}_{\sigma F^*}(y) = \begin{cases}
\lambda \frac{y - g}{\|y - g\|}
& \text{if } \|y-g\|_2 > \lambda, \\
y & \text{if } \|y-g\|_2 \leq \lambda
\end{cases}
Note that the expression is independent of :math:`\sigma`.
See Also
--------
proximal_l2 : proximal without convex conjugate
proximal_convex_conj_l2_squared : proximal for squared norm/distance
"""
prox_l2 = proximal_l2(space, lam=lam, g=g)
return proximal_convex_conj(prox_l2)
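As a worked illustration only, the closed-form expression quoted in the Notes can be evaluated directly with NumPy. This sketch implements the quoted formula for flat arrays, not the ODL operator factory itself, and the sample input is arbitrary.

import numpy as np

def prox_sigma_f_conj(y, lam=1.0, g=None):
    # Evaluate the formula from the Notes: lam * (y - g) / ||y - g||_2 when
    # ||y - g||_2 > lam, and y unchanged otherwise (independent of sigma).
    g = np.zeros_like(y) if g is None else g
    norm = np.linalg.norm(y - g)
    return lam * (y - g) / norm if norm > lam else y

y = np.array([3.0, 4.0])               # ||y||_2 = 5 > lam
print(prox_sigma_f_conj(y, lam=1.0))   # -> [0.6 0.8], a vector of norm lam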
def resolve_profile(name, include_expired=False, include_name_record=False, hostport=None, proxy=None):
"""
Resolve a name to its profile.
This is a multi-step process:
1. get the name record
2. get the zone file
3. parse the zone file to get its URLs (if it's not well-formed, then abort)
4. fetch and authenticate the JWT at each URL (abort if there are none)
5. extract the profile JSON and return that, along with the zone file and public key
Return {'profile': ..., 'zonefile': ..., 'public_key': ..., ['name_record': ...]} on success
Return {'error': ...} on error
"""
assert hostport or proxy, 'Need hostport or proxy'
name_rec = get_name_record(name, include_history=False, include_expired=include_expired, include_grace=False, proxy=proxy, hostport=hostport)
if 'error' in name_rec:
log.error("Failed to get name record for {}: {}".format(name, name_rec['error']))
return {'error': 'Failed to get name record: {}'.format(name_rec['error']), 'http_status': name_rec.get('http_status', 500)}
if 'grace_period' in name_rec and name_rec['grace_period']:
log.error("Name {} is in the grace period".format(name))
return {'error': 'Name {} is not yet expired, but is in the renewal grace period.'.format(name), 'http_status': name_rec.get('http_status', 404)}
if 'value_hash' not in name_rec:
log.error("Name record for {} has no zone file hash".format(name))
return {'error': 'No zone file hash in name record for {}'.format(name), 'http_status': 404}
zonefile_hash = name_rec['value_hash']
zonefile_res = get_zonefiles(hostport, [zonefile_hash], proxy=proxy)
if 'error' in zonefile_res:
log.error("Failed to get zone file for {} for name {}: {}".format(zonefile_hash, name, zonefile_res['error']))
return {'error': 'Failed to get zone file for {}'.format(name), 'http_status': 404}
zonefile_txt = zonefile_res['zonefiles'][zonefile_hash]
log.debug("Got {}-byte zone file {}".format(len(zonefile_txt), zonefile_hash))
try:
zonefile_data = blockstack_zones.parse_zone_file(zonefile_txt)
zonefile_data = dict(zonefile_data)
assert 'uri' in zonefile_data
if len(zonefile_data['uri']) == 0:
return {'error': 'No URI records in zone file {} for {}'.format(zonefile_hash, name), 'http_status': 404}
except Exception as e:
if BLOCKSTACK_TEST:
log.exception(e)
return {'error': 'Failed to parse zone file {} for {}'.format(zonefile_hash, name), 'http_status': 404}
urls = [uri['target'] for uri in zonefile_data['uri']]
for url in urls:
jwt = get_JWT(url, address=str(name_rec['address']))
if not jwt:
continue
if 'claim' not in jwt['payload']:
# not something we produced
log.warning("No 'claim' field in payload for {}".format(url))
continue
# success!
profile_data = jwt['payload']['claim']
public_key = str(jwt['payload']['issuer']['publicKey'])
# return public key that matches address
pubkeys = [virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.decompress(public_key)),
virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.compress(public_key))]
if name_rec['address'] == pubkeys[0].address():
public_key = pubkeys[0].to_hex()
else:
public_key = pubkeys[1].to_hex()
ret = {
'profile': profile_data,
'zonefile': zonefile_txt,
'public_key': public_key,
}
if include_name_record:
ret['name_record'] = name_rec
return ret
log.error("No zone file URLs resolved to a JWT with the public key whose address is {}".format(name_rec['address']))
return {'error': 'No profile found for this name', 'http_status': 404}
def nolist(self, account): # pragma: no cover
""" Remove an other account from any list of this account
"""
assert callable(self.blockchain.account_whitelist)
return self.blockchain.account_whitelist(account, lists=[], account=self)
def is_file(cls, file):
'''Return whether the file is likely CSS.'''
peeked_data = wpull.string.printable_bytes(
wpull.util.peek_file(file)).lower()
if b'<html' in peeked_data:
return VeryFalse
if re.search(br'@import |color:|background[a-z-]*:|font[a-z-]*:',
peeked_data):
return True
def _derive_stereographic():
"""Compute the formulae to cut-and-paste into the routine below."""
from sympy import symbols, atan2, acos, rot_axis1, rot_axis3, Matrix
x_c, y_c, z_c, x, y, z = symbols('x_c y_c z_c x y z')
# The angles we'll need to rotate through.
around_z = atan2(x_c, y_c)
around_x = acos(-z_c)
# Apply rotations to produce an "o" = output vector.
v = Matrix([x, y, z])
xo, yo, zo = rot_axis1(around_x) * rot_axis3(-around_z) * v
# Which we then use the stereographic projection to produce the
# final "p" = plotting coordinates.
xp = xo / (1 - zo)
yp = yo / (1 - zo)
return xp, yp
def _read_regex(ctx: ReaderContext) -> Pattern:
"""Read a regex reader macro from the input stream."""
s = _read_str(ctx, allow_arbitrary_escapes=True)
try:
return langutil.regex_from_str(s)
except re.error:
raise SyntaxError(f"Unrecognized regex pattern syntax: {s}")
def process_request(self, num_pending, ports, request_blocking=False):
'''ports are the open ports at the worker; num_pending is the number of pending
items on the worker's stack. A non-zero num_pending means that the worker is
pending on something while looking for a new job, so the worker should not be killed.
'''
if any(port in self._step_requests for port in ports):
# if the port is available
port = [x for x in ports if x in self._step_requests][0]
self._worker_backend_socket.send_pyobj(
self._step_requests.pop(port))
self._n_processed += 1
self.report(f'Step {port} processed')
# port should be in claimed ports
self._claimed_ports.remove(port)
if ports[0] in self._last_pending_time:
self._last_pending_time.pop(ports[0])
elif any(port in self._claimed_ports for port in ports):
# the port is claimed, but the real message is not yet available
self._worker_backend_socket.send_pyobj({})
self.report(f'pending with claimed {ports}')
elif any(port in self._blocking_ports for port in ports):
# in block list but appear to be idle, kill it
self._max_workers -= 1
env.logger.debug(
f'Reduce maximum number of workers to {self._max_workers} after completion of a blocking subworkflow.'
)
for port in ports:
if port in self._blocking_ports:
self._blocking_ports.remove(port)
if port in self._available_ports:
self._available_ports.remove(port)
self._worker_backend_socket.send_pyobj(None)
self._num_workers -= 1
self.report(f'Blocking worker {ports} killed')
elif self._substep_requests:
# port is not claimed, free to use for substep worker
msg = self._substep_requests.pop()
self._worker_backend_socket.send_pyobj(msg)
self._n_processed += 1
self.report(f'Substep processed with {ports[0]}')
# port can however be in available ports
for port in ports:
if port in self._available_ports:
self._available_ports.remove(port)
if port in self._last_pending_time:
self._last_pending_time.pop(port)
elif request_blocking:
self._worker_backend_socket.send_pyobj({})
return ports[0]
elif num_pending == 0 and ports[
0] in self._last_pending_time and time.time(
) - self._last_pending_time[ports[0]] > 5:
# kill the worker
for port in ports:
if port in self._available_ports:
self._available_ports.remove(port)
self._worker_backend_socket.send_pyobj(None)
self._num_workers -= 1
self.report(f'Kill standing {ports}')
self._last_pending_time.pop(ports[0])
else:
if num_pending == 0 and ports[0] not in self._last_pending_time:
self._last_pending_time[ports[0]] = time.time()
self._available_ports.add(ports[0])
self._worker_backend_socket.send_pyobj({})
ports = tuple(ports)
if (ports, num_pending) not in self._last_pending_msg or time.time(
) - self._last_pending_msg[(ports, num_pending)] > 1.0:
self.report(
f'pending with port {ports} at num_pending {num_pending}')
self._last_pending_msg[(ports, num_pending)] = time.time()
def invokeRunnable(self):
"""
Run my runnable, and reschedule or delete myself based on its result.
Must be run in a transaction.
"""
runnable = self.runnable
if runnable is None:
self.deleteFromStore()
else:
try:
self.running = True
newTime = runnable.run()
finally:
self.running = False
self._rescheduleFromRun(newTime)
def gene_counts(self):
"""
Returns number of elements overlapping each gene name. Expects the
derived class (VariantCollection or EffectCollection) to have
an implementation of groupby_gene_name.
"""
return {
gene_name: len(group)
for (gene_name, group)
in self.groupby_gene_name().items()
}
def on_start(self):
"""Runs when the actor is started and schedules a status update
"""
logger.info('StatusReporter started.')
# if configured not to report status then return immediately
if self.config['status_update_interval'] == 0:
logger.info('StatusReporter disabled by configuration.')
return
self.in_future.report_status()
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
"""
import scipy.linalg.lapack
npt = bd_all.shape[0]
n = bd_idx.shape[1]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]:
b_selector = bd_idx[i]
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector])
sigmasq[i] = - x[:, 0].dot(b[:, 0])
return kvalues, sigmasq
def stop_archive(self, archive_id):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
@param [String] archive_id The archive ID of the archive you want to stop recording.
:rtype: The Archive object corresponding to the archive being stopped.
"""
response = requests.post(self.endpoints.archive_url(archive_id) + '/stop', headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
elif response.status_code == 409:
raise ArchiveError("Archive is not in started state")
else:
raise RequestError("An unexpected error occurred", response.status_code)
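A hedged usage sketch; the client construction and archive id are placeholders, the exception import path is an assumption, and the exception names are the ones raised by the method above.

from opentok import OpenTok
from opentok.exceptions import ArchiveError, NotFoundError  # import path assumed

opentok = OpenTok('<API_KEY>', '<API_SECRET>')
try:
    # Placeholder archive id; a real id comes from start_archive or list_archives.
    archive = opentok.stop_archive('<ARCHIVE_ID>')
    print(archive.status)
except NotFoundError:
    print('no such archive')
except ArchiveError:
    print('archive is not in the started state')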
def get(self, sid):
"""
Constructs a ReservationContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def _controller(self):
"""Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
"""Server controler."""
if not self.init_logginig:
# the reason put the codes here is because we cannot get
# kvstore.rank earlier
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0:
try:
optimizer = pickle.loads(cmd_body)
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller
def to_json(self):
"""Convert the Design Day to a dictionary."""
return {
'location': self.location.to_json(),
'design_days': [des_d.to_json() for des_d in self.design_days]
}
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset)
def _load_cpp4(self, filename):
"""Initializes Grid from a CCP4 file."""
ccp4 = CCP4.CCP4()
ccp4.read(filename)
grid, edges = ccp4.histogramdd()
self.__init__(grid=grid, edges=edges, metadata=self.metadata)
def compute_region_border(start, end):
"""
given the buffer start and end indices of a range, compute the border edges
that should be drawn to enclose the range.
this function currently assumes 0x10 length rows.
the result is a dictionary from buffer index to Cell instance.
the Cell instance has boolean properties "top", "bottom", "left", and "right"
that describe if a border should be drawn on that side of the cell view.
:rtype: Mapping[int, CellT]
"""
cells = defaultdict(Cell)
start_row = row_number(start)
end_row = row_number(end)
if end % 0x10 == 0:
end_row -= 1
## topmost cells
if start_row == end_row:
for i in range(start, end):
cells[i].top = True
else:
for i in range(start, row_end_index(start) + 1):
cells[i].top = True
# cells on second row, top left
if start_row != end_row:
next_row_start = row_start_index(start) + 0x10
for i in range(next_row_start, next_row_start + column_number(start)):
cells[i].top = True
## bottommost cells
if start_row == end_row:
for i in range(start, end):
cells[i].bottom = True
else:
for i in range(row_start_index(end), end):
cells[i].bottom = True
# cells on second-to-last row, bottom right
if start_row != end_row:
prev_row_end = row_end_index(end) - 0x10
for i in range(prev_row_end - (0x10 - column_number(end) - 1), prev_row_end + 1):
cells[i].bottom = True
## leftmost cells
if start_row == end_row:
cells[start].left = True
else:
second_row_start = row_start_index(start) + 0x10
for i in range(second_row_start, row_start_index(end) + 0x10, 0x10):
cells[i].left = True
# cells in first row, top left
if start_row != end_row:
cells[start].left = True
## rightmost cells
if start_row == end_row:
cells[end - 1].right = True
else:
penultimate_row_end = row_end_index(end) - 0x10
for i in range(row_end_index(start), penultimate_row_end + 0x10, 0x10):
cells[i].right = True
# cells in last row, bottom right
if start_row != end_row:
cells[end - 1].right = True
# convert back to standard dict
# trick from: http://stackoverflow.com/a/20428703/87207
cells.default_factory = None
return cells
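A hedged usage sketch, assuming the surrounding module's Cell class and row helpers are importable; the byte range is arbitrary and only the attribute-access pattern matters.

# Borders for the byte range [0x08, 0x28) in a 16-bytes-per-row hex view.
cells = compute_region_border(0x08, 0x28)
for index in sorted(cells):
    cell = cells[index]
    sides = [s for s in ('top', 'bottom', 'left', 'right') if getattr(cell, s, False)]
    print('{:#04x}: {}'.format(index, ', '.join(sides)))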
def gene_names(self):
"""
Return names of all genes which overlap this variant. Calling
this method is significantly cheaper than calling `Variant.genes()`,
which has to issue many more queries to construct each Gene object.
"""
return self.ensembl.gene_names_at_locus(
self.contig, self.start, self.end)
def ldr(scatterer, h_pol=True):
"""
Linear depolarization ratio (LDR) for the current setup.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), return LDR_h.
If False, return LDR_v.
Returns:
The LDR.
"""
Z = scatterer.get_Z()
if h_pol:
return (Z[0,0] - Z[0,1] + Z[1,0] - Z[1,1]) / \
(Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])
else:
return (Z[0,0] + Z[0,1] - Z[1,0] - Z[1,1]) / \
(Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
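A worked evaluation of the h-polarized expression with a synthetic, non-physical Z matrix, purely to make the index pattern concrete.

import numpy as np

# Synthetic 4x4 scattering matrix; the values are not from a real scatterer.
Z = np.array([[1.00, 0.10, 0.10, 0.05],
              [0.10, 0.90, 0.05, 0.10],
              [0.10, 0.05, 0.95, 0.10],
              [0.05, 0.10, 0.10, 1.00]])
ldr_h = (Z[0, 0] - Z[0, 1] + Z[1, 0] - Z[1, 1]) / (Z[0, 0] - Z[0, 1] - Z[1, 0] + Z[1, 1])
print(ldr_h)  # (1.0 - 0.1 + 0.1 - 0.9) / (1.0 - 0.1 - 0.1 + 0.9) = 0.1 / 1.7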
def downloaded(name,
version=None,
pkgs=None,
fromrepo=None,
ignore_epoch=None,
**kwargs):
'''
.. versionadded:: 2017.7.0
Ensure that the package is downloaded, and that it is the correct version
(if specified).
Currently supported for the following pkg providers:
:mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`
:param str name:
The name of the package to be downloaded. This parameter is ignored if
"pkgs" is used. Additionally, please note that this option can
only be used to download packages from a software repository.
:param str version:
Download a specific version of a package.
.. important::
As of version 2015.8.7, for distros which use yum/dnf, packages
which have a version with a nonzero epoch (that is, versions which
start with a number followed by a colon) must have the epoch included
when specifying the version number. For example:
.. code-block:: yaml
vim-enhanced:
pkg.downloaded:
- version: 2:7.4.160-1.el7
An **ignore_epoch** argument has been added which causes the
epoch to be disregarded when the state checks to see if the desired
version was installed.
You can install a specific version when using the ``pkgs`` argument by
including the version after the package:
.. code-block:: yaml
common_packages:
pkg.downloaded:
- pkgs:
- unzip
- dos2unix
- salt-minion: 2015.8.5-1.el6
:param bool resolve_capabilities:
Turn on resolving capabilities. This allows one to name "provides" or alias names for packages.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: yaml
zsh:
pkg.downloaded:
- version: 5.0.5-4.63
- fromrepo: "myrepository"
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if 'pkg.list_downloaded' not in __salt__:
ret['result'] = False
ret['comment'] = 'The pkg.downloaded state is not available on ' \
'this platform'
return ret
if not pkgs and isinstance(pkgs, list):
ret['result'] = True
ret['comment'] = 'No packages to download provided'
return ret
# If just a name (and optionally a version) is passed, just pack them into
# the pkgs argument.
if name and not pkgs:
if version:
pkgs = [{name: version}]
version = None
else:
pkgs = [name]
# It doesn't make sense here to receive 'downloadonly' in kwargs
# as we're explicitly passing 'downloadonly=True' to the execution module.
if 'downloadonly' in kwargs:
del kwargs['downloadonly']
pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)
# Only downloading not yet downloaded packages
targets = _find_download_targets(name,
version,
pkgs,
fromrepo=fromrepo,
ignore_epoch=ignore_epoch,
**kwargs)
if isinstance(targets, dict) and 'result' in targets:
return targets
elif not isinstance(targets, dict):
ret['result'] = False
ret['comment'] = 'An error was encountered while checking targets: ' \
'{0}'.format(targets)
return ret
if __opts__['test']:
summary = ', '.join(targets)
ret['comment'] = 'The following packages would be ' \
'downloaded: {0}'.format(summary)
return ret
try:
pkg_ret = __salt__['pkg.install'](name=name,
pkgs=pkgs,
version=version,
downloadonly=True,
fromrepo=fromrepo,
ignore_epoch=ignore_epoch,
**kwargs)
ret['result'] = True
ret['changes'].update(pkg_ret)
except CommandExecutionError as exc:
ret = {'name': name, 'result': False}
if exc.info:
# Get information for state return from the exception.
ret['changes'] = exc.info.get('changes', {})
ret['comment'] = exc.strerror_without_changes
else:
ret['changes'] = {}
ret['comment'] = 'An error was encountered while downloading ' \
'package(s): {0}'.format(exc)
return ret
new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)
if failed:
summary = ', '.join([_get_desired_pkg(x, targets)
for x in failed])
ret['result'] = False
ret['comment'] = 'The following packages failed to ' \
'download: {0}'.format(summary)
if not ret['changes'] and not ret['comment']:
ret['result'] = True
ret['comment'] = 'Packages are already downloaded: ' \
'{0}'.format(', '.join(targets))
return ret
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types:
return data
kwargs = {}
for attr, attr_type in iteritems(klass.swagger_types):
if data is not None \
and klass.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
return instance
def get_billing_report(self, month, **kwargs): # noqa: E501
"""Get billing report. # noqa: E501
Fetch the billing report generated for the currently authenticated commercial non-subtenant account. Billing reports for subtenant accounts are included in their aggregator's billing report response. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report?month=2018-07 -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_billing_report(month, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str month: Queried year and month of billing report. (required)
:return: ReportResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_billing_report_with_http_info(month, **kwargs) # noqa: E501
else:
(data) = self.get_billing_report_with_http_info(month, **kwargs) # noqa: E501
return data
def total(self):
'''
Returns sum of all counts in all features that are multisets.
'''
feats = imap(lambda name: self[name], self._counters())
return sum(chain(*map(lambda mset: map(abs, mset.values()), feats)))
def execute_prepared_cql3_query(self, itemId, values, consistency):
"""
Parameters:
- itemId
- values
- consistency
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_execute_prepared_cql3_query(itemId, values, consistency)
return d
def a_stays_connected(ctx):
"""Stay connected."""
ctx.ctrl.connected = True
ctx.device.connected = False
return True
def exit(self, status=EXIT_OK, message=None):
"""
Terminate the script.
"""
if not self.parser:
self.parser = argparse.ArgumentParser()
if self.msg_on_error_only:
# if msg_on_error_only is True
if status != EXIT_OK:
# if we have an error we'll exit with the message also.
self.parser.exit(status, message)
else:
# else we'll exit with the status only
self.parser.exit(status, None)
else:
# else if msg_on_error_only is not True
# we'll exit with the status and the message
self.parser.exit(status, message)
def load(self, filepath):
"""Import '[Term]' entries from an .obo file."""
for attributeLines in oboTermParser(filepath):
oboTerm = _attributeLinesToDict(attributeLines)
if oboTerm['id'] not in self.oboTerms:
self.oboTerms[oboTerm['id']] = oboTerm
else:
oldOboTerm = self.oboTerms[oboTerm['id']]
oldTermIsObsolete = _termIsObsolete(oldOboTerm)
newTermIsObsolete = _termIsObsolete(oboTerm)
if oldTermIsObsolete and not newTermIsObsolete:
self.oboTerms[oboTerm['id']] = oboTerm
else:
#At least one of two terms with identical id must be obsolete
assert oldTermIsObsolete or newTermIsObsolete
def unionByName(self, other):
""" Returns a new :class:`DataFrame` containing union of rows in this and another frame.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
def process_event(self, c):
"""Returns a message from tick() to be displayed if game is over"""
if c == "":
sys.exit()
elif c in key_directions:
self.move_entity(self.player, *vscale(self.player.speed, key_directions[c]))
else:
return "try arrow keys, w, a, s, d, or ctrl-D (you pressed %r)" % c
return self.tick()
def _establish_tunnel(self, connection, address):
'''Establish a TCP tunnel.
Coroutine.
'''
host = '[{}]'.format(address[0]) if ':' in address[0] else address[0]
port = address[1]
request = RawRequest('CONNECT', '{0}:{1}'.format(host, port))
self.add_auth_header(request)
stream = Stream(connection, keep_alive=True)
_logger.debug('Sending Connect.')
yield from stream.write_request(request)
_logger.debug('Read proxy response.')
response = yield from stream.read_response()
if response.status_code != 200:
debug_file = io.BytesIO()
_logger.debug('Read proxy response body.')
yield from stream.read_body(request, response, file=debug_file)
debug_file.seek(0)
_logger.debug(ascii(debug_file.read()))
if response.status_code == 200:
connection.tunneled = True
else:
raise NetworkError(
'Proxy does not support CONNECT: {} {}'
.format(response.status_code,
wpull.string.printable_str(response.reason))
)
def to_type(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
inside_array = False
empty_array = True
templated_tokens = []
def add_type():
if not name_tokens:
return
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.is_keyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
templated_types = self.to_type(templated_tokens)
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
del templated_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == ']':
inside_array = False
if empty_array:
pointer = True
else:
array = True
elif inside_array:
empty_array = False
elif token.name == '<':
templated_tokens, i = self._get_template_end(tokens, i + 1)
continue
elif token.name == ',' or token.name == '(':
add_type()
reference = pointer = array = False
empty_array = True
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
inside_array = True
elif token.name != ')':
name_tokens.append(token)
i += 1
add_type()
return result
def get_vexrc(options, environ):
"""Get a representation of the contents of the config file.
:returns:
a Vexrc instance.
"""
# Complain if user specified nonexistent file with --config.
# But we don't want to complain just because ~/.vexrc doesn't exist.
if options.config and not os.path.exists(options.config):
raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config))
filename = options.config or os.path.expanduser('~/.vexrc')
vexrc = config.Vexrc.from_file(filename, environ)
return vexrc
def set_mode(self, mode):
"""
:Parameters:
`mode` : *(int)*
New mode, must be one of:
- `StreamTokenizer.STRICT_MIN_LENGTH`
- `StreamTokenizer.DROP_TRAILING_SILENCE`
- `StreamTokenizer.STRICT_MIN_LENGTH | StreamTokenizer.DROP_TRAILING_SILENCE`
- `0`
See `StreamTokenizer.__init__` for more information about the mode.
"""
if not mode in [self.STRICT_MIN_LENGTH, self.DROP_TRAILING_SILENCE,
self.STRICT_MIN_LENGTH | self.DROP_TRAILING_SILENCE, 0]:
raise ValueError("Wrong value for mode")
self._mode = mode
self._strict_min_length = (mode & self.STRICT_MIN_LENGTH) != 0
self._drop_tailing_silence = (mode & self.DROP_TRAILING_SILENCE) != 0
def fromBban(bban):
"""
Convert the passed BBAN to an IBAN for this country specification.
Please note that <i>"generation of the IBAN shall be the exclusive
responsibility of the bank/branch servicing the account"</i>.
This method implements the preferred algorithm described in
http://en.wikipedia.org/wiki/International_Bank_Account_Number#Generating_IBAN_check_digits
@method fromBban
@param {String} bban the BBAN to convert to IBAN
@returns {Iban} the IBAN object
"""
countryCode = "XE"
remainder = mod9710(iso13616Prepare(countryCode + "00" + bban))
checkDigit = ("0" + str(98 - remainder))[-2:]
return Iban(countryCode + checkDigit + bban)
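A self-contained sketch of the check-digit step described above (ISO 13616, mod-97-10); it mirrors what iso13616Prepare and mod9710 are assumed to do rather than calling them, and the BBAN string is an arbitrary example.

def check_digits(country_code, bban):
    # Move the country code plus "00" to the end, map letters to numbers
    # (A -> 10 ... Z -> 35), then take 98 minus the remainder mod 97.
    rearranged = bban + country_code + '00'
    numeric = ''.join(str(int(ch, 36)) for ch in rearranged)
    return '{:02d}'.format(98 - int(numeric) % 97)

bban = 'ETHXREGGAVOFYORK'   # arbitrary example BBAN
print('XE' + check_digits('XE', bban) + bban)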
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warn("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
# Like Python itself, ignore the second if the first of these is present
elif self.file_to_run:
fname = self.file_to_run
try:
self._exec_file(fname)
except:
self.log.warn("Error in executing file in user namespace: %s" %
fname)
self.shell.showtraceback()
def to_routing_header(params):
"""Returns a routing header string for the given request parameters.
Args:
params (Mapping[str, Any]): A dictionary containing the request
parameters used for routing.
Returns:
str: The routing header string.
"""
if sys.version_info[0] < 3:
# Python 2 does not have the "safe" parameter for urlencode.
return urlencode(params).replace("%2F", "/")
return urlencode(
params,
# Per Google API policy (go/api-url-encoding), / is not encoded.
safe="/",
)
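A small, self-contained illustration of the encoding choice (slashes left unescaped); the parameter names are placeholders rather than a specific Google API.

from urllib.parse import urlencode

params = {"table_name": "projects/p/instances/i/tables/t", "app_profile_id": "default"}
print(urlencode(params, safe="/"))
# table_name=projects/p/instances/i/tables/t&app_profile_id=default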
def add_file_arg(self, filename):
"""
Add a file argument to the executable. Arguments are appended after any
options and their order is guaranteed. Also adds the file name to the
list of required input data for this job.
@param filename: file to add as argument.
"""
self.__arguments.append(filename)
if filename not in self.__input_files:
self.__input_files.append(filename)
def get_vnetwork_vms_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def check_prompt_code(response):
"""
Sometimes there is an additional numerical code on the response page that needs to be selected
on the prompt from a list of multiple choice. Print it if it's there.
"""
num_code = response.find("div", {"jsname": "EKvSSd"})
if num_code:
print("numerical code for prompt: {}".format(num_code.string))
def _generate_null_hocr(output_hocr, output_sidecar, image):
"""Produce a .hocr file that reports no text detected on a page that is
the same size as the input image."""
from PIL import Image
im = Image.open(image)
w, h = im.size
with open(output_hocr, 'w', encoding="utf-8") as f:
f.write(HOCR_TEMPLATE.format(w, h))
with open(output_sidecar, 'w', encoding='utf-8') as f:
f.write('[skipped page]')
def get_activity_photos(self, activity_id, size=None, only_instagram=False):
"""
Gets the photos from an activity.
http://strava.github.io/api/v3/photos/
:param activity_id: The activity for which to fetch photos.
:type activity_id: int
:param size: the requested size of the activity's photos. URLs for the photos will be returned that best match
the requested size. If not included, the smallest size is returned
:type size: int
:param only_instagram: Parameter to preserve legacy behavior of only returning Instagram photos.
:type only_instagram: bool
:return: An iterator of :class:`stravalib.model.ActivityPhoto` objects.
:rtype: :class:`BatchedResultsIterator`
"""
params = {}
if not only_instagram:
params['photo_sources'] = 'true'
if size is not None:
params['size'] = size
result_fetcher = functools.partial(self.protocol.get,
'/activities/{id}/photos',
id=activity_id, **params)
return BatchedResultsIterator(entity=model.ActivityPhoto,
bind_client=self,
result_fetcher=result_fetcher)
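A hedged usage sketch with a placeholder access token and activity id; printing the photo object directly avoids assuming any particular attribute.

from stravalib.client import Client

client = Client(access_token='<ACCESS_TOKEN>')
# Placeholder activity id; request URLs that best match a 600px size.
for photo in client.get_activity_photos(1234567890, size=600):
    print(photo)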
def transfer_owner(self, new_owner: Address) -> TxReceipt:
"""
Transfers ownership of this registry instance to the given ``new_owner``. Only the
``owner`` is allowed to transfer ownership.
* Parameters:
* ``new_owner``: The address of the new owner.
"""
tx_hash = self.registry.functions.transferOwner(new_owner).transact()
return self.w3.eth.waitForTransactionReceipt(tx_hash)
def readline(self):
"""Read one line from the pseudoterminal, and return it as unicode.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
b = super(PtyProcessUnicode, self).readline()
return self.decoder.decode(b, final=False)
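A hedged usage sketch; spawning bc is an arbitrary choice, and the exact lines read back depend on terminal echo settings.

from ptyprocess import PtyProcessUnicode

p = PtyProcessUnicode.spawn(['bc', '-q'])
p.write('1+1\n')
print(p.readline())   # typically the echoed input line
print(p.readline())   # typically the result, '2'
p.terminate()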
def _set_source(self, v, load=False):
"""
Setter method for source, mapped from YANG variable /acl_mirror/source (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_source is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("src_interface_type src_interface_name destination dst_interface_type dst_interface_name",source.source, yang_name="source", rest_name="source", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-interface-type src-interface-name destination dst-interface-type dst-interface-name', extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}), is_container='list', yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("src_interface_type src_interface_name destination dst_interface_type dst_interface_name",source.source, yang_name="source", rest_name="source", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-interface-type src-interface-name destination dst-interface-type dst-interface-name', extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}), is_container='list', yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='list', is_config=True)""",
})
self.__source = t
if hasattr(self, '_set'):
self._set()
def _refresh_resource_index(self, resource):
"""Refresh index for given resource.
:param resource: resource name
"""
if self._resource_config(resource, 'FORCE_REFRESH', True):
self.elastic(resource).indices.refresh(self._resource_index(resource))
def load_code(self):
"""
Returns a Python code object like xdis.unmarshal.load_code(),
but in addition we decrypt the data in self.bufstr.
That is:
* calculate the TEA key,
* decrypt self.bufstr
* create and return a Python code-object
"""
a = self.load_int()
b = self.load_int()
key = get_keys(a, b)
padsize = (b + 15) & ~0xf
intsize = padsize/4
data = self.bufstr[self.bufpos:self.bufpos+padsize]
# print("%d: %d (%d=%d)" % (self.bufpos, b, padsize, len(data)))
data = list(struct.unpack('<%dL' % intsize, data))
tea_decipher(data, key)
self.bufpos += padsize
obj = xmarshal._FastUnmarshaller(struct.pack('<%dL' % intsize, *data))
code = obj.load_code()
co_code = patch(code.co_code)
if PYTHON3:
return Code2Compat(code.co_argcount, code.co_nlocals, code.co_stacksize,
code.co_flags,
co_code, code.co_consts, code.co_names, code.co_varnames,
code.co_filename, code.co_name, code.co_firstlineno,
code.co_lnotab, code.co_freevars, code.co_cellvars)
else:
return types.CodeType(code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags,
co_code, code.co_consts, code.co_names, code.co_varnames,
code.co_filename, code.co_name, code.co_firstlineno,
code.co_lnotab, code.co_freevars, code.co_cellvars)
async def main():
"""Run."""
async with ClientSession() as websession:
try:
client = Client(websession)
await client.load_local('<IP ADDRESS>', '<PASSWORD>', websession)
for controller in client.controllers.values():
print('CLIENT INFORMATION')
print('Name: {0}'.format(controller.name))
print('MAC Address: {0}'.format(controller.mac))
print('API Version: {0}'.format(controller.api_version))
print(
'Software Version: {0}'.format(
controller.software_version))
print(
'Hardware Version: {0}'.format(
controller.hardware_version))
# Work with diagnostics:
print()
print('RAINMACHINE DIAGNOSTICS')
data = await controller.diagnostics.current()
print('Uptime: {0}'.format(data['uptime']))
print('Software Version: {0}'.format(data['softwareVersion']))
# Work with parsers:
print()
print('RAINMACHINE PARSERS')
for parser in await controller.parsers.current():
print(parser['name'])
# Work with programs:
print()
print('ALL PROGRAMS')
for program in await controller.programs.all(
include_inactive=True):
print(
'Program #{0}: {1}'.format(
program['uid'], program['name']))
print()
print('PROGRAM BY ID')
program_1 = await controller.programs.get(1)
print(
"Program 1's Start Time: {0}".format(
program_1['startTime']))
print()
print('NEXT RUN TIMES')
for program in await controller.programs.next():
print(
'Program #{0}: {1}'.format(
program['pid'], program['startTime']))
print()
print('RUNNING PROGRAMS')
for program in await controller.programs.running():
print('Program #{0}'.format(program['uid']))
print()
print('STARTING PROGRAM #1')
print(await controller.programs.start(1))
await asyncio.sleep(3)
print()
print('STOPPING PROGRAM #1')
print(await controller.programs.stop(1))
# Work with provisioning:
print()
print('PROVISIONING INFO')
name = await controller.provisioning.device_name
print('Device Name: {0}'.format(name))
settings = await controller.provisioning.settings()
print(
'Database Path: {0}'.format(
settings['system']['databasePath']))
print(
'Station Name: {0}'.format(
settings['location']['stationName']))
wifi = await controller.provisioning.wifi()
print('IP Address: {0}'.format(wifi['ipAddress']))
# Work with restrictions:
print()
print('RESTRICTIONS')
current = await controller.restrictions.current()
print(
'Rain Delay Restrictions: {0}'.format(
current['rainDelay']))
universal = await controller.restrictions.universal()
print(
'Freeze Protect: {0}'.format(
universal['freezeProtectEnabled']))
print('Hourly Restrictions:')
for restriction in await controller.restrictions.hourly():
print(restriction['name'])
raindelay = await controller.restrictions.raindelay()
print(
'Rain Delay Counter: {0}'.format(
raindelay['delayCounter']))
# Work with restrictions:
print()
print('STATS')
today = await controller.stats.on_date(
date=datetime.date.today())
print('Min for Today: {0}'.format(today['mint']))
for day in await controller.stats.upcoming(details=True):
print('{0} Min: {1}'.format(day['day'], day['mint']))
# Work with watering:
print()
print('WATERING')
for day in await controller.watering.log(
date=datetime.date.today()):
print(
'{0} duration: {1}'.format(
day['date'], day['realDuration']))
queue = await controller.watering.queue()
print('Current Queue: {0}'.format(queue))
print('Runs:')
for watering_run in await controller.watering.runs(
date=datetime.date.today()):
print(
'{0} ({1})'.format(
watering_run['dateTime'], watering_run['et0']))
print()
print('PAUSING ALL WATERING FOR 30 SECONDS')
print(await controller.watering.pause_all(30))
await asyncio.sleep(3)
print()
print('UNPAUSING WATERING')
print(await controller.watering.unpause_all())
print()
print('STOPPING ALL WATERING')
print(await controller.watering.stop_all())
# Work with zones:
print()
print('ALL ACTIVE ZONES')
for zone in await controller.zones.all(details=True):
print(
'Zone #{0}: {1} (soil: {2})'.format(
zone['uid'], zone['name'], zone['soil']))
print()
print('ZONE BY ID')
zone_1 = await controller.zones.get(1, details=True)
print(
"Zone 1's Name: {0} (soil: {1})".format(
zone_1['name'], zone_1['soil']))
print()
print('STARTING ZONE #1 FOR 3 SECONDS')
print(await controller.zones.start(1, 3))
await asyncio.sleep(3)
print()
print('STOPPING ZONE #1')
print(await controller.zones.stop(1))
except RainMachineError as err:
print(err)
def override (overrider_id, overridee_id):
"""Make generator 'overrider-id' be preferred to
'overridee-id'. If, when searching for generators
that could produce a target of certain type,
both those generators are among viable generators,
the overridden generator is immediately discarded.
The overridden generators are discarded immediately
after computing the list of viable generators, before
running any of them."""
assert isinstance(overrider_id, basestring)
assert isinstance(overridee_id, basestring)
__overrides.setdefault(overrider_id, []).append(overridee_id) | Make generator 'overrider-id' be preferred to
'overridee-id'. If, when searching for generators
that could produce a target of certain type,
both those generators are among viable generators,
the overridden generator is immediately discarded.
The overridden generators are discarded immediately
after computing the list of viable generators, before
running any of them. |
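A minimal usage sketch for override(); the generator ids are made up for illustration, and it assumes the module-level __overrides dict (and Python 2's basestring) from the surrounding code.
# Hypothetical generator ids; after these calls
# __overrides == {'msvc.link': ['gcc.link', 'clang.link']}
override('msvc.link', 'gcc.link')
override('msvc.link', 'clang.link')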
def export_pages(root_page, export_unpublished=False):
"""
Create a JSON definition of part of a site's page tree starting
from root_page and descending into its descendants
By default only published pages are exported.
If a page is unpublished it and all its descendants are pruned even
if some of those descendants are themselves published. This ensures
that there are no orphan pages when the subtree is created in the
destination site.
If export_unpublished=True the root_page and all its descendants
are included.
"""
pages = Page.objects.descendant_of(root_page, inclusive=True).order_by('path').specific()
if not export_unpublished:
pages = pages.filter(live=True)
page_data = []
exported_paths = set()
for (i, page) in enumerate(pages):
parent_path = page.path[:-(Page.steplen)]
# skip over pages whose parents haven't already been exported
# (which means that export_unpublished is false and the parent was unpublished)
if i == 0 or (parent_path in exported_paths):
page_data.append({
'content': json.loads(page.to_json()),
'model': page.content_type.model,
'app_label': page.content_type.app_label,
})
exported_paths.add(page.path)
return {
'pages': page_data
} | Create a JSON definition of part of a site's page tree starting
from root_page and descending into its descendants
By default only published pages are exported.
If a page is unpublished it and all its descendants are pruned even
if some of those descendants are themselves published. This ensures
that there are no orphan pages when the subtree is created in the
destination site.
If export_unpublished=True the root_page and all its descendants
are included. |
def _calculate_mapping_reads(items, work_dir, input_backs=None):
"""Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file.
"""
out_file = os.path.join(work_dir, "mapping_reads.txt")
if not utils.file_exists(out_file):
lines = []
for data in items:
count = 0
for line in subprocess.check_output([
"samtools", "idxstats", dd.get_align_bam(data)]).decode().split("\n"):
if line.strip():
count += int(line.split("\t")[2])
lines.append("%s\t%s" % (dd.get_sample_name(data), count))
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("\n".join(lines) + "\n")
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
out_handle.write(line)
return out_file | Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file. |
def rebuild(self, recreate=True, force=False, **kwargs):
"Recreate (if needed) the wx_obj and apply new properties"
# detect if this involves a spec that needs to recreate the wx_obj:
needs_rebuild = any([isinstance(spec, (StyleSpec, InitSpec))
for spec_name, spec in self._meta.specs.items()
if spec_name in kwargs])
# validate if this gui object needs and support recreation
if needs_rebuild and recreate or force:
if DEBUG: print "rebuilding window!"
# recreate the wx_obj! warning: it will call Destroy()
self.__init__(**kwargs)
else:
if DEBUG: print "just setting attr!"
for name, value in kwargs.items():
setattr(self, name, value) | Recreate (if needed) the wx_obj and apply new properties |
def compare_version(version1, version2):
"""
Compares two versions.
"""
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return (normalize(version1) > normalize(version2))-(normalize(version1) < normalize(version2)) | Compares two versions. |
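A quick illustration of compare_version (assumes `re` is imported in the enclosing module); trailing '.0' groups are stripped before the element-wise comparison.
print(compare_version("1.10.0", "1.9.2"))  # 1  -> first argument is newer
print(compare_version("2.0", "2.0.0"))     # 0  -> equal after normalization
print(compare_version("0.9", "1.0"))       # -1 -> first argument is older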
def write_antenna(page, args, seg_plot=None, grid=False, ipn=False):
"""
Write antenna factors to markup.page object page and generate John's
detector response plot.
"""
from pylal import antenna
page.h3()
page.add('Antenna factors and sky locations')
page.h3.close()
th = []
td = []
th2 = []
td2 = []
ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]
if ipn:
antenna_ifo = {}
ra = []
dec = []
# FIXME: Remove hardcoding here and show this in all cases
search_file = open('../../../S5IPN_GRB%s_search_180deg.txt'
% args.grb_name)
for line in search_file:
ra.append(line.split()[0])
dec.append(line.split()[1])
for ifo in ifos:
antenna_ifo[ifo] = []
for k, l in zip(ra, dec):
_, _, _, f_q = antenna.response(args.start_time, float(k),
float(l), 0.0, 0.0, 'degree',
ifo)
antenna_ifo[ifo].append(round(f_q,3))
dectKeys = antenna_ifo.keys()
for elements in range(len(antenna_ifo.values()[0])):
newDict={}
for detectors in range(len(antenna_ifo.keys())):
newDict[dectKeys[detectors]] = antenna_ifo[\
dectKeys[detectors]][elements]
for key in newDict.keys():
th.append(key)
td.append(newDict.values())
page = write_table(page, list(set(th)), td)
for ifo in ifos:
_, _, _, f_q = antenna.response(args.start_time, args.ra, args.dec,
0.0, 0.0, 'degree',ifo)
th.append(ifo)
td.append(round(f_q, 3))
#FIXME: Work out a way to make these external calls safely
#cmmnd = 'projectedDetectorTensor --gps-sec %d --ra-deg %f --dec-deg %f' \
# % (args.start_time,args.ra, args.dec)
#for ifo in ifos:
# if ifo == 'H1':
# cmmnd += ' --display-lho'
# elif ifo == 'L1':
# cmmnd += ' --display-llo'
# elif ifo == 'V1':
# cmmnd += ' --display-virgo'
#status = make_external_call(cmmnd)
page = write_table(page, th, td)
# plot = markup.page()
# p = "projtens.png"
# plot.a(href=p, title="Detector response and polarization")
# plot.img(src=p)
# plot.a.close()
# th2 = ['Response Diagram']
# td2 = [plot() ]
# FIXME: Add these in!!
# plot = markup.page()
# p = "ALL_TIMES/plots_clustered/GRB%s_search.png"\
# % args.grb_name
# plot.a(href=p, title="Error Box Search")
# plot.img(src=p)
# plot.a.close()
# th2.append('Error Box Search')
# td2.append(plot())
# plot = markup.page()
# p = "ALL_TIMES/plots_clustered/GRB%s_simulations.png"\
# % args.grb_name
# plot.a(href=p, title="Error Box Simulations")
# plot.img(src=p)
# plot.a.close()
# th2.append('Error Box Simulations')
# td2.append(plot())
if seg_plot is not None:
plot = markup.page()
p = os.path.basename(seg_plot)
plot.a(href=p, title="Science Segments")
plot.img(src=p)
plot.a.close()
th2.append('Science Segments')
td2.append(plot())
plot = markup.page()
p = "ALL_TIMES/plots_clustered/GRB%s_sky_grid.png"\
% args.grb_name
plot.a(href=p, title="Sky Grid")
plot.img(src=p)
plot.a.close()
th2.append('Sky Grid')
td2.append(plot())
# plot = markup.page()
# p = "GRB%s_inspiral_horizon_distance.png"\
# % args.grb_name
# plot.a(href=p, title="Inspiral Horizon Distance")
# plot.img(src=p)
# plot.a.close()
# th2.append('Inspiral Horizon Distance')
# td2.append(plot())
page = write_table(page, th2, td2)
return page | Write antenna factors to markup.page object page and generate John's
detector response plot. |
def cleanPolyline(elem, options):
"""
Scour the polyline points attribute
"""
pts = parseListOfPoints(elem.getAttribute('points'))
elem.setAttribute('points', scourCoordinates(pts, options, True)) | Scour the polyline points attribute |
def backfill_previous_messages(self, reverse=False, limit=10):
"""Backfill handling of previous messages.
Args:
reverse (bool): When false messages will be backfilled in their original
order (old to new), otherwise the order will be reversed (new to old).
limit (int): Number of messages to go back.
"""
res = self.client.api.get_room_messages(self.room_id, self.prev_batch,
direction="b", limit=limit)
events = res["chunk"]
if not reverse:
events = reversed(events)
for event in events:
self._put_event(event) | Backfill handling of previous messages.
Args:
reverse (bool): When false messages will be backfilled in their original
order (old to new), otherwise the order will be reversed (new to old).
limit (int): Number of messages to go back. |
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.params()):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
else:
owner = meth.__self__
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except:
return str(callable_obj) | Attempt to return a meaningful name identifying a callable or generator |
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func) | Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called. |
def _handle_pong(self, ts, *args, **kwargs):
"""
Handles pong messages; resets the self.ping_timer variable and logs
info message.
:param ts: timestamp, declares when data was received by the client
:return:
"""
log.info("BitfinexWSS.ping(): Ping received! (%ss)",
ts - self.ping_timer)
self.ping_timer = None | Handles pong messages; resets the self.ping_timer variable and logs
info message.
:param ts: timestamp, declares when data was received by the client
:return: |
def filter_by(self, values, exclude=False):
"""
Filter an SArray by values inside an iterable object. The result is an SArray that
only includes (or excludes) the values in the given ``values`` :class:`~turicreate.SArray`.
If ``values`` is not an SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SArray. The resulting SArray will
only include rows that have one of these values in the given
column.
exclude : bool
If True, the result SArray will contain all rows EXCEPT those that
have one of the ``values``.
Returns
-------
out : SArray
The filtered SArray.
Examples
--------
>>> sa = SArray(['dog', 'cat', 'cow', 'horse'])
>>> sa.filter_by(['cat', 'hamster', 'dog', 'fish', 'bird', 'snake'])
dtype: str
Rows: 2
['dog', 'cat']
>>> sa.filter_by(['cat', 'hamster', 'dog', 'fish', 'bird', 'snake'], exclude=True)
dtype: str
Rows: 2
['horse', 'cow']
"""
from .sframe import SFrame as _SFrame
column_name = 'sarray'
# Convert values to SArray
if not isinstance(values, SArray): #type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not _is_non_string_iterable(values):
values = [values]
values = SArray(values)
# Convert values to SFrame
value_sf = _SFrame()
value_sf.add_column(values, column_name, inplace=True)
given_type = value_sf.column_types()[0] #value column type
existing_type = self.dtype
sarray_sf = _SFrame()
sarray_sf.add_column(self, column_name, inplace=True)
if given_type != existing_type:
raise TypeError("Type of given values does not match type of the SArray")
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
with cython_context():
if exclude:
id_name = "id"
value_sf = value_sf.add_row_number(id_name)
tmp = _SFrame(_proxy=sarray_sf.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
ret_sf = tmp[tmp[id_name] == None]
return ret_sf[column_name]
else:
ret_sf = _SFrame(_proxy=sarray_sf.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
return ret_sf[column_name] | Filter an SArray by values inside an iterable object. The result is an SArray that
only includes (or excludes) the values in the given ``values`` :class:`~turicreate.SArray`.
If ``values`` is not an SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SArray. The resulting SArray will
only include rows that have one of these values in the given
column.
exclude : bool
If True, the result SArray will contain all rows EXCEPT those that
have one of the ``values``.
Returns
-------
out : SArray
The filtered SArray.
Examples
--------
>>> sa = SArray(['dog', 'cat', 'cow', 'horse'])
>>> sa.filter_by(['cat', 'hamster', 'dog', 'fish', 'bird', 'snake'])
dtype: str
Rows: 2
['dog', 'cat']
>>> sa.filter_by(['cat', 'hamster', 'dog', 'fish', 'bird', 'snake'], exclude=True)
dtype: str
Rows: 2
['horse', 'cow'] |
def get_keywords_from_text(text_lines, taxonomy_name, output_mode="text",
output_limit=None,
spires=False, match_mode="full", no_cache=False,
with_author_keywords=False, rebuild_cache=False,
only_core_tags=False, extract_acronyms=False):
"""Extract keywords from the list of strings.
:param text_lines: list of strings (will be normalized before being
joined into one string)
:param taxonomy_name: string, name of the taxonomy_name
:param output_mode: string - text|html|marcxml|raw
:param output_limit: int
:param spires: boolean, if True marcxml output reflects spires codes.
:param match_mode: str - partial|full; in partial mode only
beginning of the fulltext is searched.
:param no_cache: boolean, means loaded definitions will not be saved.
:param with_author_keywords: boolean, extract keywords from the pdfs.
:param rebuild_cache: boolean
:param only_core_tags: boolean
:return: if output_mode=raw, it will return
(single_keywords, composite_keywords, author_keywords, acronyms)
for other output modes it returns formatted string
"""
if output_limit is None:
output_limit = current_app.config['CLASSIFIER_DEFAULT_OUTPUT_NUMBER']
cache = get_cache(taxonomy_name)
if not cache:
set_cache(taxonomy_name,
get_regular_expressions(taxonomy_name,
rebuild=rebuild_cache,
no_cache=no_cache))
cache = get_cache(taxonomy_name)
_skw = cache[0]
_ckw = cache[1]
text_lines = cut_references(text_lines)
fulltext = normalize_fulltext("\n".join(text_lines))
if match_mode == "partial":
fulltext = get_partial_text(fulltext)
author_keywords = None
if with_author_keywords:
author_keywords = extract_author_keywords(_skw, _ckw, fulltext)
acronyms = {}
if extract_acronyms:
acronyms = extract_abbreviations(fulltext)
single_keywords = extract_single_keywords(_skw, fulltext)
composite_keywords = extract_composite_keywords(
_ckw, fulltext, single_keywords)
if only_core_tags:
single_keywords = clean_before_output(
filter_core_keywords(single_keywords))
composite_keywords = filter_core_keywords(composite_keywords)
else:
# Filter out the "nonstandalone" keywords
single_keywords = clean_before_output(single_keywords)
return get_keywords_output(
single_keywords=single_keywords,
composite_keywords=composite_keywords,
taxonomy_name=taxonomy_name,
author_keywords=author_keywords,
acronyms=acronyms,
output_mode=output_mode,
output_limit=output_limit,
spires=spires,
only_core_tags=only_core_tags
) | Extract keywords from the list of strings.
:param text_lines: list of strings (will be normalized before being
joined into one string)
:param taxonomy_name: string, name of the taxonomy_name
:param output_mode: string - text|html|marcxml|raw
:param output_limit: int
:param spires: boolean, if True marcxml output reflects spires codes.
:param match_mode: str - partial|full; in partial mode only
beginning of the fulltext is searched.
:param no_cache: boolean, means loaded definitions will not be saved.
:param with_author_keywords: boolean, extract keywords from the pdfs.
:param rebuild_cache: boolean
:param only_core_tags: boolean
:return: if output_mode=raw, it will return
(single_keywords, composite_keywords, author_keywords, acronyms)
for other output modes it returns formatted string |
def matchPatterns(patterns, keys):
"""
Returns a subset of the keys that match any of the given patterns
:param patterns: (list) regular expressions to match
:param keys: (list) keys to search for matches
"""
results = []
if patterns:
for pattern in patterns:
prog = re.compile(pattern)
for key in keys:
if prog.match(key):
results.append(key)
else:
return None
return results | Returns a subset of the keys that match any of the given patterns
:param patterns: (list) regular expressions to match
:param keys: (list) keys to search for matches |
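A small sketch of matchPatterns in action (assumes `re` is imported); re.match anchors each pattern at the start of the key, and a falsy patterns argument returns None.
keys = ["cpu_load", "cpu_temp", "mem_free"]
print(matchPatterns(["cpu_.*"], keys))  # ['cpu_load', 'cpu_temp']
print(matchPatterns(None, keys))        # None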
def removeChild(self, child):
'''
removeChild - Remove a child tag, if present.
@param child <AdvancedTag> - The child to remove
@return - The child [with parentNode cleared] if removed, otherwise None.
NOTE: This removes a tag. If removing a text block, use #removeText function.
If you need to remove an arbitrary block (text or AdvancedTag), @see removeBlock
Removing multiple children? @see removeChildren
'''
try:
# Remove from children and blocks
self.children.remove(child)
self.blocks.remove(child)
# Clear parent node association on child
child.parentNode = None
# Clear document reference on removed child and all children thereof
child.ownerDocument = None
for subChild in child.getAllChildNodes():
subChild.ownerDocument = None
return child
except ValueError:
# TODO: What circumstances cause this to be raised? Is it okay to have a partial remove?
#
# Is it only when "child" is not found? Should that just be explicitly tested?
return None | removeChild - Remove a child tag, if present.
@param child <AdvancedTag> - The child to remove
@return - The child [with parentNode cleared] if removed, otherwise None.
NOTE: This removes a tag. If removing a text block, use #removeText function.
If you need to remove an arbitrary block (text or AdvancedTag), @see removeBlock
Removing multiple children? @see removeChildren |
def subdivide(self):
r"""Split the curve :math:`B(s)` into a left and right half.
Takes the interval :math:`\left[0, 1\right]` and splits the curve into
:math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and
:math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In
order to do this, also reparameterizes the curve, hence the resulting
left and right halves have new nodes.
.. image:: ../../images/curve_subdivide.png
:align: center
.. doctest:: curve-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 2.0],
... [0.0, 3.0 , 1.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> left, right = curve.subdivide()
>>> left.nodes
array([[0. , 0.625, 1.125],
[0. , 1.5 , 1.75 ]])
>>> right.nodes
array([[1.125, 1.625, 2. ],
[1.75 , 2. , 1. ]])
.. testcleanup:: curve-subdivide
import make_images
make_images.curve_subdivide(curve, left, right)
Returns:
Tuple[Curve, Curve]: The left and right sub-curves.
"""
left_nodes, right_nodes = _curve_helpers.subdivide_nodes(self._nodes)
left = Curve(left_nodes, self._degree, _copy=False)
right = Curve(right_nodes, self._degree, _copy=False)
return left, right | r"""Split the curve :math:`B(s)` into a left and right half.
Takes the interval :math:`\left[0, 1\right]` and splits the curve into
:math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and
:math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In
order to do this, also reparameterizes the curve, hence the resulting
left and right halves have new nodes.
.. image:: ../../images/curve_subdivide.png
:align: center
.. doctest:: curve-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 2.0],
... [0.0, 3.0 , 1.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> left, right = curve.subdivide()
>>> left.nodes
array([[0. , 0.625, 1.125],
[0. , 1.5 , 1.75 ]])
>>> right.nodes
array([[1.125, 1.625, 2. ],
[1.75 , 2. , 1. ]])
.. testcleanup:: curve-subdivide
import make_images
make_images.curve_subdivide(curve, left, right)
Returns:
Tuple[Curve, Curve]: The left and right sub-curves. |
def status_set(workload_state, message):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state.
message -- status update message
"""
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
if workload_state not in valid_states:
raise ValueError(
'{!r} is not a valid workload state'.format(workload_state)
)
cmd = ['status-set', workload_state, message]
try:
ret = subprocess.call(cmd)
if ret == 0:
return
except OSError as e:
if e.errno != errno.ENOENT:
raise
log_message = 'status-set failed: {} {}'.format(workload_state,
message)
log(log_message, level='INFO') | Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state.
message -- status update message |
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex
"""
return Double(context.jvm_view().ArcSinVertex, label, cast_to_double_vertex(input_vertex)) | Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex |
def simplify(self):
"""Return a simplified expression."""
node = self.node.simplify()
if node is self.node:
return self
else:
return _expr(node) | Return a simplified expression. |
def do_gate_matrix(self, matrix: np.ndarray,
qubits: Sequence[int]) -> 'AbstractQuantumSimulator':
"""
Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done
:param qubits: A list of qubits to apply the unitary to.
:return: ``self`` to support method chaining.
"""
unitary = lifted_gate_matrix(matrix=matrix, qubit_inds=qubits, n_qubits=self.n_qubits)
self.density = unitary.dot(self.density).dot(np.conj(unitary).T)
return self | Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done
:param qubits: A list of qubits to apply the unitary to.
:return: ``self`` to support method chaining. |
def strlify(a):
'''
Used to turn hexlify() into hex string.
Does nothing in Python 2, but is necessary for Python 3, so that
all inputs and outputs are always the same encoding. Most of the
time it doesn't matter, but some functions in Python 3 brick when
they get bytes instead of a string, so it's safer to just
strlify() everything.
In Python 3 for example (examples commented out for doctest):
# >>> hexlify(unhexlify("a1b2c3"))
b'a1b2c3'
# >>> b'a1b2c3' == 'a1b2c3'
False
# >>> strlify(hexlify(unhexlify("a1b2c3")))
'a1b2c3'
Whereas in Python 2, the results would be:
# >>> hexlify(unhexlify("a1b2c3"))
'a1b2c3'
# >>> b'a1b2c3' == 'a1b2c3'
True
# >>> strlify(hexlify(unhexlify("a1b2c3")))
'a1b2c3'
Safe to use redundantly on hex and base64 that may or may not be
byte objects, as well as base58, since hex and base64 and base58
strings will never have "b'" in the middle of them.
Obviously it's NOT safe to use on random strings which might have
"b'" in the middle of the string.
Use this for making sure base 16/58/64 objects are in string
format.
Use normalize_input() below to convert unicode objects back to
ascii strings when possible.
'''
if a == b'b' or a == 'b':
return 'b'
return str(a).rstrip("'").replace("b'","",1).replace("'","") | Used to turn hexlify() into hex string.
Does nothing in Python 2, but is necessary for Python 3, so that
all inputs and outputs are always the same encoding. Most of the
time it doesn't matter, but some functions in Python 3 brick when
they get bytes instead of a string, so it's safer to just
strlify() everything.
In Python 3 for example (examples commented out for doctest):
# >>> hexlify(unhexlify("a1b2c3"))
b'a1b2c3'
# >>> b'a1b2c3' == 'a1b2c3'
False
# >>> strlify(hexlify(unhexlify("a1b2c3")))
'a1b2c3'
Whereas in Python 2, the results would be:
# >>> hexlify(unhexlify("a1b2c3"))
'a1b2c3'
# >>> b'a1b2c3' == 'a1b2c3'
True
# >>> strlify(hexlify(unhexlify("a1b2c3")))
'a1b2c3'
Safe to use redundantly on hex and base64 that may or may not be
byte objects, as well as base58, since hex and base64 and base58
strings will never have "b'" in the middle of them.
Obviously it's NOT safe to use on random strings which might have
"b'" in the middle of the string.
Use this for making sure base 16/58/64 objects are in string
format.
Use normalize_input() below to convert unicode objects back to
ascii strings when possible. |
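An illustrative Python 3 round trip showing strlify stripping the b'...' wrapper from a hexlify() result.
from binascii import hexlify, unhexlify
print(hexlify(unhexlify("a1b2c3")))           # b'a1b2c3'
print(strlify(hexlify(unhexlify("a1b2c3"))))  # a1b2c3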
def _lval_add_towards_polarity(x, polarity):
"""Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity.
"""
if x < 0:
if polarity < 0:
return Lval('toinf', x)
return Lval('pastzero', x)
elif polarity > 0:
return Lval('toinf', x)
return Lval('pastzero', x) | Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity. |
def yield_sorted_by_type(*typelist):
"""
a useful decorator for the collect_impl method of SuperChange
subclasses. Caches the yielded changes, and re-emits them
collected by their type. The order of the types can be specified
by listing the types as arguments to this decorator. Unlisted
types will be yielded last in no guaranteed order.
Grouping happens by exact type match only. Inheritance is not
taken into consideration for grouping.
"""
def decorate(fun):
@wraps(fun)
def decorated(*args, **kwds):
return iterate_by_type(fun(*args, **kwds), typelist)
return decorated
return decorate | a useful decorator for the collect_impl method of SuperChange
subclasses. Caches the yielded changes, and re-emits them
collected by their type. The order of the types can be specified
by listing the types as arguments to this decorator. Unlisted
types will be yielded last in no guaranteed order.
Grouping happens by exact type match only. Inheritance is not
taken into consideration for grouping. |
def _initiate_resumable_upload(self, stream, metadata, num_retries):
"""Initiate a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: tuple
:returns:
Pair of
* The :class:`~google.resumable_media.requests.ResumableUpload`
that was created
* The ``transport`` used to initiate the upload.
"""
chunk_size = _DEFAULT_CHUNKSIZE
transport = self._http
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _RESUMABLE_URL_TEMPLATE.format(project=self.project)
# TODO: modify ResumableUpload to take a retry.Retry object
# that it can use for the initial RPC.
upload = ResumableUpload(upload_url, chunk_size, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
upload.initiate(
transport, stream, metadata, _GENERIC_CONTENT_TYPE, stream_final=False
)
return upload, transport | Initiate a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: tuple
:returns:
Pair of
* The :class:`~google.resumable_media.requests.ResumableUpload`
that was created
* The ``transport`` used to initiate the upload. |
def make_decoder(activation, latent_size, output_shape, base_depth):
"""Creates the decoder function.
Args:
activation: Activation function in hidden layers.
latent_size: Dimensionality of the encoding.
output_shape: The output image shape.
base_depth: Smallest depth for a layer.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over images.
"""
deconv = functools.partial(
tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation)
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
decoder_net = tf.keras.Sequential([
deconv(2 * base_depth, 7, padding="VALID"),
deconv(2 * base_depth, 5),
deconv(2 * base_depth, 5, 2),
deconv(base_depth, 5),
deconv(base_depth, 5, 2),
deconv(base_depth, 5),
conv(output_shape[-1], 5, activation=None),
])
def decoder(codes):
original_shape = tf.shape(input=codes)
# Collapse the sample and batch dimension and convert to rank-4 tensor for
# use with a convolutional decoder network.
codes = tf.reshape(codes, (-1, 1, 1, latent_size))
logits = decoder_net(codes)
logits = tf.reshape(
logits, shape=tf.concat([original_shape[:-1], output_shape], axis=0))
return tfd.Independent(tfd.Bernoulli(logits=logits),
reinterpreted_batch_ndims=len(output_shape),
name="image")
return decoder | Creates the decoder function.
Args:
activation: Activation function in hidden layers.
latent_size: Dimensionality of the encoding.
output_shape: The output image shape.
base_depth: Smallest depth for a layer.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over images. |
def options(self):
'''
Small method to return headers of an OPTIONS request to self.uri
Args:
None
Return:
(dict) response headers from OPTIONS request
'''
# http request
response = self.repo.api.http_request('OPTIONS', self.uri)
return response.headers | Small method to return headers of an OPTIONS request to self.uri
Args:
None
Return:
(dict) response headers from OPTIONS request |
def decode_packet(data):
"""decode the data, return some kind of PDU."""
if _debug: decode_packet._debug("decode_packet %r", data)
# empty strings are some other kind of pcap content
if not data:
return None
# assume it is ethernet for now
d = decode_ethernet(data)
pduSource = Address(d['source_address'])
pduDestination = Address(d['destination_address'])
data = d['data']
# there could be a VLAN header
if (d['type'] == 0x8100):
if _debug: decode_packet._debug(" - vlan found")
d = decode_vlan(data)
data = d['data']
# look for IP packets
if (d['type'] == 0x0800):
if _debug: decode_packet._debug(" - IP found")
d = decode_ip(data)
pduSource, pduDestination = d['source_address'], d['destination_address']
data = d['data']
if (d['protocol'] == 'udp'):
if _debug: decode_packet._debug(" - UDP found")
d = decode_udp(data)
data = d['data']
pduSource = Address((pduSource, d['source_port']))
pduDestination = Address((pduDestination, d['destination_port']))
if _debug:
decode_packet._debug(" - pduSource: %r", pduSource)
decode_packet._debug(" - pduDestination: %r", pduDestination)
else:
if _debug: decode_packet._debug(" - not a UDP packet")
else:
if _debug: decode_packet._debug(" - not an IP packet")
# check for empty
if not data:
if _debug: decode_packet._debug(" - empty packet")
return None
# build a PDU
pdu = PDU(data, source=pduSource, destination=pduDestination)
# check for a BVLL header
if (pdu.pduData[0] == 0x81):
if _debug: decode_packet._debug(" - BVLL header found")
try:
xpdu = BVLPDU()
xpdu.decode(pdu)
pdu = xpdu
except Exception as err:
if _debug: decode_packet._debug(" - BVLPDU decoding error: %r", err)
return pdu
# make a more focused interpretation
atype = bvl_pdu_types.get(pdu.bvlciFunction)
if not atype:
if _debug: decode_packet._debug(" - unknown BVLL type: %r", pdu.bvlciFunction)
return pdu
# decode it as one of the basic types
try:
xpdu = pdu
bpdu = atype()
bpdu.decode(pdu)
if _debug: decode_packet._debug(" - bpdu: %r", bpdu)
pdu = bpdu
# lift the address for forwarded NPDU's
if atype is ForwardedNPDU:
pdu.pduSource = bpdu.bvlciAddress
# no deeper decoding for some
elif atype not in (DistributeBroadcastToNetwork, OriginalUnicastNPDU, OriginalBroadcastNPDU):
return pdu
except Exception as err:
if _debug: decode_packet._debug(" - decoding Error: %r", err)
return xpdu
# check for version number
if (pdu.pduData[0] != 0x01):
if _debug: decode_packet._debug(" - not a version 1 packet: %s...", btox(pdu.pduData[:30], '.'))
return None
# it's an NPDU
try:
npdu = NPDU()
npdu.decode(pdu)
except Exception as err:
if _debug: decode_packet._debug(" - decoding Error: %r", err)
return None
# application or network layer message
if npdu.npduNetMessage is None:
if _debug: decode_packet._debug(" - not a network layer message, try as an APDU")
# decode as a generic APDU
try:
xpdu = APDU()
xpdu.decode(npdu)
apdu = xpdu
except Exception as err:
if _debug: decode_packet._debug(" - decoding Error: %r", err)
return npdu
# "lift" the source and destination address
if npdu.npduSADR:
apdu.pduSource = npdu.npduSADR
else:
apdu.pduSource = npdu.pduSource
if npdu.npduDADR:
apdu.pduDestination = npdu.npduDADR
else:
apdu.pduDestination = npdu.pduDestination
# make a more focused interpretation
atype = apdu_types.get(apdu.apduType)
if not atype:
if _debug: decode_packet._debug(" - unknown APDU type: %r", apdu.apduType)
return apdu
# decode it as one of the basic types
try:
xpdu = apdu
apdu = atype()
apdu.decode(xpdu)
except Exception as err:
if _debug: decode_packet._debug(" - decoding Error: %r", err)
return xpdu
# decode it at the next level
if isinstance(apdu, ConfirmedRequestPDU):
atype = confirmed_request_types.get(apdu.apduService)
if not atype:
if _debug: decode_packet._debug(" - no confirmed request decoder: %r", apdu.apduService)
return apdu
elif isinstance(apdu, UnconfirmedRequestPDU):
atype = unconfirmed_request_types.get(apdu.apduService)
if not atype:
if _debug: decode_packet._debug(" - no unconfirmed request decoder: %r", apdu.apduService)
return apdu
elif isinstance(apdu, SimpleAckPDU):
atype = None
elif isinstance(apdu, ComplexAckPDU):
atype = complex_ack_types.get(apdu.apduService)
if not atype:
if _debug: decode_packet._debug(" - no complex ack decoder: %r", apdu.apduService)
return apdu
elif isinstance(apdu, SegmentAckPDU):
atype = None
elif isinstance(apdu, ErrorPDU):
atype = error_types.get(apdu.apduService)
if not atype:
if _debug: decode_packet._debug(" - no error decoder: %r", apdu.apduService)
return apdu
elif isinstance(apdu, RejectPDU):
atype = None
elif isinstance(apdu, AbortPDU):
atype = None
if _debug: decode_packet._debug(" - atype: %r", atype)
# deeper decoding
try:
if atype:
xpdu = apdu
apdu = atype()
apdu.decode(xpdu)
except Exception as err:
if _debug: decode_packet._debug(" - decoding error: %r", err)
return xpdu
# success
return apdu
else:
# make a more focused interpretation
ntype = npdu_types.get(npdu.npduNetMessage)
if not ntype:
if _debug: decode_packet._debug(" - no network layer decoder: %r", npdu.npduNetMessage)
return npdu
if _debug: decode_packet._debug(" - ntype: %r", ntype)
# deeper decoding
try:
xpdu = npdu
npdu = ntype()
npdu.decode(xpdu)
except Exception as err:
if _debug: decode_packet._debug(" - decoding error: %r", err)
return xpdu
# success
return npdu | decode the data, return some kind of PDU. |
def get_time_interval(time1, time2):
'''get the interval of two times'''
try:
#convert time to timestamp
time1 = time.mktime(time.strptime(time1, '%Y/%m/%d %H:%M:%S'))
time2 = time.mktime(time.strptime(time2, '%Y/%m/%d %H:%M:%S'))
seconds = int((datetime.datetime.fromtimestamp(time2) - datetime.datetime.fromtimestamp(time1)).total_seconds())
#convert seconds to day:hour:minute:second
days = seconds // 86400
seconds %= 86400
hours = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return '%dd %dh %dm %ds' % (days, hours, minutes, seconds)
except:
return 'N/A' | get the interval of two times |
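A quick check of get_time_interval on a same-day interval (assumes `time` and `datetime` are imported by the enclosing module).
print(get_time_interval('2021/01/01 10:00:00', '2021/01/01 11:30:45'))
# 0d 1h 30m 45s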
def submatrix(dmat, indices_col, n_neighbors):
Return a submatrix given an original matrix and the indices to keep.
Parameters
----------
mat: array, shape (n_samples, n_samples)
Original matrix.
indices_col: array, shape (n_samples, n_neighbors)
Indices to keep. Each row consists of the indices of the columns.
n_neighbors: int
Number of neighbors.
Returns
-------
submat: array, shape (n_samples, n_neighbors)
The corresponding submatrix.
"""
n_samples_transform, n_samples_fit = dmat.shape
submat = np.zeros((n_samples_transform, n_neighbors), dtype=dmat.dtype)
for i in numba.prange(n_samples_transform):
for j in numba.prange(n_neighbors):
submat[i, j] = dmat[i, indices_col[i, j]]
return submat | Return a submatrix given an orginal matrix and the indices to keep.
Parameters
----------
mat: array, shape (n_samples, n_samples)
Original matrix.
indices_col: array, shape (n_samples, n_neighbors)
Indices to keep. Each row consists of the indices of the columns.
n_neighbors: int
Number of neighbors.
Returns
-------
submat: array, shape (n_samples, n_neighbors)
The corresponding submatrix. |
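A small sanity check for submatrix (requires numpy and numba to be importable; outside a jitted context numba.prange behaves like a plain range).
import numpy as np
dmat = np.arange(16, dtype=np.float64).reshape(4, 4)       # 4x4 distance matrix
indices_col = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])   # 2 neighbors kept per row
print(submatrix(dmat, indices_col, n_neighbors=2))
# [[ 0.  1.]
#  [ 5.  6.]
#  [10. 11.]
#  [15. 12.]]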
def _gte(field, value, document):
"""
Returns True if the value of a document field is greater than or
equal to a given value
"""
try:
return document.get(field, None) >= value
except TypeError: # pragma: no cover Python < 3.0
return False | Returns True if the value of a document field is greater than or
equal to a given value |
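A quick illustration of _gte with a plain dict standing in for a document; a missing field defaults to None, and the resulting comparison error is swallowed.
doc = {"age": 30, "name": "ada"}
print(_gte("age", 18, doc))      # True  (30 >= 18)
print(_gte("age", 40, doc))      # False
print(_gte("missing", 1, doc))   # False (None >= 1 raises TypeError on Python 3)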
def _init_args(self):
"""Get enrichment arg parser."""
#pylint: disable=invalid-name
p = argparse.ArgumentParser(__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('filenames', type=str, nargs=3,
help='data/study data/population data/association')
p.add_argument('--annofmt', default=None, type=str,
help=('Annotation file format. '
'Not needed if type can be determined using filename'),
choices=['gene2go', 'gaf', 'gpad', 'id2gos'])
p.add_argument('--taxid', default=9606, type=int,
help="When using NCBI's gene2go annotation file, specify desired taxid")
p.add_argument('--alpha', default=0.05, type=float,
help='Test-wise alpha for multiple testing')
p.add_argument('--pval', default=.05, type=float,
help='Only print results with uncorrected p-value < PVAL.')
p.add_argument('--pval_field', type=str,
help='Only print results when PVAL_FIELD < PVAL.')
p.add_argument('--outfile', default=None, type=str,
help='Write enrichment results into xlsx or tsv file')
p.add_argument('--id2sym', default=None, type=str,
help='ASCII file containing one geneid and its symbol per line')
p.add_argument('--sections', default=None, type=str,
help=('Use sections file for printing grouped GOEA results. '
'Example SECTIONS values:\n'
'goatools.test_data.sections.gjoneska_pfenning \n'
'goatools/test_data/sections/gjoneska_pfenning.py \n'
'data/gjoneska_pfenning/sections_in.txt\n'))
p.add_argument('--outfile_detail', type=str,
help=('Write enrichment results into a text file \n'
'containing the following information: \n'
'1) GOEA GO terms, grouped into sections \n\n'
'2) List of genes and ASCII art showing section membership \n'
'3) Detailed list of each gene and GO terms w/their P-values \n'))
p.add_argument('--compare', dest='compare', default=False,
action='store_true',
help="the population file as a comparison group. if this "
"flag is specified, the population is used as the study "
"plus the `population/comparison`")
p.add_argument('--ratio', dest='ratio', type=float, default=None,
help="only show values where the difference between study "
"and population ratios is greater than this. useful for "
"excluding GO categories with small differences, but "
"containing large numbers of genes. should be a value "
"between 1 and 2. ")
p.add_argument('--indent', dest='indent', default=False,
action='store_true', help="indent GO terms")
p.add_argument('--obo', default="go-basic.obo", type=str,
help="Specifies location and name of the obo file")
p.add_argument('--no_propagate_counts', default=False, action='store_true',
help="Do not propagate counts to parent terms")
p.add_argument('--method', default="bonferroni,sidak,holm,fdr_bh", type=str,
help=Methods().getmsg_valid_methods())
p.add_argument('--pvalcalc', default="fisher", type=str,
help=str(FisherFactory()))
p.add_argument('--min_overlap', default=0.7, type=float,
help="Check that a minimum amount of study genes are in the population")
p.add_argument('--goslim', default='goslim_generic.obo', type=str,
help="The GO slim file is used when grouping GO terms.")
p.add_argument('--ev_inc', type=str,
help="Include specified evidence codes and groups separated by commas")
p.add_argument('--ev_exc', type=str,
help="Exclude specified evidence codes and groups separated by commas")
p.add_argument('--ev_help', dest='ev_help', action='store_false',
help="Print all Evidence codes")
if len(sys.argv) == 1:
sys.exit(not p.print_help())
if '--ev_help' in sys.argv:
print('\nEVIDENCE CODE HELP: --ev_exc --ev_inc')
print('Use any of these group names, ')
print('like Experimental or Similarity or Experimental,Similarity,')
print('or evidence codes, like IEA or ISS,ISO,ISA in --ev_exc or --ev_inc:\n')
obj = EvidenceCodes()
obj.prt_details()
sys.exit(0)
args = p.parse_args() # Namespace object from argparse
self._check_input_files(args, p)
return args | Get enrichment arg parser. |
def run(self):
"""Invoke the function repeatedly on a timer."""
ret = eventlet.spawn(self.context(self.func))
eventlet.sleep(self.seconds)
try:
ret.wait()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
self.thread = eventlet.spawn(self.run) | Invoke the function repeatedly on a timer. |
def filter_with_schema(self, model=None, context=None):
""" Perform model filtering with schema """
if model is None or self.schema is None:
return
self._schema.filter(
model=model,
context=context if self.use_context else None
) | Perform model filtering with schema |
def list_all_by_reqvip(self, id_vip, pagination):
"""
List All Pools To Populate Datatable
:param pagination: Object Pagination
:return: Following dictionary:{
"total" : < total >,
"pools" :[{
"id": < id >
"default_port": < default_port >,
"identifier": < identifier >,
"healthcheck": < healthcheck >,
}, ... too ... ]}
:raise NetworkAPIException: Failure to access the data source
"""
uri = "api/pools/pool_list_by_reqvip/"
data = dict()
data["start_record"] = pagination.start_record
data["end_record"] = pagination.end_record
data["asorting_cols"] = pagination.asorting_cols
data["searchable_columns"] = pagination.searchable_columns
data["custom_search"] = pagination.custom_search or None
data["id_vip"] = id_vip or None
return self.post(uri, data=data) | List All Pools To Populate Datatable
:param pagination: Object Pagination
:return: Following dictionary:{
"total" : < total >,
"pools" :[{
"id": < id >
"default_port": < default_port >,
"identifier": < identifier >,
"healthcheck": < healthcheck >,
}, ... too ... ]}
:raise NetworkAPIException: Failure to access the data source |
def setup_logging():
"""Called when __name__ == '__main__' below. Sets up logging library.
All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages.
"""
fmt = 'DBG<0>%(pathname)s:%(lineno)d %(funcName)s: %(message)s'
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(logging.Formatter(fmt))
if OPTIONS['--verbose'] == 1:
handler_stderr.addFilter(logging.Filter(__name__))
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(handler_stderr) | Called when __name__ == '__main__' below. Sets up logging library.
All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages. |
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s) | Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True |
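A doctest-style check of _padleft; passing has_invisible=False sidesteps the _strip_invisible helper assumed to live in the same module.
print(repr(_padleft(6, 'abc', has_invisible=False)))  # '   abc'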
def global_defaults():
"""
Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
.. versionadded:: 2.0
"""
# TODO: hrm should the run-related things actually be derived from the
# runner_class? E.g. Local defines local stuff, Remote defines remote
# stuff? Doesn't help with the final config tree tho...
# TODO: as to that, this is a core problem, Fabric wants split
# local/remote stuff, eg replace_env wants to be False for local and
# True remotely; shell wants to differ depending on target (and either
# way, does not want to use local interrogation for remote)
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
"connect_kwargs": {},
"forward_agent": False,
"gateway": None,
"load_ssh_configs": True,
"port": 22,
"run": {"replace_env": True},
"runners": {"remote": Remote},
"ssh_config_path": None,
"tasks": {"collection_name": "fabfile"},
# TODO: this becomes an override/extend once Invoke grows execution
# timeouts (which should be timeouts.execute)
"timeouts": {"connect": None},
"user": get_local_user(),
}
merge_dicts(defaults, ours)
return defaults | Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
.. versionadded:: 2.0 |
def reward(self, action=None):
"""
Reward function for the task.
The dense reward has three components.
Reaching: in [0, 1], to encourage the arm to reach the cube
Grasping: in {0, 0.25}, non-zero if arm is grasping the cube
Lifting: in {0, 1}, non-zero if arm has lifted the cube
The sparse reward only consists of the lifting component.
Args:
action (np array): unused for this task
Returns:
reward (float): the reward
"""
reward = 0.
# sparse completion reward
if self._check_success():
reward = 1.0
# use a shaping reward
if self.reward_shaping:
# reaching reward
cube_pos = self.sim.data.body_xpos[self.cube_body_id]
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
dist = np.linalg.norm(gripper_site_pos - cube_pos)
reaching_reward = 1 - np.tanh(10.0 * dist)
reward += reaching_reward
# grasping reward
touch_left_finger = False
touch_right_finger = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id:
touch_left_finger = True
if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id:
touch_right_finger = True
if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids:
touch_right_finger = True
if touch_left_finger and touch_right_finger:
reward += 0.25
return reward | Reward function for the task.
The dense reward has three components.
Reaching: in [0, 1], to encourage the arm to reach the cube
Grasping: in {0, 0.25}, non-zero if arm is grasping the cube
Lifting: in {0, 1}, non-zero if arm has lifted the cube
The sparse reward only consists of the lifting component.
Args:
action (np array): unused for this task
Returns:
reward (float): the reward |
def SendVoicemail(self, Username):
"""Sends a voicemail to a specified user.
:Parameters:
Username : str
Skypename of the user.
:note: Should return a `Voicemail` object. This is not implemented yet.
"""
if self._Api.protocol >= 6:
self._DoCommand('CALLVOICEMAIL %s' % Username)
else:
self._DoCommand('VOICEMAIL %s' % Username) | Sends a voicemail to a specified user.
:Parameters:
Username : str
Skypename of the user.
:note: Should return a `Voicemail` object. This is not implemented yet. |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
supported_tags=None, fake_daily_files_from_monthly=False,
two_digit_year_break=None):
"""Return a Pandas Series of every file for chosen satellite data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>. (default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are tags supported by list_files routine. Values are the
default format_str values for key. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b':fname}
list_files = functools.partial(nasa_cdaweb_methods.list_files,
supported_tags=supported_tags)
ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'':ivm_fname}
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags)
"""
if data_path is not None:
if format_str is None:
try:
format_str = supported_tags[sat_id][tag]
except KeyError:
raise ValueError('Unknown tag')
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if (not out.empty) and fake_daily_files_from_monthly:
out.ix[out.index[-1] + pds.DateOffset(months=1) -
pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
return out
else:
estr = 'A directory must be passed to the loading routine for <Instrument Code>'
raise ValueError (estr) | Return a Pandas Series of every file for chosen satellite data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>. (default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are tags supported by list_files routine. Values are the
default format_str values for key. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b':fname}
list_files = functools.partial(nasa_cdaweb_methods.list_files,
supported_tags=supported_tags)
ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'':ivm_fname}
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags) |
def read(self, to_read, timeout_ms):
"""Reads data from this file.
in to_read of type int
Number of bytes to read.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return data of type str
Array of data read.
raises :class:`OleErrorNotimpl`
The method is not implemented yet.
"""
if not isinstance(to_read, baseinteger):
raise TypeError("to_read can only be an instance of type baseinteger")
if not isinstance(timeout_ms, baseinteger):
raise TypeError("timeout_ms can only be an instance of type baseinteger")
data = self._call("read",
in_p=[to_read, timeout_ms])
return data | Reads data from this file.
in to_read of type int
Number of bytes to read.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return data of type str
Array of data read.
raises :class:`OleErrorNotimpl`
The method is not implemented yet. |
def _split_symbol_mappings(df, exchanges):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date.
"""
mappings = df[list(mapping_columns)]
with pd.option_context('mode.chained_assignment', None):
mappings['sid'] = mappings.index
mappings.reset_index(drop=True, inplace=True)
# take the most recent sid->exchange mapping based on end date
asset_exchange = df[
['exchange', 'end_date']
].sort_values('end_date').groupby(level=0)['exchange'].nth(-1)
_check_symbol_mappings(mappings, exchanges, asset_exchange)
return (
df.groupby(level=0).apply(_check_asset_group),
mappings,
) | Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date. |
def sleep_and_retry(func):
'''
Return a wrapped function that rescues rate limit exceptions, sleeping the
current thread until rate limit resets.
:param function func: The function to decorate.
:return: Decorated function.
:rtype: function
'''
@wraps(func)
def wrapper(*args, **kargs):
'''
Call the rate limited function. If the function raises a rate limit
exception, sleep for the remaining time period and retry the function.
:param args: non-keyword variable length argument list to the decorated function.
:param kargs: keyworded variable length argument list to the decorated function.
'''
while True:
try:
return func(*args, **kargs)
except RateLimitException as exception:
time.sleep(exception.period_remaining)
return wrapper | Return a wrapped function that rescues rate limit exceptions, sleeping the
current thread until rate limit resets.
:param function func: The function to decorate.
:return: Decorated function.
:rtype: function |
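A hedged usage sketch: sleep_and_retry is typically stacked above a quota decorator that raises RateLimitException once the limit is hit; the `limits` decorator named below is assumed to come from the same rate-limiting module.
@sleep_and_retry
@limits(calls=2, period=10)   # assumed companion decorator: at most 2 calls per 10 s
def ping():
    return "pong"

for _ in range(5):
    print(ping())             # from the third call on, the thread sleeps until the window resets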
def filter_and_transform_data(df, settings):
'''
    Perform filtering on the data based on arguments set on the command line
- use aligned length or sequenced length (bam mode only)
- hide outliers from length plots*
- hide reads longer than maxlength or shorter than minlength from length plots*
- filter reads with a quality below minqual
- use log10 scaled reads rather than normal
- use empirical percent accuracy rather than phred score quality
- downsample reads to args.downsample
- always: drop reads which are basecaller artefacts
judged by length below 20 and quality above 30
* using a boolean column length_filter
'''
df["length_filter"] = True
settings["filtered"] = False
if settings.get("alength") and settings.get("bam"):
settings["lengths_pointer"] = "aligned_lengths"
logging.info("Using aligned read lengths for plotting.")
else:
settings["lengths_pointer"] = "lengths"
logging.info("Using sequenced read lengths for plotting.")
if settings.get("drop_outliers"):
num_reads_prior = non_filtered_reads(df)
df.loc[flag_length_outliers(df, settings["lengths_pointer"]), "length_filter"] = False
num_reads_post = non_filtered_reads(df)
logging.info("Hidding {} length outliers in length plots.".format(
str(num_reads_prior - num_reads_post)))
if settings.get("maxlength"):
num_reads_prior = non_filtered_reads(df)
df.loc[df[settings["lengths_pointer"]] > settings["maxlength"], "length_filter"] = False
num_reads_post = non_filtered_reads(df)
logging.info("Hidding {} reads longer than {}bp in length plots.".format(
str(num_reads_prior - num_reads_post),
str(settings["maxlength"])))
if settings.get("minlength"):
num_reads_prior = non_filtered_reads(df)
df.loc[df[settings["lengths_pointer"]] < settings["minlength"], "length_filter"] = False
num_reads_post = non_filtered_reads(df)
logging.info("Hidding {} reads shorter than {}bp in length plots.".format(
str(num_reads_prior - num_reads_post),
str(settings["minlength"])))
if settings.get("minqual"):
num_reads_prior = non_filtered_reads(df)
df = df.loc[df["quals"] > settings["minqual"]].copy()
num_reads_post = non_filtered_reads(df)
logging.info("Removing {} reads with quality below Q{}.".format(
str(num_reads_prior - num_reads_post),
str(settings["minqual"])))
settings["filtered"] = True
if settings.get("loglength"):
df["log_" + settings["lengths_pointer"]] = np.log10(df[settings["lengths_pointer"]])
settings["lengths_pointer"] = "log_" + settings["lengths_pointer"]
logging.info("Using log10 scaled read lengths.")
settings["logBool"] = True
else:
settings["logBool"] = False
if settings.get("runtime_until"):
num_reads_prior = non_filtered_reads(df)
df = df[df.start_time < timedelta(hours=settings["runtime_until"])]
num_reads_post = non_filtered_reads(df)
logging.info("Removing {} reads generated after {} hours in the run.".format(
str(num_reads_prior - num_reads_post),
str(settings["runtime_until"])))
settings["filtered"] = True
if "quals" in df:
num_reads_prior = len(df)
df = df.loc[-((df["lengths"] < 20) & (df["quals"] > 30))].copy()
num_reads_post = len(df)
if num_reads_prior - num_reads_post > 0:
logging.info(
"Removed {} artefactual reads with very short length and very high quality."
.format(num_reads_prior - num_reads_post))
settings["filtered"] = True
if settings.get("downsample"):
new_size = min(settings["downsample"], len(df))
logging.info("Downsampling the dataset from {} to {} reads".format(
len(df), new_size))
df = df.sample(new_size)
settings["filtered"] = True
if settings.get("percentqual"):
df["quals"] = df["quals"].apply(phred_to_percent)
logging.info("Converting quality scores to theoretical percent identities.")
    return (df, settings) | Perform filtering on the data based on arguments set on the command line
- use aligned length or sequenced length (bam mode only)
- hide outliers from length plots*
- hide reads longer than maxlength or shorter than minlength from length plots*
- filter reads with a quality below minqual
- use log10 scaled reads rather than normal
- use empirical percent accuracy rather than phred score quality
- downsample reads to args.downsample
- always: drop reads which are basecaller artefacts
judged by length below 20 and quality above 30
* using a boolean column length_filter |
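Most of the branches above only toggle the boolean length_filter column rather than dropping rows; a self-contained sketch of that pattern with made-up thresholds and data (this is not the NanoPlot helper itself):
import pandas as pd

df = pd.DataFrame({"lengths": [150, 800, 12000, 15],
                   "quals":   [9.0, 12.5, 11.0, 35.0]})
df["length_filter"] = True

maxlength, minlength, minqual = 10000, 100, 10
df.loc[df["lengths"] > maxlength, "length_filter"] = False   # hide from length plots, don't drop
df.loc[df["lengths"] < minlength, "length_filter"] = False
df = df.loc[df["quals"] > minqual].copy()                    # the quality filter really removes reads
df = df.loc[~((df["lengths"] < 20) & (df["quals"] > 30))].copy()  # drop basecaller artefacts
print(df)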
def MatrixSolve(a, rhs, adj):
"""
Matrix solve op.
"""
return np.linalg.solve(a if not adj else _adjoint(a), rhs), | Matrix solve op. |
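Note the trailing comma in the return statement: it makes the op return a one-element tuple, presumably so that every op yields a tuple of outputs. A self-contained check of the underlying NumPy call, with _adjoint assumed to be the conjugate transpose and defined at module level alongside MatrixSolve:
import numpy as np

def _adjoint(m):
    # assumed helper: conjugate transpose of the last two axes
    return np.conj(np.swapaxes(m, -2, -1))

a = np.array([[3.0, 1.0], [1.0, 2.0]])
rhs = np.array([[9.0], [8.0]])
x, = MatrixSolve(a, rhs, adj=False)    # unpack the 1-tuple
x_adj, = MatrixSolve(a, rhs, adj=True)
print(np.allclose(a @ x, rhs), np.allclose(_adjoint(a) @ x_adj, rhs))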
def validate(self, collection: BioCCollection):
"""Validate a single collection."""
for document in collection.documents:
self.validate_doc(document) | Validate a single collection. |
def _timeout_handler(self, signum, frame):
"""
internal timeout handler
"""
msgfmt = 'plugin timed out after {0} seconds'
self.exit(code=self._timeout_code,
message=msgfmt.format(self._timeout_delay)) | internal timeout handler |
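The handler is presumably meant to be registered against SIGALRM; a standalone sketch of that pattern (the exit code, message, and delay are stand-ins, not the plugin framework's real attributes):
import signal
import sys

TIMEOUT_DELAY = 10  # seconds, made-up value

def timeout_handler(signum, frame):
    print('plugin timed out after {0} seconds'.format(TIMEOUT_DELAY))
    sys.exit(3)

signal.signal(signal.SIGALRM, timeout_handler)  # Unix only
signal.alarm(TIMEOUT_DELAY)   # arm the timer before the slow check
# ... do the actual work here ...
signal.alarm(0)               # disarm once finished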
def write(self, data):
"""Sends some data to the client."""
# I don't want to add a separate 'Client disconnected' logic for sending.
# Therefore I just ignore any writes after the first error - the server
# won't send that much data anyway. Afterwards the read will detect the
# broken connection and we quit.
if self._ignore_write_operations:
return
assert self.is_connected()
try:
self._connection.send(data.encode('ascii'))
except socket.error:
self.close()
self._ignore_write_operations = True | Sends some data to the client. |
def _read_configuration(config_filename):
"""
    Reads the configuration file and, if specified, its supplement file.
:param str config_filename: The name of the configuration file.
:rtype: (configparser.ConfigParser,configparser.ConfigParser)
"""
config = ConfigParser()
config.read(config_filename)
if 'supplement' in config['database']:
path = os.path.dirname(config_filename) + '/' + config.get('database', 'supplement')
config_supplement = ConfigParser()
config_supplement.read(path)
else:
config_supplement = None
    return config, config_supplement | Reads the configuration file and, if specified, its supplement file.
:param str config_filename: The name of the configuration file.
:rtype: (configparser.ConfigParser,configparser.ConfigParser) |
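A usage sketch for the helper above, writing a throwaway main config that points at a supplement file; it assumes the module-level imports the snippet relies on (ConfigParser, os) are present, and all file contents and keys other than 'supplement' are illustrative:
import os
import tempfile

cfg_dir = tempfile.mkdtemp()
with open(os.path.join(cfg_dir, 'main.cfg'), 'w') as fh:
    fh.write('[database]\nhost = localhost\nsupplement = extra.cfg\n')
with open(os.path.join(cfg_dir, 'extra.cfg'), 'w') as fh:
    fh.write('[database]\npassword = secret\n')

config, supplement = _read_configuration(os.path.join(cfg_dir, 'main.cfg'))
print(config.get('database', 'host'))           # localhost
print(supplement.get('database', 'password'))   # secret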