def parse_reply(self, data):
"""Deserializes and validates a response.
Called by the client to reconstruct the serialized :py:class:`JSONRPCResponse`.
:param bytes data: The data stream received by the transport layer containing the
serialized response.
:return: A reconstructed response.
:rtype: :py:class:`JSONRPCSuccessResponse` or :py:class:`JSONRPCErrorResponse`
:raises InvalidReplyError: if the response is not valid JSON or does not conform
to the standard.
"""
if isinstance(data, bytes):
data = data.decode()
try:
rep = json.loads(data)
except Exception as e:
raise InvalidReplyError(e)
for k in rep.keys():
if k not in self._ALLOWED_REPLY_KEYS:
raise InvalidReplyError('Key not allowed: %s' % k)
if 'jsonrpc' not in rep:
raise InvalidReplyError('Missing jsonrpc (version) in response.')
if rep['jsonrpc'] != self.JSON_RPC_VERSION:
raise InvalidReplyError('Wrong JSONRPC version')
if 'id' not in rep:
raise InvalidReplyError('Missing id in response')
if ('error' in rep) and ('result' in rep):
raise InvalidReplyError(
'Reply must contain exactly one of result and error.'
)
if 'error' in rep:
response = JSONRPCErrorResponse()
error = rep['error']
response.error = error["message"]
response._jsonrpc_error_code = error["code"]
if "data" in error:
response.data = error["data"]
else:
response = JSONRPCSuccessResponse()
response.result = rep.get('result', None)
response.unique_id = rep['id']
return response
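# A minimal, standalone sketch of the two reply shapes the validator above
# accepts. The checks are restated on plain dicts (and the '2.0' version
# string is assumed) so the example runs without the surrounding protocol class.
import json

success_reply = b'{"jsonrpc": "2.0", "result": 42, "id": 1}'
error_reply = b'{"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": 2}'
for raw in (success_reply, error_reply):
    rep = json.loads(raw.decode())
    assert rep['jsonrpc'] == '2.0'                 # version check
    assert 'id' in rep                             # id is mandatory
    assert ('result' in rep) ^ ('error' in rep)    # exactly one of result/error
    print('error' if 'error' in rep else 'result', rep['id'])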
def access_level_up_to(self, access_level):
""" returns all items that have an access level equal or lower than the one specified """
# if access_level is number
if isinstance(access_level, int):
value = access_level
# else if is string get the numeric value
else:
value = ACCESS_LEVELS.get(access_level)
# return queryset
return self.filter(access_level__lte=value)
def show_worst_drawdown_periods(returns, top=5):
"""
Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Number of top drawdown periods to print (default 5).
"""
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
utils.print_table(
drawdown_df.sort_values('Net drawdown in %', ascending=False),
name='Worst drawdown periods',
float_format='{0:.2f}'.format,
)
def containsSettingsGroup(groupName, settings=None):
""" Returns True if the settings contain a group with the name groupName.
Works recursively when the groupName is a slash separated path.
"""
def _containsPath(path, settings):
"Aux function for containsSettingsGroup. Does the actual recursive search."
if len(path) == 0:
return True
else:
head = path[0]
tail = path[1:]
if head not in settings.childGroups():
return False
else:
settings.beginGroup(head)
try:
return _containsPath(tail, settings)
finally:
settings.endGroup()
# Body starts here
path = [part for part in groupName.split('/') if part]  # split into all group components
logger.debug("Looking for path: {}".format(path))
settings = QtCore.QSettings() if settings is None else settings
return _containsPath(path, settings)
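# A minimal sketch of the same head/tail recursion, with a plain nested dict
# standing in for QSettings so it runs without Qt; the group names and layout
# here are hypothetical.
def _contains_path_demo(path, tree):
    if not path:
        return True
    head, tail = path[0], path[1:]
    child = tree.get(head)
    return isinstance(child, dict) and _contains_path_demo(tail, child)

groups = {'General': {'Colors': {}}, 'Plugins': {}}
print(_contains_path_demo(['General', 'Colors'], groups))  # True
print(_contains_path_demo(['General', 'Fonts'], groups))   # False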
def rename_property(self, old, new):
"""Replace the name of a property by a new one."""
self._properties.replace(old, new)
pairs = self._pairs
pairs |= {(o, new) for o in self._objects
if (o, old) in pairs and not pairs.remove((o, old))}
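# The update above leans on set.remove() returning None, so
# "not pairs.remove(...)" both deletes the old pair and keeps the comprehension
# condition truthy. A standalone rerun of the same trick on hypothetical data:
objects = ['o1', 'o2', 'o3']
pairs = {('o1', 'size'), ('o3', 'size'), ('o2', 'colour')}
old, new = 'size', 'weight'
pairs |= {(o, new) for o in objects
          if (o, old) in pairs and not pairs.remove((o, old))}
print(sorted(pairs))  # [('o1', 'weight'), ('o2', 'colour'), ('o3', 'weight')]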
def get_event_triggers(self):
"""
Returns dict of supported events.
Key = Event Type
List = Channels that have that event activated
"""
events = {}
nvrflag = False
event_xml = []
url = '%s/ISAPI/Event/triggers' % self.root_url
try:
response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)
if response.status_code == requests.codes.not_found:
# Try alternate URL for triggers
_LOGGING.debug('Using alternate triggers URL.')
url = '%s/Event/triggers' % self.root_url
response = self.hik_request.get(url)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch events, error: %s', err)
return None
if response.status_code != 200:
# If we didn't receive 200, abort
return None
# pylint: disable=too-many-nested-blocks
try:
content = ET.fromstring(response.text)
if content[0].find(self.element_query('EventTrigger')):
event_xml = content[0].findall(
self.element_query('EventTrigger'))
elif content.find(self.element_query('EventTrigger')):
# This is either an NVR or a rebadged camera
event_xml = content.findall(
self.element_query('EventTrigger'))
for eventtrigger in event_xml:
ettype = eventtrigger.find(self.element_query('eventType'))
# Catch empty xml definitions
if ettype is None:
break
etnotify = eventtrigger.find(
self.element_query('EventTriggerNotificationList'))
etchannel = None
etchannel_num = 0
for node_name in CHANNEL_NAMES:
etchannel = eventtrigger.find(
self.element_query(node_name))
if etchannel is not None:
try:
# Need to make sure this is actually a number
etchannel_num = int(etchannel.text)
if etchannel_num > 1:
# Must be an nvr
nvrflag = True
break
except ValueError:
# Field must not be an integer
pass
if etnotify:
for notifytrigger in etnotify:
ntype = notifytrigger.find(
self.element_query('notificationMethod'))
if ntype.text == 'center' or ntype.text == 'HTTP':
"""
If we got this far we found an event that we want
to track.
"""
events.setdefault(ettype.text, []) \
.append(etchannel_num)
except (AttributeError, ET.ParseError) as err:
_LOGGING.error(
'There was a problem finding an element: %s', err)
return None
if nvrflag:
self.device_type = NVR_DEVICE
else:
self.device_type = CAM_DEVICE
_LOGGING.debug('Processed %s as %s Device.',
self.cam_id, self.device_type)
_LOGGING.debug('Found events: %s', events)
self.hik_request.close()
return events
def get_mx_records(domain):
"""
Gets an array of MXRecords associated with the specified domain.
:param domain:
:return: [MXRecord]
"""
DNS.DiscoverNameServers()
request = DNS.Request()
response = request.req(name=domain, qtype=DNS.Type.MX)
mx_records = []
for answer in response.answers:
mx_records.append(MXRecord(priority=answer['data'][0], exchange=answer['data'][1], domain=domain))
return sorted(mx_records, key=lambda record: record.priority)
def print_markdown(data, title=None):
"""Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
"""
markdown = []
for key, value in data.items():
if isinstance(value, basestring_) and Path(value).exists():
continue
markdown.append("* **{}:** {}".format(key, unicode_(value)))
if title:
print("\n## {}".format(title))
print("\n{}\n".format("\n".join(markdown))) | 0.001815 |
def showDecidePage(request, openid_request):
"""
Render a page to the user so a trust decision can be made.
@type openid_request: openid.server.server.CheckIDRequest
"""
trust_root = openid_request.trust_root
return_to = openid_request.return_to
try:
# Stringify because template's ifequal can only compare to strings.
trust_root_valid = verifyReturnTo(trust_root, return_to) \
and "Valid" or "Invalid"
except DiscoveryFailure, err:
trust_root_valid = "DISCOVERY_FAILED"
except HTTPFetchingError, err:
trust_root_valid = "Unreachable"
pape_request = pape.Request.fromOpenIDRequest(openid_request)
return direct_to_template(
request,
'server/trust.html',
{'trust_root': trust_root,
'trust_handler_url':getViewURL(request, processTrustResult),
'trust_root_valid': trust_root_valid,
'pape_request': pape_request,
})
def _locate_files_to_delete(algorithm, rotated_files, next_rotation_id):
"""Looks for hanoi_rotator generated files that occupy the same slot
that will be given to rotation_id.
"""
rotation_slot = algorithm.id_to_slot(next_rotation_id)
for a_path, a_rotation_id in rotated_files:
if rotation_slot == algorithm.id_to_slot(a_rotation_id):
yield a_path
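# A usage sketch with a toy algorithm object; its id_to_slot() is hypothetical
# and only stands in for the rotator's real slot assignment, to show how the
# generator flags files whose slot the next rotation would reuse.
class ToySlots:
    def id_to_slot(self, rotation_id):
        return rotation_id % 3  # hypothetical 3-slot scheme

rotated = [('backup.0', 0), ('backup.1', 1), ('backup.2', 2)]
print(list(_locate_files_to_delete(ToySlots(), rotated, next_rotation_id=3)))
# ['backup.0']  (slot 3 % 3 == 0 collides with rotation 0)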
def hard_limit_remote(self,
yidx,
ridx,
rtype='y',
rmin=None,
rmax=None,
min_yset=0,
max_yset=0):
"""Limit the output of yidx if the remote y is not within the limits
This function needs to be modernized.
"""
ny = len(yidx)
assert ny == len(
ridx), "Length of output vars and remote vars does not match"
assert rtype in ('x',
'y'), "ridx must be either y (algeb) or x (state)"
if isinstance(min_yset, (int, float)):
min_yset = matrix(min_yset, (ny, 1), 'd')
if isinstance(max_yset, (int, float)):
max_yset = matrix(max_yset, (ny, 1), 'd')
above_idx, below_idx = list(), list()
yidx = matrix(yidx)
if rmax:
# find the over-limit remote idx
above = ageb(self.__dict__[rtype][ridx], rmax)
above_idx = index(above, 1.0)
# reset the y values based on the remote limit violations
self.y[yidx[above_idx]] = max_yset[above_idx]
self.zymax[yidx[above_idx]] = 0
if rmin:
below = aleb(self.__dict__[rtype][ridx], rmin)
below_idx = index(below, 1.0)
self.y[yidx[below_idx]] = min_yset[below_idx]
self.zymin[yidx[below_idx]] = 0
idx = above_idx + below_idx
self.g[yidx[idx]] = 0
if len(idx) > 0:
self.factorize = True
def fft_bandpassfilter(data, fs, lowcut, highcut):
"""
http://www.swharden.com/blog/2009-01-21-signal-filtering-with-python/#comment-16801
"""
fft = np.fft.fft(data)
# n = len(data)
# timestep = 1.0 / fs
# freq = np.fft.fftfreq(n, d=timestep)
bp = fft.copy()
# Zero out fft coefficients
# bp[10:-10] = 0
# Normalise
# bp *= real(fft.dot(fft))/real(bp.dot(bp))
bp *= fft.dot(fft) / bp.dot(bp)
# must multiply by 2 to get the correct amplitude
ibp = 12 * np.fft.ifft(bp)
return ibp
def show_release_file(root, request):
"""
Download a release file.
Must be used with :func:`pyshop.helpers.download.renderer_factory`
to download the release file.
:return: download information
:rtype: dict
"""
settings = request.registry.settings
whlify = asbool(settings.get('pyshop.mirror.wheelify', '0'))
session = DBSession()
f = ReleaseFile.by_id(session, int(request.matchdict['file_id']))
whlify = whlify and f.package_type == 'sdist'
filename = f.filename_whlified if whlify else f.filename
url = f.url
if url and url.startswith('http://pypi.python.org'):
url = 'https' + url[4:]
rv = {'url': url,
'filename': filename,
'original': f.filename,
'whlify': whlify
}
f.downloads += 1
f.release.downloads += 1
f.release.package.downloads += 1
session.add(f.release.package)
session.add(f.release)
session.add(f)
request.response.etag = f.md5_digest
request.response.cache_control = 'max-age=31557600, public'
request.response.date = datetime.datetime.utcnow()
return rv
def simxSetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def getLocalIPaddress():
"""visible to other machines on LAN"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('google.com', 0))
my_local_ip = s.getsockname()[0] # takes ~0.005s
#from netifaces import interfaces, ifaddresses, AF_INET
#full solution in the event of multiple NICs (network interface cards) on the PC
#def ip4_addresses():
# ip_list = []
# for interface in interfaces():
# for link in ifaddresses(interface)[AF_INET]: # If IPv6 addresses are needed instead, use AF_INET6 instead of AF_INET
# ip_list.append(link['addr'])
# return ip_list
except Exception:
my_local_ip = None
return my_local_ip
async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
"""
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
"""
LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
if not self.wallet.handle:
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if not ok_rev_reg_id(rr_id):
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_size = rr_size or 64
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
dir_tails = self.dir_tails_top(rr_id)
dir_target = self.dir_tails_target(rr_id)
if self.external:
try:
makedirs(dir_target, exist_ok=False)
except FileExistsError:
LOGGER.warning(
'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
dir_target,
rr_id)
rmtree(dir_target)
makedirs(dir_target, exist_ok=False)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer(
'default',
json.dumps({
'base_dir': dir_target,
'uri_pattern': ''
}))
(created_rr_id, rr_def_json, rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
self.wallet.handle,
self.did,
'CL_ACCUM',
tag,
cd_id,
json.dumps({
'max_cred_num': rr_size,
'issuance_type': 'ISSUANCE_BY_DEFAULT'
}),
tails_writer_handle)
tails_hash = basename(Tails.unlinked(dir_target).pop())
with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
print(rr_def_json, file=rr_def_fh)
with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
print(rr_ent_json, file=rr_ent_fh)
Tails.associate(dir_tails, created_rr_id, tails_hash) # associate last: symlink signals completion
LOGGER.debug('RevRegBuilder.create_rev_reg <<<')
def get_canonical_key_id(self, key_id):
"""
get_canonical_key_id is used by get_canonical_key, see the comment
for that method for more explanation.
Keyword arguments:
key_id -- the key id (e.g. '12345')
returns the canonical key id (e.g. '12')
"""
shard_num = self.get_shard_num_by_key_id(key_id)
return self._canonical_keys[shard_num]
def autoencoder_residual_discrete_big():
"""Residual discrete autoencoder model, big version."""
hparams = autoencoder_residual_discrete()
hparams.hidden_size = 128
hparams.max_hidden_size = 4096
hparams.bottleneck_noise = 0.1
hparams.residual_dropout = 0.4
return hparams
def set_std(self, std):
"""Set the standard we'll be using (isupport CASEMAPPING)."""
if not hasattr(self, '_std'):
IMap.__init__(self)
# translation based on std
self._std = std.lower()
# set casemapping maps
self._set_transmaps()
# create translations
if self._lower_chars:
self._lower_trans = str.maketrans(self._upper_chars, self._lower_chars)
if self._upper_chars:
self._upper_trans = str.maketrans(self._lower_chars, self._upper_chars)
def residmap(self, prefix='', **kwargs):
"""Generate 2-D spatial residual maps using the current ROI
model and the convolution kernel defined with the `model`
argument.
Parameters
----------
prefix : str
String that will be prefixed to the output residual map files.
{options}
Returns
-------
maps : dict
A dictionary containing the `~fermipy.utils.Map` objects
for the residual significance and amplitude.
"""
timer = Timer.create(start=True)
self.logger.info('Generating residual maps')
schema = ConfigSchema(self.defaults['residmap'])
config = schema.create_config(self.config['residmap'], **kwargs)
# Defining default properties of test source model
config['model'].setdefault('Index', 2.0)
config['model'].setdefault('SpectrumType', 'PowerLaw')
config['model'].setdefault('SpatialModel', 'PointSource')
config['model'].setdefault('Prefactor', 1E-13)
o = self._make_residual_map(prefix, **config)
if config['make_plots']:
plotter = plotting.AnalysisPlotter(self.config['plotting'],
fileio=self.config['fileio'],
logging=self.config['logging'])
plotter.make_residmap_plots(o, self.roi)
self.logger.info('Finished residual maps')
outfile = utils.format_filename(self.workdir, 'residmap',
prefix=[o['name']])
if config['write_fits']:
o['file'] = os.path.basename(outfile) + '.fits'
self._make_residmap_fits(o, outfile + '.fits')
if config['write_npy']:
np.save(outfile + '.npy', o)
self.logger.info('Execution time: %.2f s', timer.elapsed_time)
return o
def process_settings(pelicanobj):
"""Sets user specified MathJax settings (see README for more details)"""
mathjax_settings = {}
# NOTE TO FUTURE DEVELOPERS: Look at the README and what is happening in
# this function if any additional changes to the mathjax settings need to
# be incorporated. Also, please inline comment what the variables
# will be used for
# Default settings
mathjax_settings['auto_insert'] = True # if set to true, it will insert mathjax script automatically into content without needing to alter the template.
mathjax_settings['align'] = 'center' # controls alignment of displayed equations (values can be: left, right, center)
mathjax_settings['indent'] = '0em' # if above is not set to 'center', then this setting acts as an indent
mathjax_settings['show_menu'] = 'true' # controls whether to attach mathjax contextual menu
mathjax_settings['process_escapes'] = 'true' # controls whether escapes are processed
mathjax_settings['latex_preview'] = 'TeX' # controls what user sees while waiting for LaTeX to render
mathjax_settings['color'] = 'inherit' # controls color math is rendered in
mathjax_settings['linebreak_automatic'] = 'false' # Set to false by default for performance reasons (see http://docs.mathjax.org/en/latest/output.html#automatic-line-breaking)
mathjax_settings['tex_extensions'] = '' # latex extensions that can be embedded inside mathjax (see http://docs.mathjax.org/en/latest/tex.html#tex-and-latex-extensions)
mathjax_settings['responsive'] = 'false' # Tries to make displayed math responsive
mathjax_settings['responsive_break'] = '768' # The break point at which it math is responsively aligned (in pixels)
mathjax_settings['mathjax_font'] = 'default' # forces mathjax to use the specified font.
mathjax_settings['process_summary'] = BeautifulSoup is not None # will fix up summaries if math is cut off. Requires beautiful soup
mathjax_settings['message_style'] = 'normal' # This value controls the verbosity of the messages in the lower left-hand corner. Set it to "none" to eliminate all messages
mathjax_settings['font_list'] = ['STIX', 'TeX'] # Include in order of preference among TeX, STIX-Web, Asana-Math, Neo-Euler, Gyre-Pagella, Gyre-Termes and Latin-Modern
mathjax_settings['equation_numbering'] = 'none' # AMS, auto, none
# Source for MathJax
mathjax_settings['source'] = "'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/latest.js?config=TeX-AMS-MML_HTMLorMML'"
# Get the user specified settings
try:
settings = pelicanobj.settings['MATH_JAX']
except:
settings = None
# If no settings have been specified, then return the defaults
if not isinstance(settings, dict):
return mathjax_settings
# The following mathjax settings can be set via the settings dictionary
for key, value in ((key, settings[key]) for key in settings):
# Iterate over dictionary in a way that is compatible with both version 2
# and 3 of python
if key == 'align':
typeVal = isinstance(value, string_type)
if not typeVal:
continue
if value == 'left' or value == 'right' or value == 'center':
mathjax_settings[key] = value
else:
mathjax_settings[key] = 'center'
if key == 'indent':
mathjax_settings[key] = value
if key == 'source':
mathjax_settings[key] = value
if key == 'show_menu' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'message_style':
mathjax_settings[key] = value if value is not None else 'none'
if key == 'auto_insert' and isinstance(value, bool):
mathjax_settings[key] = value
if key == 'process_escapes' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'latex_preview':
typeVal = isinstance(value, string_type)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'color':
typeVal = isinstance(value, string_type)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'linebreak_automatic' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'process_summary' and isinstance(value, bool):
if value and BeautifulSoup is None:
print("BeautifulSoup4 is needed for summaries to be processed by render_math\nPlease install it")
value = False
mathjax_settings[key] = value
if key == 'responsive' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'responsive_break' and isinstance(value, int):
mathjax_settings[key] = str(value)
if key == 'tex_extensions' and isinstance(value, list):
# filter string values, then add '' to them
value = filter(lambda string: isinstance(string, string_type), value)
value = map(lambda string: "'%s'" % string, value)
mathjax_settings[key] = ',' + ','.join(value)
if key == 'mathjax_font':
typeVal = isinstance(value, string_type)
if not typeVal:
continue
value = value.lower()
if value == 'sanserif':
value = 'SansSerif'
elif value == 'fraktur':
value = 'Fraktur'
elif value == 'typewriter':
value = 'Typewriter'
else:
value = 'default'
mathjax_settings[key] = value
if key == 'font_list' and isinstance(value, list):
# make an array string from the list
value = filter(lambda string: isinstance(string, string_type), value)
value = map(lambda string: ",'%s'" % string, value)
mathjax_settings[key] = ''.join(value)[1:]
if key == 'equation_numbering':
mathjax_settings[key] = value if value is not None else 'none'
return mathjax_settings
def get_devicelist(home_hub_ip='192.168.1.254'):
"""Retrieve data from BT Home Hub 5 and return parsed result.
"""
url = 'http://{}/'.format(home_hub_ip)
try:
response = requests.get(url, timeout=5)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if response.status_code == 200:
return parse_devicelist(response.text)
else:
_LOGGER.error("Invalid response from Home Hub: %s", response) | 0.001976 |
def get_pos(vcf_line):
"""
Very lightweight parsing of a vcf line to get position.
Returns a dict containing:
'chrom': index of chromosome (int), indicates sort order
'pos': position on chromosome (int)
"""
if not vcf_line:
return None
vcf_data = vcf_line.strip().split('\t')
return_data = dict()
return_data['chrom'] = CHROM_INDEX[vcf_data[0]]
return_data['pos'] = int(vcf_data[1])
return return_data
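# A minimal sketch assuming it runs in the same namespace as get_pos and that
# CHROM_INDEX maps chromosome names to sort order (the tiny mapping and the
# VCF line below are hypothetical).
CHROM_INDEX = {'1': 1, '2': 2, 'X': 23}
vcf_line = '1\t10177\trs367896724\tA\tAC\t100\tPASS\t.'
print(get_pos(vcf_line))  # {'chrom': 1, 'pos': 10177}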
def add_peer_address(self, ext_bgp_peer_or_fw, rank=None):
"""
Add a peer address. Peer address types are ExternalBGPPeer
or Engine.
:raises ElementNotFound: If element specified does not exist
"""
if ext_bgp_peer_or_fw.typeof == 'external_bgp_peer':
ref = 'external_bgp_peer_address_ref'
else: # engine
ref = 'fwcluster_peer_address_ref'
self.conditions.append(
{ref: ext_bgp_peer_or_fw.href,
'rank': rank,
'type': 'peer_address'})
def deactivate_resource(cls, id):
r"""
Used to deactivate a node of type 'cls' in response to a DELETE request. deactivate_resource should only \
be invoked on a resource when the client specifies a DELETE request.
:param id: The 'id' field of the node to update in the database. The id field must be set in the model -- it \
is not the same as the node id
:return: An HTTP response object in accordance with the specification at \
http://jsonapi.org/format/#crud-deleting
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
this_resource.deactivate()
r = make_response('')
r.headers['Content-Type'] = "application/vnd.api+json; charset=utf-8"
r.status_code = http_error_codes.NO_CONTENT
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def context(self):
"""
Create a context manager that ensures code runs within action's context.
The action does NOT finish when the context is exited.
"""
parent = _ACTION_CONTEXT.set(self)
try:
yield self
finally:
_ACTION_CONTEXT.reset(parent)
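# The same set-then-reset pattern rebuilt as a standalone sketch with the
# standard library: contextvars.ContextVar stands in for _ACTION_CONTEXT and
# contextlib.contextmanager turns the generator into a context manager.
import contextlib
import contextvars

_ACTION = contextvars.ContextVar('action', default=None)

@contextlib.contextmanager
def action_context(action):
    token = _ACTION.set(action)
    try:
        yield action
    finally:
        _ACTION.reset(token)

with action_context('outer'):
    print(_ACTION.get())  # 'outer' inside the block
print(_ACTION.get())      # back to None after reset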
def sendNotification(snmpDispatcher, authData, transportTarget,
notifyType, *varBinds, **options):
"""Creates a generator to send SNMP notification.
When iterator gets advanced by :py:mod:`asyncio` main loop,
SNMP TRAP or INFORM notification is send (:RFC:`1905#section-4.2.6`).
The iterator yields :py:class:`asyncio.Future` which gets done whenever
response arrives or error occurs.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asynio-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData`
Class instance representing SNMPv1/v2c credentials.
transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncio.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.v1arch.asyncio.Udp6TransportTarget` Class instance representing
transport type along with SNMP peer address.
notifyType : str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`,
unless :py:class:`~pysnmp.smi.rfc1902.ObjectType` or
:py:class:`~pysnmp.smi.rfc1902.NotificationType` is present
among `varBinds` in which case `lookupMib` gets automatically
enabled.
Yields
------
errorIndication: str
True value indicates SNMP engine error.
errorStatus: str
True value indicates SNMP PDU error.
errorIndex: int
Non-zero value refers to `varBinds[errorIndex-1]`
varBinds: tuple
A sequence of OID-value pairs in form of base SNMP types (if
`lookupMib` is `False`) or :py:class:`~pysnmp.smi.rfc1902.ObjectType`
class instances (if `lookupMib` is `True`) representing MIB variables
returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> import asyncio
>>> from pysnmp.hlapi.asyncio import *
>>>
>>> @asyncio.coroutine
... def run():
... errorIndication, errorStatus, errorIndex, varBinds = yield from sendNotification(
... SnmpDispatcher(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 162)),
... 'trap',
... NotificationType(ObjectIdentity('IF-MIB', 'linkDown')))
... print(errorIndication, errorStatus, errorIndex, varBinds)
...
>>> asyncio.get_event_loop().run_until_complete(run())
(None, 0, 0, [])
>>>
"""
sysUpTime = v2c.apiTrapPDU.sysUpTime
snmpTrapOID = v2c.apiTrapPDU.snmpTrapOID
def _ensureVarBinds(varBinds):
# Add sysUpTime if not present already
if not varBinds or varBinds[0][0] != sysUpTime:
varBinds.insert(0, (v2c.ObjectIdentifier(sysUpTime), v2c.TimeTicks(0)))
# Search for and reposition sysUpTime if it's elsewhere
for idx, varBind in enumerate(varBinds[1:]):
if varBind[0] == sysUpTime:
varBinds[0] = varBind
del varBinds[idx + 1]
break
if len(varBinds) < 2:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
# Search for and reposition snmpTrapOID if it's elsewhere
for idx, varBind in enumerate(varBinds[2:]):
if varBind[0] == snmpTrapOID:
del varBinds[idx + 2]
if varBinds[1][0] == snmpTrapOID:
varBinds[1] = varBind
else:
varBinds.insert(1, varBind)
break
# Fail on missing snmpTrapOID
if varBinds[1][0] != snmpTrapOID:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
return varBinds
def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
if future.cancelled():
return
errorStatus = v2c.apiTrapPDU.getErrorStatus(rspPdu)
errorIndex = v2c.apiTrapPDU.getErrorIndex(rspPdu)
varBinds = v2c.apiTrapPDU.getVarBinds(rspPdu)
try:
varBindsUnmade = VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, varBinds,
lookupMib)
except Exception as e:
future.set_exception(e)
else:
future.set_result(
(errorIndication, errorStatus, errorIndex, varBindsUnmade)
)
lookupMib = options.get('lookupMib')
if not lookupMib and any(isinstance(x, (NotificationType, ObjectType))
for x in varBinds):
lookupMib = True
if lookupMib:
varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
if notifyType == 'trap':
reqPdu = v2c.TrapPDU()
else:
reqPdu = v2c.InformRequestPDU()
v2c.apiTrapPDU.setDefaults(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, varBinds)
varBinds = v2c.apiTrapPDU.getVarBinds(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(varBinds))
if authData.mpModel == 0:
reqPdu = rfc2576.v2ToV1(reqPdu)
future = asyncio.Future()
snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
if notifyType == 'trap':
def __trapFun(future):
if future.cancelled():
return
future.set_result((None, 0, 0, []))
loop = asyncio.get_event_loop()
loop.call_soon(__trapFun, future)
return future
def add_state(self, new_data, method='inc'):
"""Create new state and update related links and compressed state"""
self.sfx.append(0)
self.rsfx.append([])
self.trn.append([])
self.lrs.append(0)
# Experiment with pointer-based
self.f_array.add(new_data)
self.n_states += 1
i = self.n_states - 1
# assign new transition from state i-1 to i
self.trn[i - 1].append(i)
k = self.sfx[i - 1]
pi_1 = i - 1
# iteratively backtrack suffixes from state i-1
if method == 'inc':
suffix_candidate = 0
elif method == 'complete':
suffix_candidate = []
else:
suffix_candidate = 0
while k is not None:
if self.params['dfunc'] == 'other':
# dvec = self.dfunc_handle([new_data],
# self.f_array[self.trn[k]])[0]
dvec = dist.cdist([new_data],
self.f_array[self.trn[k]],
metric=self.params['dfunc_handle'])[0]
else:
dvec = dist.cdist([new_data],
self.f_array[self.trn[k]],
metric=self.params['dfunc'])[0]
I = np.where(dvec < self.params['threshold'])[0]
if len(I) == 0: # if no transition from suffix
self.trn[k].append(i) # Add new forward link to unvisited state
pi_1 = k
if method != 'complete':
k = self.sfx[k]
else:
if method == 'inc':
if I.shape[0] == 1:
suffix_candidate = self.trn[k][I[0]]
else:
suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]]
break
elif method == 'complete':
suffix_candidate.append((self.trn[k][I[np.argmin(dvec[I])]],
np.min(dvec)))
else:
suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]]
break
if method == 'complete':
k = self.sfx[k]
if method == 'complete':
if not suffix_candidate:
self.sfx[i] = 0
self.lrs[i] = 0
self.latent.append([i])
self.data.append(len(self.latent) - 1)
else:
sorted_suffix_candidates = sorted(suffix_candidate,
key=lambda suffix: suffix[1])
self.sfx[i] = sorted_suffix_candidates[0][0]
self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
self.latent[self.data[self.sfx[i]]].append(i)
self.data.append(self.data[self.sfx[i]])
else:
if k is None:
self.sfx[i] = 0
self.lrs[i] = 0
self.latent.append([i])
self.data.append(len(self.latent) - 1)
else:
self.sfx[i] = suffix_candidate
self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
self.latent[self.data[self.sfx[i]]].append(i)
self.data.append(self.data[self.sfx[i]])
# Temporary adjustment
k = self._find_better(i, self.data[i - self.lrs[i]])
if k is not None:
self.lrs[i] += 1
self.sfx[i] = k
self.rsfx[self.sfx[i]].append(i)
if self.lrs[i] > self.max_lrs[i - 1]:
self.max_lrs.append(self.lrs[i])
else:
self.max_lrs.append(self.max_lrs[i - 1])
self.avg_lrs.append(self.avg_lrs[i - 1] * ((i - 1.0) / (self.n_states - 1.0)) +
self.lrs[i] * (1.0 / (self.n_states - 1.0)))
def invoke(self, method, _this, **kwargs):
"""Invoke a method on the server.
>>> client.invoke('CurrentTime', client.si)
:param method: The method to invoke, as found in the SDK.
:type method: str
:param _this: The managed object reference against which to invoke \
the method.
:type _this: ManagedObject
:param kwargs: The arguments to pass to the method, as \
found in the SDK.
:type kwargs: TODO
"""
if (self._logged_in is False and
method not in ["Login", "RetrieveServiceContent"]):
logger.critical("Cannot exec %s unless logged in", method)
raise NotLoggedInError("Cannot exec %s unless logged in" % method)
for kwarg in kwargs:
kwargs[kwarg] = self._marshal(kwargs[kwarg])
result = getattr(self.service, method)(_this=_this, **kwargs)
if hasattr(result, '__iter__') is False:
logger.debug("Returning non-iterable result")
return result
# We must traverse the result and convert any ManagedObjectReference
# to a psphere class, this will then be lazy initialised on use
logger.debug(result.__class__)
logger.debug("Result: %s", result)
logger.debug("Length: %s", len(result))
if type(result) == list:
new_result = []
for item in result:
new_result.append(self._unmarshal(item))
else:
new_result = self._unmarshal(result)
logger.debug("Finished in invoke.")
#property = self.find_and_destroy(property)
#print result
# Return the modified result to the caller
return new_result
def index():
"""Show all the posts, most recent first."""
posts = Post.query.order_by(Post.created.desc()).all()
return render_template("blog/index.html", posts=posts)
def loadmat(file_name, mdict=None, appendmat=True,
variable_names=None,
marshaller_collection=None, **keywords):
""" Loads data to a MATLAB MAT file.
Reads data from the specified variables (or all) in a MATLAB MAT
file. There are many different formats of MAT files. This package
can only handle the HDF5 based ones (the version 7.3 and later).
As SciPy's ``scipy.io.loadmat`` function can handle the earlier
formats, if this function cannot read the file, it will dispatch it
onto the scipy function with all the calling arguments it uses
passed on. This function is modelled after the SciPy one (arguments
not specific to this package have the same names, etc.).
Parameters
----------
file_name : str
Name of the MAT file to read from. The '.mat' extension is
added on automatically if not present if `appendmat` is set to
``True``.
mdict : dict, optional
The dictionary to insert read variables into
appendmat : bool, optional
Whether to append the '.mat' extension to `file_name` if it
doesn't already end in it or not.
variable_names: None or sequence, optional
The variable names to read from the file. ``None`` selects all.
marshaller_collection : MarshallerCollection, optional
Collection of marshallers from disk to use. Only applicable if
not dispatching to SciPy (version 7.3 and newer files).
**keywords :
Additional keywords arguments to be passed onto
``scipy.io.loadmat`` if dispatching to SciPy if the file is not
a version 7.3 or later format.
Returns
-------
dict
Dictionary of all the variables read from the MAT file (name
as the key, and content as the value).
Raises
------
ImportError
If it is not a version 7.3 .mat file and the ``scipy`` module
can't be found when dispatching to SciPy.
exceptions.CantReadError
If reading the data can't be done.
Notes
-----
Writing the same data and then reading it back from disk using the
HDF5 based version 7.3 format (the functions in this package) or the
older format (SciPy functions) can lead to very different
results. Each package supports a different set of data types and
converts them to and from the same MATLAB types differently.
See Also
--------
savemat : Equivalent function to do writing.
scipy.io.loadmat : SciPy function this one models after and
dispatches to.
Options
reads : Function used to do the actual reading.
"""
# Will first assume that it is the HDF5 based 7.3 format. If an
# OSError occurs, then it wasn't an HDF5 file and the scipy function
# can be tried instead.
try:
# Make the options with the given marshallers.
options = Options(marshaller_collection=marshaller_collection)
# Append .mat if it isn't on the end of the file name and we are
# supposed to.
if appendmat and not file_name.endswith('.mat'):
filename = file_name + '.mat'
else:
filename = file_name
# Read everything if we were instructed.
if variable_names is None:
data = dict()
with h5py.File(filename, mode='r') as f:
for k in f:
# Read if not group_for_references. Data that
# produces errors when read is discarded (the OSError
# that would happen if this is not an HDF5 file
# would already have happened when opening the
# file).
if f[k].name != options.group_for_references:
try:
data[utilities.unescape_path(k)] = \
utilities.read_data(f, f, k, options)
except:
pass
else:
# Extract the desired fields all together and then pack them
# into a dictionary one by one.
values = reads(paths=variable_names, filename=filename,
options=options)
data = dict()
for i, name in enumerate(variable_names):
data[name] = values[i]
# Read all the variables, stuff them into mdict, and return it.
if mdict is None:
mdict = dict()
for k, v in data.items():
mdict[k] = v
return mdict
except OSError:
import scipy.io
return scipy.io.loadmat(file_name, mdict, appendmat=appendmat,
variable_names=variable_names,
**keywords)
def atlas_renderer(layout, coverage_layer, output_path, file_format):
"""Extract composition using atlas generation.
:param layout: QGIS Print Layout object used for producing the report.
:type layout: qgis.core.QgsPrintLayout
:param coverage_layer: Coverage Layer used for atlas map.
:type coverage_layer: QgsMapLayer
:param output_path: The output path of the product.
:type output_path: str
:param file_format: File format of map output, 'pdf' or 'png'.
:type file_format: str
:return: Generated output path(s).
:rtype: str, list
"""
# set the composer map to be atlas driven
composer_map = layout_item(
layout, 'impact-map', QgsLayoutItemMap)
composer_map.setAtlasDriven(True)
composer_map.setAtlasScalingMode(QgsLayoutItemMap.Auto)
# setup the atlas composition and composition atlas mode
atlas_composition = layout.atlas()
atlas_composition.setCoverageLayer(coverage_layer)
atlas_on_single_file = layout.customProperty('singleFile', True)
if file_format == QgisComposerComponentsMetadata.OutputFormat.PDF:
if not atlas_composition.filenameExpression():
atlas_composition.setFilenameExpression(
"'output_'||@atlas_featurenumber")
output_directory = os.path.dirname(output_path)
# we need to set the predefined scales for atlas
project_scales = []
scales = QgsProject.instance().readListEntry(
"Scales", "/ScalesList")[0]
has_project_scales = QgsProject.instance().readBoolEntry(
"Scales", "/useProjectScales")[0]
if not has_project_scales or not scales:
scales_string = str(general_setting("Map/scales", PROJECT_SCALES))
scales = scales_string.split(',')
for scale in scales:
parts = scale.split(':')
if len(parts) == 2:
project_scales.append(float(parts[1]))
layout.reportContext().setPredefinedScales(project_scales)
settings = QgsLayoutExporter.PdfExportSettings()
LOGGER.info('Exporting Atlas')
atlas_output = []
if atlas_on_single_file:
res, error = QgsLayoutExporter.exportToPdf(
atlas_composition, output_path, settings)
atlas_output.append(output_path)
else:
res, error = QgsLayoutExporter.exportToPdfs(
atlas_composition, output_directory, settings)
if res != QgsLayoutExporter.Success:
LOGGER.error(error)
return atlas_output
def _unsetLearningMode(self):
"""
Unsets the learning mode, to start inference.
"""
for column in self.L4Columns:
column.setParameter("learn", 0, False)
for column in self.L6Columns:
column.setParameter("learn", 0, False)
for column in self.L2Columns:
column.setParameter("learningMode", 0, False)
for column in self.L5Columns:
column.setParameter("learningMode", 0, False) | 0.01171 |
def lastIndexOf(self, item):
"""
Return the position of the last occurrence of an
item in an array, or -1 if the item is not included in the array.
"""
array = self.obj
i = len(array) - 1
if not (self._clean.isList() or self._clean.isTuple()):
return self._wrap(-1)
while i > -1:
if array[i] is item:
return self._wrap(i)
i -= 1
return self._wrap(-1)
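# A plain-function rerun of the same reverse scan. Note the method above
# compares with "is" (identity), so interned small ints match here but equal
# yet distinct objects may not.
def last_index_of(array, item):
    i = len(array) - 1
    while i > -1:
        if array[i] is item:
            return i
        i -= 1
    return -1

print(last_index_of([1, 2, 3, 1, 2, 3], 2))  # 4
print(last_index_of([1, 2, 3], 'missing'))   # -1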
def freeze(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names, graph_name='Graph',
conversion_out_dir_path=None, checkpoint_out_path=None, use_padding_same=False):
"""Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names."""
with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,
conversion_out_dir_path=conversion_out_dir_path,
use_padding_same=use_padding_same) as sess:
saver = tf.train.Saver()
with (dummy_context_mgr(checkpoint_out_path) or util.TemporaryDirectory()) as temp_dir_path:
checkpoint_path = checkpoint_out_path or os.path.join(temp_dir_path, 'pose.ckpt')
saver.save(sess, checkpoint_path)
output_node_names = util.output_node_names_string_as_list(output_node_names)
tf_freeze.freeze_from_checkpoint(checkpoint_path, output_file_path, output_node_names)
def visit_Call(self, node: AST, dfltChaining: bool = True) -> str:
"""Return `node`s representation as function call."""
args = node.args
try:
kwds = node.keywords
except AttributeError:
kwds = []
self.compact = True
args_src = (self.visit(arg) for arg in args)
kwds_src = (self.visit(kwd) for kwd in kwds)
param_src = ', '.join(chain(args_src, kwds_src))
src = f"{self.visit(node.func)}({param_src})"
self.compact = False
return src
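# A sketch of what the visitor receives for a call expression, shown with the
# standard ast module (Python 3.8+ for ast.Constant); the surrounding
# source-generator class is not reproduced here.
import ast

node = ast.parse("min(3, x, key=abs)", mode="eval").body
print(type(node).__name__)                    # Call
print([type(a).__name__ for a in node.args])  # ['Constant', 'Name']
print([k.arg for k in node.keywords])         # ['key']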
def imap_batches_unordered(self, batches, chunksize=1):
"""
Augment batches from a generator in a way that does not guarantee to preserve order.
Parameters
----------
batches : generator of imgaug.augmentables.batches.Batch
The batches to augment, provided as a generator. Each call to the generator should yield exactly one
batch.
chunksize : None or int, optional
Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
performance.
Yields
------
imgaug.augmentables.batches.Batch
Augmented batch.
"""
assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
+ "Call map_batches() if you use lists.") % (type(batches),)
# TODO change this to 'yield from' once switched to 3.3+
gen = self.pool.imap_unordered(_Pool_starworker, self._handle_batch_ids_gen(batches), chunksize=chunksize)
for batch in gen:
yield batch
def create(self, company, timezone, country):
"""Creates a client."""
body = {
"CompanyName": company,
"TimeZone": timezone,
"Country": country}
response = self._post("/clients.json", json.dumps(body))
self.client_id = json_to_py(response)
return self.client_id
def rebuildGrid( self ):
"""
Rebuilds the ruler data.
"""
vruler = self.verticalRuler()
hruler = self.horizontalRuler()
rect = self._buildData['grid_rect']
# process the vertical ruler
h_lines = []
h_alt = []
h_notches = []
vpstart = vruler.padStart()
vnotches = vruler.notches()
vpend = vruler.padEnd()
vcount = len(vnotches) + vpstart + vpend
deltay = rect.height() / max((vcount - 1), 1)
y = rect.bottom()
alt = False
for i in range(vcount):
h_lines.append(QLineF(rect.left(), y, rect.right(), y))
# store alternate color
if ( alt ):
alt_rect = QRectF(rect.left(), y, rect.width(), deltay)
h_alt.append(alt_rect)
# store notch information
nidx = i - vpstart
if ( 0 <= nidx and nidx < len(vnotches) ):
notch = vnotches[nidx]
notch_rect = QRectF(0, y - 3, rect.left() - 3, deltay)
h_notches.append((notch_rect, notch))
y -= deltay
alt = not alt
self._buildData['grid_h_lines'] = h_lines
self._buildData['grid_h_alt'] = h_alt
self._buildData['grid_h_notches'] = h_notches
# process the horizontal ruler
v_lines = []
v_alt = []
v_notches = []
hpstart = hruler.padStart()
hnotches = hruler.notches()
hpend = hruler.padEnd()
hcount = len(hnotches) + hpstart + hpend
deltax = rect.width() / max((hcount - 1), 1)
x = rect.left()
alt = False
for i in range(hcount):
v_lines.append(QLineF(x, rect.top(), x, rect.bottom()))
# store alternate info
if ( alt ):
alt_rect = QRectF(x - deltax, rect.top(), deltax, rect.height())
v_alt.append(alt_rect)
# store notch information
nidx = i - hpstart
if ( 0 <= nidx and nidx < len(hnotches) ):
notch = hnotches[nidx]
notch_rect = QRectF(x - (deltax / 2.0),
rect.bottom() + 3,
deltax,
13)
v_notches.append((notch_rect, notch))
x += deltax
alt = not alt
self._buildData['grid_v_lines'] = v_lines
self._buildData['grid_v_alt'] = v_alt
self._buildData['grid_v_notches'] = v_notches
# draw the axis lines
axis_lines = []
axis_lines.append(QLineF(rect.left(),
rect.top(),
rect.left(),
rect.bottom()))
axis_lines.append(QLineF(rect.left(),
rect.bottom(),
rect.right(),
rect.bottom()))
self._buildData['axis_lines'] = axis_lines
def search(name):
'''
Search for matches in the ports tree. Globs are supported, and the category
is optional
CLI Examples:
.. code-block:: bash
salt '*' ports.search 'security/*'
salt '*' ports.search 'security/n*'
salt '*' ports.search nmap
.. warning::
Takes a while to run
'''
name = six.text_type(name)
all_ports = list_all()
if '/' in name:
if name.count('/') > 1:
raise SaltInvocationError(
'Invalid search string \'{0}\'. Port names cannot have more '
'than one slash'
)
else:
return fnmatch.filter(all_ports, name)
else:
ret = []
for port in all_ports:
if fnmatch.fnmatch(port.rsplit('/')[-1], name):
ret.append(port)
return ret
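# A minimal sketch of the glob matching the function relies on, demonstrated
# directly with fnmatch on a hypothetical ports list.
import fnmatch

all_ports = ['security/nmap', 'security/nikto', 'net/rsync']
print(fnmatch.filter(all_ports, 'security/n*'))
# ['security/nmap', 'security/nikto']
print([p for p in all_ports if fnmatch.fnmatch(p.rsplit('/')[-1], 'nmap')])
# ['security/nmap']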
def get_features(model_description_features):
"""Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
]
"""
return utils.get_objectlist(model_description_features,
config_key='features',
module=sys.modules[__name__])
def get_jobs(self, id=None, params=None):
"""
`<>`_
:arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
blank for all jobs
"""
return self.transport.perform_request(
"GET", _make_path("_rollup", "job", id), params=params
)
def getInstalledConfig(installDir, configFile):
"""
Reads config from the installation directory of Plenum.
:param installDir: installation directory of Plenum
:param configFile: name of the configuration file
:raises: FileNotFoundError
:return: the configuration as a python object
"""
configPath = os.path.join(installDir, configFile)
if not os.path.exists(configPath):
raise FileNotFoundError("No file found at location {}".
format(configPath))
spec = spec_from_file_location(configFile, configPath)
config = module_from_spec(spec)
spec.loader.exec_module(config)
return config
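# A self-contained rerun of the spec_from_file_location / module_from_spec
# pattern used above, against a throwaway config written to a temporary
# directory (the file name and attribute are hypothetical).
import os
import tempfile
from importlib.util import spec_from_file_location, module_from_spec

with tempfile.TemporaryDirectory() as install_dir:
    config_path = os.path.join(install_dir, 'plenum_config.py')
    with open(config_path, 'w') as f:
        f.write('poolTransactionsFile = "pool_transactions"\n')
    spec = spec_from_file_location('plenum_config', config_path)
    config = module_from_spec(spec)
    spec.loader.exec_module(config)
    print(config.poolTransactionsFile)  # pool_transactions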
def do_start_alerts(self, _):
""" Starts the alerter thread """
if self._alerter_thread.is_alive():
print("The alert thread is already started")
else:
self._stop_thread = False
self._alerter_thread = threading.Thread(name='alerter', target=self._alerter_thread_func)
self._alerter_thread.start()
def load_overrides(path=None):
"""
Load config overrides from the yml file at |path|, or from default paths. If a path
is provided and it does not exist, raise an exception
Default paths: ./mcore.yml, ./.mcore.yml, ./manticore.yml, ./.manticore.yml.
"""
if path is not None:
names = [path]
else:
possible_names = ['mcore.yml', 'manticore.yml']
names = [os.path.join('.', ''.join(x)) for x in product(['', '.'], possible_names)]
for name in names:
try:
with open(name, 'r') as yml_f:
logger.info(f'Reading configuration from {name}')
parse_config(yml_f)
break
except FileNotFoundError:
pass
else:
if path is not None:
raise FileNotFoundError(f"'{path}' not found for config overrides")
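# The default candidate paths the loop above walks, spelled out with the same
# itertools.product expression.
import os
from itertools import product

possible_names = ['mcore.yml', 'manticore.yml']
print([os.path.join('.', ''.join(x)) for x in product(['', '.'], possible_names)])
# ['./mcore.yml', './manticore.yml', './.mcore.yml', './.manticore.yml']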
def vm_detach(name, kwargs=None, call=None):
'''
Detaches a disk from a virtual machine.
.. versionadded:: 2016.3.0
name
The name of the VM from which to detach the disk.
disk_id
The ID of the disk to detach.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_detach my-vm disk_id=1
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_detach action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
disk_id = kwargs.get('disk_id', None)
if disk_id is None:
raise SaltCloudSystemExit(
'The vm_detach function requires a \'disk_id\' to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.detach(auth, vm_id, int(disk_id))
data = {
'action': 'vm.detach',
'detached': response[0],
'vm_id': response[1],
'error_code': response[2],
}
return data
def Analyze(self, hashes):
"""Looks up hashes in Viper using the Viper HTTP API.
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: hash analysis.
Raises:
RuntimeError: If no host has been set for Viper.
"""
hash_analyses = []
for digest in hashes:
json_response = self._QueryHash(digest)
hash_analysis = interface.HashAnalysis(digest, json_response)
hash_analyses.append(hash_analysis)
return hash_analyses
def rs_find_errors(err_loc, nmess, generator=2):
'''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).'''
# nmess = length of whole codeword (message + ecc symbols)
errs = len(err_loc) - 1
err_pos = []
for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols
if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error
err_pos.append(nmess - 1 - i)
# Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial
if len(err_pos) != errs:
# TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible.
raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!")
return err_pos
def get_url(client, name, version, wheel=False, hashed_format=False):
"""Retrieves list of package URLs using PyPI's XML-RPC. Chooses URL
of prefered archive and md5_digest.
"""
try:
release_urls = client.release_urls(name, version)
release_data = client.release_data(name, version)
except BaseException: # some kind of error with client
logger.debug('Client: {0} Name: {1} Version: {2}.'.format(
client, name, version))
raise exceptions.MissingUrlException(
"Some kind of error while communicating with client: {0}.".format(
client), exc_info=True)
url = ''
md5_digest = None
if not wheel:
# Preferred archive is tar.gz
if len(release_urls):
zip_url = zip_md5 = ''
for release_url in release_urls:
if release_url['url'].endswith("tar.gz"):
url = release_url['url']
md5_digest = release_url['md5_digest']
if release_url['url'].endswith(".zip"):
zip_url = release_url['url']
zip_md5 = release_url['md5_digest']
if url == '':
url = zip_url or release_urls[0]['url']
md5_digest = zip_md5 or release_urls[0]['md5_digest']
elif release_data:
url = release_data['download_url']
else:
# Only wheel is acceptable
for release_url in release_urls:
if release_url['url'].endswith("none-any.whl"):
url = release_url['url']
md5_digest = release_url['md5_digest']
break
if not url:
raise exceptions.MissingUrlException(
"Url of source archive not found.")
if url == 'UNKNOWN':
raise exceptions.MissingUrlException(
"{0} package has no sources on PyPI, Please ask the maintainer "
"to upload sources.".format(release_data['name']))
if not hashed_format:
url = ("https://files.pythonhosted.org/packages/source"
"/{0[0]}/{0}/{1}").format(name, url.split("/")[-1])
return (url, md5_digest)
def backfill(self, data, resolution, start, end=None):
"""
Backfills missing historical data
:Optional:
data : pd.DataFrame
Minimum required bars for backfill attempt
resolution : str
Algo resolution
start: datetime
Backfill start date (YYYY-MM-DD [HH:MM:SS[.MS]).
end: datetime
Backfill end date (YYYY-MM-DD [HH:MM:SS[.MS]). Default is None
:Returns:
status : mixed
False for "won't backfill" / True for "backfilling, please wait"
"""
data.sort_index(inplace=True)
# currently only supporting minute-data
if resolution[-1] in ("K", "V"):
self.backfilled = True
return None
# missing history?
start_date = parse_date(start)
end_date = parse_date(end) if end else datetime.utcnow()
if data.empty:
first_date = datetime.utcnow()
last_date = datetime.utcnow()
else:
first_date = tools.datetime64_to_datetime(data.index.values[0])
last_date = tools.datetime64_to_datetime(data.index.values[-1])
ib_lookback = None
if start_date < first_date:
ib_lookback = tools.ib_duration_str(start_date)
elif end_date > last_date:
ib_lookback = tools.ib_duration_str(last_date)
if not ib_lookback:
self.backfilled = True
return None
self.backfill_resolution = "1 min" if resolution[-1] not in (
"K", "V", "S") else "1 sec"
self.log_blotter.warning("Backfilling historical data from IB...")
# request parameters
params = {
"lookback": ib_lookback,
"resolution": self.backfill_resolution,
"data": "TRADES",
"rth": False,
"end_datetime": None,
"csv_path": None
}
# if connection is active - request data
self.ibConn.requestHistoricalData(**params)
# wait for backfill to complete
while not self.backfilled:
time.sleep(0.01)
# otherwise, pass the parameters to the caller
return True
def run(self, event):
'''
This method is called by QA_ThreadEngine when it processes the task queue; QA_Task.do invokes run (in another thread).
'QA_WORKER method override'
:param event: the event, of type QA_Event
:return:
'''
'QA_WORKER method'
if event.event_type is ACCOUNT_EVENT.SETTLE:
print('account_settle')
self.settle()
# elif event.event_type is ACCOUNT_EVENT.UPDATE:
# self.receive_deal(event.message)
elif event.event_type is ACCOUNT_EVENT.MAKE_ORDER:
"""generate order
if callback callback the order
if not return back the order
"""
data = self.send_order(
code=event.code,
amount=event.amount,
time=event.time,
amount_model=event.amount_model,
towards=event.towards,
price=event.price,
order_model=event.order_model
)
if event.callback:
event.callback(data)
else:
return data
elif event.event_type is ENGINE_EVENT.UPCOMING_DATA:
"""update the market_data
1. update the inside market_data struct
2. tell the on_bar methods
# this is a bit slow
"""
self._currenttime = event.market_data.datetime[0]
if self._market_data is None:
self._market_data = event.market_data
else:
self._market_data = self._market_data + event.market_data
self.on_bar(event)
if event.callback:
event.callback(event)
def on_build_finished(app, exception):
"""
Hooks into Sphinx's ``build-finished`` event.
"""
if not app.config["uqbar_book_use_cache"]:
return
logger.info("")
for row in app.connection.execute("SELECT path, hits FROM cache ORDER BY path"):
path, hits = row
if not hits:
continue
logger.info(bold("[uqbar-book]"), nonl=True)
logger.info(" Cache hits for {}: {}".format(path, hits)) | 0.004386 |
def close(self):
"Close the shelve object, which is needed for data consistency."
if self.is_open:
logger.info('closing shelve data')
try:
self.shelve.close()
self._shelve.clear()
except Exception:
self.is_open = False
def u_distance_stats_sqr(x, y, **kwargs):
"""
u_distance_stats_sqr(x, y, *, exponent=1)
Computes the unbiased estimators for the squared distance covariance
and squared distance correlation between two random vectors, and the
individual squared distance variances.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
Stats
Squared distance covariance, squared distance correlation,
squared distance variance of the first random vector and
squared distance variance of the second random vector.
See Also
--------
u_distance_covariance_sqr
u_distance_correlation_sqr
Notes
-----
It is less efficient to compute the statistics separately, rather than
using this function, because some computations can be shared.
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.u_distance_stats_sqr(a, a) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=42.6666666..., correlation_xy=1.0,
variance_x=42.6666666..., variance_y=42.6666666...)
>>> dcor.u_distance_stats_sqr(a, b) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=-2.6666666..., correlation_xy=-0.5,
variance_x=42.6666666..., variance_y=0.6666666...)
>>> dcor.u_distance_stats_sqr(b, b) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.6666666..., correlation_xy=1.0,
variance_x=0.6666666..., variance_y=0.6666666...)
>>> dcor.u_distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=-0.2996598..., correlation_xy=-0.4050479...,
variance_x=0.8209855..., variance_y=0.6666666...)
"""
if _can_use_fast_algorithm(x, y, **kwargs):
return _u_distance_stats_sqr_fast(x, y)
else:
return _distance_sqr_stats_naive_generic(
x, y,
matrix_centered=_u_distance_matrix,
product=u_product,
**kwargs) | 0.000333 |
def get_stats_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/stats/contributors <http://developer.github.com/v3/repos/statistics/#get-contributors-list-with-additions-deletions-and-commit-counts>`_
:rtype: None or list of :class:`github.StatsContributor.StatsContributor`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/contributors"
)
if not data:
return None
else:
return [
github.StatsContributor.StatsContributor(self._requester, headers, attributes, completed=True)
for attributes in data
] | 0.007184 |
def join(self, timeout=None):
"""Blocking wait for the execution to finish
:param float timeout: Maximum time to wait or None for infinitely
:return: True if the execution finished, False if no state machine was started or a timeout occurred
:rtype: bool
"""
if self.__wait_for_finishing_thread:
if not timeout:
# signal handlers won't work if timeout is None and the thread is joined
while True:
self.__wait_for_finishing_thread.join(0.5)
                    if not self.__wait_for_finishing_thread.is_alive():
break
else:
self.__wait_for_finishing_thread.join(timeout)
return not self.__wait_for_finishing_thread.is_alive()
else:
logger.warning("Cannot join as state machine was not started yet.")
return False | 0.004348 |
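The half-second polling loop above exists so that signal handlers (e.g. KeyboardInterrupt) still fire while the main thread waits. A minimal standalone sketch of the same pattern with a plain thread; the helper name is illustrative and nothing here depends on the state-machine code:

import threading
import time

def wait_interruptibly(thread):
    # join() without a timeout can block signal delivery in the main thread,
    # so poll with a short timeout until the worker finishes.
    while thread.is_alive():
        thread.join(0.5)

worker = threading.Thread(target=time.sleep, args=(2,))
worker.start()
wait_interruptibly(worker)
print("worker finished")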
def write(self, data):
"""
Write data to the file. If write buffering is on (``bufsize`` was
specified and non-zero), some or all of the data may not actually be
written yet. (Use `flush` or `close` to force buffered data to be
written out.)
:param data: ``str``/``bytes`` data to write
"""
if isinstance(data, text_type):
# Accept text and encode as utf-8 for compatibility only.
data = data.encode("utf-8")
if self._closed:
raise IOError("File is closed")
if not (self._flags & self.FLAG_WRITE):
raise IOError("File not open for writing")
if not (self._flags & self.FLAG_BUFFERED):
self._write_all(data)
return
self._wbuffer.write(data)
if self._flags & self.FLAG_LINE_BUFFERED:
# only scan the new data for linefeed, to avoid wasting time.
last_newline_pos = data.rfind(linefeed_byte)
if last_newline_pos >= 0:
wbuf = self._wbuffer.getvalue()
last_newline_pos += len(wbuf) - len(data)
self._write_all(wbuf[: last_newline_pos + 1])
self._wbuffer = BytesIO()
self._wbuffer.write(wbuf[last_newline_pos + 1 :])
return
# even if we're line buffering, if the buffer has grown past the
# buffer size, force a flush.
if self._wbuffer.tell() >= self._bufsize:
self.flush()
return | 0.001969 |
def console_map_string_to_font(s: str, fontCharX: int, fontCharY: int) -> None:
"""Remap a string of codes to a contiguous set of tiles.
Args:
s (AnyStr): A string of character codes to map to new values.
The null character `'\\x00'` will prematurely end this
function.
fontCharX (int): The starting X tile coordinate on the loaded tileset.
0 is the leftmost tile.
fontCharY (int): The starting Y tile coordinate on the loaded tileset.
0 is the topmost tile.
"""
lib.TCOD_console_map_string_to_font_utf(_unicode(s), fontCharX, fontCharY) | 0.001497 |
async def recv(self):
"""
Receives a packet of data through this connection mode.
This method returns a coroutine.
"""
while self._connected:
result = await self._recv_queue.get()
if result: # None = sentinel value = keep trying
return result
raise ConnectionError('Not connected') | 0.005376 |
def p_union(self, p):
'''union : UNION IDENTIFIER '{' field_seq '}' annotations'''
p[0] = ast.Union(
name=p[2], fields=p[4], annotations=p[6], lineno=p.lineno(2)
) | 0.01005 |
def get_short_url(self):
""" Returns short version of topic url (without page number) """
return reverse('post_short_url', args=(self.forum.slug, self.slug, self.id)) | 0.016304 |
def pad_vocabulary(self, vocab, pad):
"""
Pads vocabulary to a multiple of 'pad' tokens.
:param vocab: list with vocabulary
:param pad: integer
"""
vocab_size = len(vocab)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:04d}'
vocab.append(token)
assert len(vocab) % pad == 0 | 0.004415 |
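A standalone sketch of the same padding logic on a plain Python list, useful for checking the rounding-up arithmetic; the function name is illustrative:

def pad_vocab_list(vocab, pad):
    # Round the size up to the next multiple of `pad` and append filler tokens.
    vocab = list(vocab)
    padded_size = (len(vocab) + pad - 1) // pad * pad
    for i in range(padded_size - len(vocab)):
        vocab.append('madeupword{:04d}'.format(i))
    return vocab

padded = pad_vocab_list(['a', 'b', 'c'], 8)
print(len(padded), padded[-1])  # 8 madeupword0004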
def get_output(script, expanded):
"""Runs the script and obtains stdin/stderr.
:type script: str
:type expanded: str
:rtype: str | None
"""
env = dict(os.environ)
env.update(settings.env)
is_slow = shlex.split(expanded) in settings.slow_commands
    with logs.debug_time(u'Call: {}; with env: {}; is slow: {}'.format(
script, env, is_slow)):
result = Popen(expanded, shell=True, stdin=PIPE,
stdout=PIPE, stderr=STDOUT, env=env)
if _wait_output(result, is_slow):
output = result.stdout.read().decode('utf-8')
logs.debug(u'Received output: {}'.format(output))
return output
else:
logs.debug(u'Execution timed out!')
return None | 0.001287 |
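The core pattern above — run the expanded command with stderr folded into stdout and give up after a bounded wait — can be reproduced with the standard library alone. This sketch drops the settings/logging machinery of the snippet; the timeout value is illustrative:

from subprocess import PIPE, STDOUT, Popen, TimeoutExpired

def run_and_capture(expanded, timeout=3):
    # shell=True mirrors the snippet; stderr is merged into stdout.
    proc = Popen(expanded, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    try:
        out, _ = proc.communicate(timeout=timeout)
        return out.decode('utf-8')
    except TimeoutExpired:
        proc.kill()
        return None

print(run_and_capture('echo hello'))  # hello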
def fast_kde(x, y, gridsize=(200,200), extents=None, nocorrelation=False, weights=None):
"""
Performs a gaussian kernel density estimate over a regular grid using a
convolution of the gaussian kernel with a 2D histogram of the data.
This function is typically several orders of magnitude faster than
scipy.stats.kde.gaussian_kde for large (>1e7) numbers of points and
produces an essentially identical result.
Input:
x: The x-coords of the input data points
y: The y-coords of the input data points
gridsize: (default: 200x200) A (nx,ny) tuple of the size of the output
grid
extents: (default: extent of input data) A (xmin, xmax, ymin, ymax)
tuple of the extents of output grid
nocorrelation: (default: False) If True, the correlation between the
            x and y coords will be ignored when performing the KDE.
weights: (default: None) An array of the same shape as x & y that
weighs each sample (x_i, y_i) by each value in weights (w_i).
Defaults to an array of ones the same size as x & y.
Output:
A gridded 2D kernel density estimate of the input points.
"""
#---- Setup --------------------------------------------------------------
x, y = np.asarray(x), np.asarray(y)
x, y = np.squeeze(x), np.squeeze(y)
if x.size != y.size:
raise ValueError('Input x & y arrays must be the same size!')
nx, ny = gridsize
n = x.size
if weights is None:
# Default: Weight all points equally
weights = np.ones(n)
else:
weights = np.squeeze(np.asarray(weights))
if weights.size != x.size:
raise ValueError('Input weights must be an array of the same size'
' as input x & y arrays!')
# Default extents are the extent of the data
if extents is None:
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
else:
xmin, xmax, ymin, ymax = list(map(float, extents))
dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)
#---- Preliminary Calculations -------------------------------------------
# First convert x & y over to pixel coordinates
# (Avoiding np.digitize due to excessive memory usage in numpy < v1.5!)
# http://stackoverflow.com/q/8805601/
xyi = np.vstack((x,y)).T
xyi -= [xmin, ymin]
xyi /= [dx, dy]
xyi = np.floor(xyi, xyi).T
# Next, make a 2D histogram of x & y
# Avoiding np.histogram2d due to excessive memory usage with many points
# http://stackoverflow.com/q/8805601/
grid = sp.sparse.coo_matrix((weights, xyi), shape=(nx, ny)).toarray()
# Calculate the covariance matrix (in pixel coords)
cov = np.cov(xyi)
if nocorrelation:
cov[1,0] = 0
cov[0,1] = 0
# Scaling factor for bandwidth
scotts_factor = np.power(n, -1.0 / 6) # For 2D
#---- Make the gaussian kernel -------------------------------------------
# First, determine how big the kernel needs to be
std_devs = np.diag(np.sqrt(cov))
    kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs).astype(int)
# Determine the bandwidth to use for the gaussian kernel
inv_cov = np.linalg.inv(cov * scotts_factor**2)
# x & y (pixel) coords of the kernel grid, with <x,y> = <0,0> in center
    xx = np.arange(kern_nx, dtype=float) - kern_nx / 2.0
    yy = np.arange(kern_ny, dtype=float) - kern_ny / 2.0
xx, yy = np.meshgrid(xx, yy)
# Then evaluate the gaussian function on the kernel grid
kernel = np.vstack((xx.flatten(), yy.flatten()))
kernel = np.dot(inv_cov, kernel) * kernel
kernel = np.sum(kernel, axis=0) / 2.0
kernel = np.exp(-kernel)
kernel = kernel.reshape((kern_ny, kern_nx))
#---- Produce the kernel density estimate --------------------------------
# Convolve the gaussian kernel with the 2D histogram, producing a gaussian
# kernel density estimate on a regular grid
grid = sp.signal.convolve2d(grid, kernel, mode='same', boundary='fill').T
### ADW: Commented out for
### # Normalization factor to divide result by so that units are in the same
### # units as scipy.stats.kde.gaussian_kde's output.
### norm_factor = 2 * np.pi * cov * scotts_factor**2
### norm_factor = np.linalg.det(norm_factor)
### norm_factor = n * dx * dy * np.sqrt(norm_factor)
###
### # Normalize the result
### grid /= norm_factor
return grid | 0.007749 |
def load_glove_df(filepath, **kwargs):
""" Load a GloVE-format text file into a dataframe
>>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt'))
>>> df.index[:3]
Index(['the', ',', '.'], dtype='object', name=0)
>>> df.iloc[0][:3]
1 0.41800
2 0.24968
3 -0.41242
Name: the, dtype: float64
"""
pdkwargs = dict(index_col=0, header=None, sep=r'\s', skiprows=[0], verbose=False, engine='python')
pdkwargs.update(kwargs)
return pd.read_csv(filepath, **pdkwargs) | 0.003781 |
def get_vnetwork_hosts_output_vnetwork_hosts_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
interface_name = ET.SubElement(vnetwork_hosts, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003311 |
def setHeight(self, personID, height):
"""setHeight(string, double) -> None
Sets the height in m for this person.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_PERSON_VARIABLE, tc.VAR_HEIGHT, personID, height) | 0.007813 |
def write_checktime (self, url_data):
"""Write url_data.checktime."""
self.writeln(u"<tr><td>"+self.part("checktime")+u"</td><td>"+
(_("%.3f seconds") % url_data.checktime)+u"</td></tr>") | 0.017857 |
def index(args):
"""
%prog index bedfile
Compress and index bedfile using `tabix`. Use --fasta to give a FASTA file
so that a bedgraph file can be generated and indexed.
"""
p = OptionParser(index.__doc__)
p.add_option("--fasta", help="Generate bedgraph and index")
p.add_option("--query", help="Chromosome location")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fastafile = opts.fasta
if fastafile:
bedfile = make_bedgraph(bedfile, fastafile)
bedfile = sort([bedfile])
gzfile = bedfile + ".gz"
if need_update(bedfile, gzfile):
cmd = "bgzip {0}".format(bedfile)
sh(cmd)
tbifile = gzfile + ".tbi"
if need_update(gzfile, tbifile):
cmd = "tabix -p bed {0}".format(gzfile)
sh(cmd)
query = opts.query
if not query:
return
cmd = "tabix {0} {1}".format(gzfile, query)
sh(cmd, outfile=opts.outfile) | 0.000986 |
def view_500(request, url=None):
"""
    Returns a 500 HTTP response.
"""
res = render_to_response("500.html", context_instance=RequestContext(request))
res.status_code = 500
return res | 0.009662 |
def show_option(self, option, _global=False):
"""Return a list of options for the window.
Parameters
----------
option : str
option name
_global : bool, optional
use global option scope, same as ``-g``
Returns
-------
str, int, or bool
Raises
------
:exc:`exc.OptionError`, :exc:`exc.UnknownOption`,
:exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
Notes
-----
Uses ``_global`` for keyword name instead of ``global`` to avoid
colliding with reserved keyword.
Test and return True/False for on/off string.
"""
tmux_args = tuple()
if _global:
tmux_args += ('-g',)
tmux_args += (option,)
cmd = self.cmd('show-options', *tmux_args)
if isinstance(cmd.stderr, list) and len(cmd.stderr):
handle_option_error(cmd.stderr[0])
if not len(cmd.stdout):
return None
option = [item.split(' ') for item in cmd.stdout][0]
if option[1].isdigit():
option = (option[0], int(option[1]))
return option[1] | 0.001684 |
def explode_azure_path(path):
# type: (str) -> Tuple[str, str]
"""Explodes an azure path into a container or fileshare and the
remaining virtual path
:param str path: path to explode
:rtype: tuple
:return: container, vpath
"""
rpath = normalize_azure_path(path).split('/')
container = str(rpath[0])
if len(rpath) > 1:
rpath = '/'.join(rpath[1:])
else:
rpath = ''
return container, rpath | 0.002222 |
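A self-contained approximation of the same container/vpath split; it assumes normalize_azure_path collapses backslashes and strips surrounding separators, which is only an assumption here:

def explode_path_sketch(path):
    # First path component is the container/fileshare, the rest is the virtual path.
    parts = path.replace('\\', '/').strip('/').split('/')
    container = parts[0]
    vpath = '/'.join(parts[1:]) if len(parts) > 1 else ''
    return container, vpath

print(explode_path_sketch('mycontainer/some/dir/blob.bin'))  # ('mycontainer', 'some/dir/blob.bin')
print(explode_path_sketch('mycontainer'))                    # ('mycontainer', '')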
def get_async(self, **ctx_options):
"""Return a Future whose result is the entity for this Key.
If no such entity exists, a Future is still returned, and the
    Future's eventual return result will be None.
"""
from . import model, tasklets
ctx = tasklets.get_context()
cls = model.Model._kind_map.get(self.kind())
if cls:
cls._pre_get_hook(self)
fut = ctx.get(self, **ctx_options)
if cls:
post_hook = cls._post_get_hook
if not cls._is_default_hook(model.Model._default_post_get_hook,
post_hook):
fut.add_immediate_callback(post_hook, self, fut)
return fut | 0.006126 |
def bind_queues(self, bindings):
"""
Declare a set of bindings between queues and exchanges.
Args:
bindings (list of dict): A list of binding definitions. Each dictionary
must contain the "queue" key whose value is the name of the queue
to create the binding on, as well as the "exchange" key whose value
should be the name of the exchange to bind to. Additional acceptable
keys are any keyword arguments accepted by
:meth:`pika.channel.Channel.queue_bind`.
Raises:
NoFreeChannels: If there are no available channels on this connection.
If this occurs, you can either reduce the number of consumers on this
connection or create an additional connection.
BadDeclaration: If a binding could not be declared. This can occur if the
queue or exchange don't exist, or if they do, but the current user does
not have permissions to create bindings.
"""
channel = yield self._allocate_channel()
try:
for binding in bindings:
try:
yield channel.queue_bind(**binding)
except pika.exceptions.ChannelClosed as e:
raise BadDeclaration("binding", binding, e)
finally:
try:
channel.close()
except pika.exceptions.AMQPError:
pass | 0.006676 |
def _get_argument(self, argument_node):
"""
Returns a FritzActionArgument instance for the given argument_node.
"""
argument = FritzActionArgument()
argument.name = argument_node.find(self.nodename('name')).text
argument.direction = argument_node.find(self.nodename('direction')).text
rsv = argument_node.find(self.nodename('relatedStateVariable')).text
# TODO: track malformed xml-nodes (i.e. misspelled)
argument.data_type = self.state_variables.get(rsv, None)
return argument | 0.005376 |
def _parse_plt_segment(self, fptr):
"""Parse the PLT segment.
The packet headers are not parsed, i.e. they remain uninterpreted raw
data buffers.
Parameters
----------
fptr : file
Open file object.
Returns
-------
PLTSegment
The current PLT segment.
"""
offset = fptr.tell() - 2
read_buffer = fptr.read(3)
length, zplt = struct.unpack('>HB', read_buffer)
numbytes = length - 3
read_buffer = fptr.read(numbytes)
iplt = np.frombuffer(read_buffer, dtype=np.uint8)
packet_len = []
plen = 0
for byte in iplt:
plen |= (byte & 0x7f)
if byte & 0x80:
# Continue by or-ing in the next byte.
plen <<= 7
else:
packet_len.append(plen)
plen = 0
iplt = packet_len
return PLTsegment(zplt, iplt, length, offset) | 0.002012 |
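The inner loop decodes packet lengths stored as 7-bit groups with a continuation bit (0x80). A standalone sketch of just that decoding; the example bytes are made up:

def decode_plt_lengths(raw):
    # Each byte contributes 7 bits; the high bit means "more bytes follow".
    lengths, current = [], 0
    for byte in raw:
        current |= (byte & 0x7f)
        if byte & 0x80:
            current <<= 7
        else:
            lengths.append(current)
            current = 0
    return lengths

# 0x81 0x00 -> (1 << 7) | 0 = 128, 0x05 -> 5
print(decode_plt_lengths(bytes([0x81, 0x00, 0x05])))  # [128, 5]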
def get_html(self) -> str:
"""Return complete report as a HTML string."""
data = self.getdoc()
num_checks = 0
body_elements = []
# Order by section first...
for section in data["sections"]:
section_name = html.escape(section["key"][0])
section_stati_of_note = (
e for e in section["result"].elements() if e != "PASS"
)
section_stati = "".join(
EMOTICON[s] for s in sorted(section_stati_of_note, key=LOGLEVELS.index)
)
body_elements.append(f"<h2>{section_name} {section_stati}</h2>")
checks_by_id: Dict[str, List[Dict[str, str]]] = collections.defaultdict(
list
)
# ...and check second.
for cluster in section["checks"]:
if not isinstance(cluster, list):
cluster = [cluster]
num_checks += len(cluster)
for check in cluster:
checks_by_id[check["key"][1]].append(check)
for check, results in checks_by_id.items():
check_name = html.escape(check)
body_elements.append(f"<h3>{results[0]['description']}</h3>")
body_elements.append(f"<div>Check ID: {check_name}</div>")
for result in results:
if "filename" in result:
body_elements.append(
html5_collapsible(
f"{EMOTICON[result['result']]} <strong>{result['filename']}</strong>",
self.html_for_check(result),
)
)
else:
body_elements.append(
html5_collapsible(
f"{EMOTICON[result['result']]} <strong>Family check</strong>",
self.html_for_check(result),
)
)
body_top = [
"<h1>Fontbakery Technical Report</h1>",
"<div>If you think a check is flawed or have an idea for a check, please "
f" file an issue at <a href='{ISSUE_URL}'>{ISSUE_URL}</a> and remember "
"to include a pointer to the repo and branch you're checking.</div>",
]
if num_checks:
results_summary = [data["result"][k] for k in LOGLEVELS]
body_top.append(summary_table(*results_summary, num_checks))
omitted = [l for l in LOGLEVELS if self.omit_loglevel(l)]
if omitted:
body_top.append(
"<p><strong>Note:</strong>"
" The following loglevels were omitted in this report:"
f" {', '.join(omitted)}</p>"
)
body_elements[0:0] = body_top
return html5_document(body_elements) | 0.003406 |
def check_version(self, timeout=2, strict=False, topics=[]):
"""Attempt to guess the broker version.
Note: This is a blocking call.
Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
"""
timeout_at = time.time() + timeout
log.info('Probing node %s broker version', self.node_id)
# Monkeypatch some connection configurations to avoid timeouts
override_config = {
'request_timeout_ms': timeout * 1000,
'max_in_flight_requests_per_connection': 5
}
stashed = {}
for key in override_config:
stashed[key] = self.config[key]
self.config[key] = override_config[key]
# kafka kills the connection when it doesn't recognize an API request
# so we can send a test request and then follow immediately with a
# vanilla MetadataRequest. If the server did not recognize the first
# request, both will be failed with a ConnectionError that wraps
# socket.error (32, 54, or 104)
from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest
test_cases = [
# All cases starting from 0.10 will be based on ApiVersionResponse
((0, 10), ApiVersionRequest[0]()),
((0, 9), ListGroupsRequest[0]()),
((0, 8, 2), GroupCoordinatorRequest[0]('kafka-python-default-group')),
((0, 8, 1), OffsetFetchRequest[0]('kafka-python-default-group', [])),
((0, 8, 0), MetadataRequest[0](topics)),
]
for version, request in test_cases:
if not self.connect_blocking(timeout_at - time.time()):
raise Errors.NodeNotReadyError()
f = self.send(request)
# HACK: sleeping to wait for socket to send bytes
time.sleep(0.1)
# when broker receives an unrecognized request API
# it abruptly closes our socket.
# so we attempt to send a second request immediately
# that we believe it will definitely recognize (metadata)
# the attempt to write to a disconnected socket should
# immediately fail and allow us to infer that the prior
# request was unrecognized
mr = self.send(MetadataRequest[0](topics))
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_READ)
while not (f.is_done and mr.is_done):
selector.select(1)
for response, future in self.recv():
future.success(response)
selector.close()
if f.succeeded():
if isinstance(request, ApiVersionRequest[0]):
# Starting from 0.10 kafka broker we determine version
# by looking at ApiVersionResponse
api_versions = self._handle_api_version_response(f.value)
version = self._infer_broker_version_from_api_versions(api_versions)
                log.info('Broker version identified as %s', '.'.join(map(str, version)))
log.info('Set configuration api_version=%s to skip auto'
' check_version requests on startup', version)
break
# Only enable strict checking to verify that we understand failure
# modes. For most users, the fact that the request failed should be
# enough to rule out a particular broker version.
if strict:
# If the socket flush hack did not work (which should force the
# connection to close and fail all pending requests), then we
# get a basic Request Timeout. This is not ideal, but we'll deal
if isinstance(f.exception, Errors.RequestTimedOutError):
pass
# 0.9 brokers do not close the socket on unrecognized api
# requests (bug...). In this case we expect to see a correlation
# id mismatch
elif (isinstance(f.exception, Errors.CorrelationIdError) and
version == (0, 10)):
pass
elif six.PY2:
assert isinstance(f.exception.args[0], socket.error)
assert f.exception.args[0].errno in (32, 54, 104)
else:
assert isinstance(f.exception.args[0], ConnectionError)
log.info("Broker is not v%s -- it did not recognize %s",
version, request.__class__.__name__)
else:
raise Errors.UnrecognizedBrokerVersion()
for key in stashed:
self.config[key] = stashed[key]
return version | 0.001859 |
def register_classes(self, classes):
"""
Register classes as plugins that are not subclassed from
IPlugin.
`classes` may be a single object or an iterable.
"""
classes = util.return_list(classes)
for klass in classes:
IPlugin.register(klass) | 0.006472 |
def syllable_tokenize(text: str) -> List[str]:
"""
:param str text: input string to be tokenized
:return: list of syllables
"""
if not text or not isinstance(text, str):
return []
tokens = []
if text:
words = word_tokenize(text)
trie = dict_trie(dict_source=thai_syllables())
for word in words:
tokens.extend(word_tokenize(text=word, custom_dict=trie))
return tokens | 0.002247 |
def sample_static_posterior(self, inputs, samples):
"""Sample the static latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
Returns:
A tuple of a sample tensor of shape [samples, batch_size,
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [..., batch_size].
"""
dist = self.static_encoder(inputs)
sample = dist.sample(samples)
return sample, dist | 0.001439 |
def find_biclique_embedding(a, b, m, n=None, t=None, target_edges=None):
"""Find an embedding for a biclique in a Chimera graph.
Given a target :term:`Chimera` graph size, and a biclique (a bipartite graph where every
    vertex in one set is connected to all vertices in the other set), attempts to find an embedding.
Args:
a (int/iterable):
Left shore of the biclique to embed. If a is an integer, generates an embedding
for a biclique with the left shore of size a labelled [0,a-1].
If a is an iterable, generates an embedding for a biclique with the left shore of size
len(a), where iterable a is the variable labels.
b (int/iterable):
Right shore of the biclique to embed.If b is an integer, generates an embedding
for a biclique with the right shore of size b labelled [0,b-1].
If b is an iterable, generates an embedding for a biclique with the right shore of
size len(b), where iterable b provides the variable labels.
m (int):
Number of rows in the Chimera lattice.
n (int, optional, default=m):
Number of columns in the Chimera lattice.
t (int, optional, default 4):
Size of the shore within each Chimera tile.
target_edges (iterable[edge]):
A list of edges in the target Chimera graph. Nodes are labelled as
returned by :func:`~dwave_networkx.generators.chimera_graph`.
Returns:
tuple: A 2-tuple containing:
dict: An embedding mapping the left shore of the biclique to the Chimera lattice.
dict: An embedding mapping the right shore of the biclique to the Chimera lattice
Examples:
This example finds an embedding for an alphanumerically labeled biclique in a single
Chimera unit cell.
>>> from dwave.embedding.chimera import find_biclique_embedding
...
>>> left, right = find_biclique_embedding(['a', 'b', 'c'], ['d', 'e'], 1, 1)
>>> print(left, right) # doctest: +SKIP
{'a': [4], 'b': [5], 'c': [6]} {'d': [0], 'e': [1]}
"""
_, anodes = a
_, bnodes = b
m, n, t, target_edges = _chimera_input(m, n, t, target_edges)
embedding = processor(target_edges, M=m, N=n, L=t).tightestNativeBiClique(len(anodes), len(bnodes))
if not embedding:
raise ValueError("cannot find a K{},{} embedding for given Chimera lattice".format(a, b))
left, right = embedding
return dict(zip(anodes, left)), dict(zip(bnodes, right)) | 0.005033 |
def move_in_8(library, session, space, offset, length, extended=False):
"""Moves an 8-bit block of data from the specified address space and offset to local memory.
Corresponds to viMoveIn8* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param length: Number of elements to transfer, where the data width of the elements to transfer
is identical to the source data width.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from the bus, return value of the library call.
:rtype: list, :class:`pyvisa.constants.StatusCode`
"""
buffer_8 = (ViUInt8 * length)()
if extended:
ret = library.viMoveIn8Ex(session, space, offset, length, buffer_8)
else:
ret = library.viMoveIn8(session, space, offset, length, buffer_8)
return list(buffer_8), ret | 0.003626 |
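The (ViUInt8 * length)() expression is ordinary ctypes array construction. A sketch with plain ctypes types, no VISA library required:

import ctypes

length = 4
buffer_8 = (ctypes.c_uint8 * length)()  # zero-initialised array of 4 unsigned bytes
buffer_8[0] = 0xAB                      # a library call would normally fill this in
print(list(buffer_8))                   # [171, 0, 0, 0]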
def is_gvcf_file(in_file):
"""Check if an input file is raw gVCF
"""
to_check = 100
n = 0
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("##"):
if n > to_check:
break
n += 1
parts = line.split("\t")
# GATK
if parts[4] == "<NON_REF>":
return True
# strelka2
if parts[4] == "." and parts[7].startswith("BLOCKAVG"):
return True
# freebayes
if parts[4] == "<*>":
return True
            # platypus
if parts[4] == "N" and parts[6] == "REFCALL":
return True | 0.001247 |
def write_events(stream, events):
'''
Write a sequence of Event protos to file-like object `stream`.
'''
for event in events:
data = event.SerializeToString()
len_field = struct.pack('<Q', len(data))
len_crc = struct.pack('<I', masked_crc(len_field))
data_crc = struct.pack('<I', masked_crc(data))
stream.write(len_field)
stream.write(len_crc)
stream.write(data)
stream.write(data_crc) | 0.002151 |
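Reading the framing back is the mirror image: an 8-byte little-endian length, a 4-byte masked CRC of that length field, the payload, then a 4-byte masked CRC of the payload. A minimal reader sketch that parses the framing without verifying the checksums:

import struct

def read_records(stream):
    # Yields raw serialized payloads from a binary stream written as above.
    while True:
        len_field = stream.read(8)
        if len(len_field) < 8:
            return  # clean end of stream
        (length,) = struct.unpack('<Q', len_field)
        stream.read(4)            # masked CRC of the length field (not verified)
        data = stream.read(length)
        stream.read(4)            # masked CRC of the payload (not verified)
        yield data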
def sync_counts(**kwargs):
"""
Iterates over registered recipes and denormalizes ``Badge.users.count()``
into ``Badge.users_count`` field.
"""
badges = kwargs.get('badges')
excluded = kwargs.get('exclude_badges')
instances = registry.get_recipe_instances(badges=badges, excluded=excluded)
updated_badges, unchanged_badges = [], []
for instance in instances:
reset_queries()
badge, updated = instance.update_badge_users_count()
if updated:
updated_badges.append(badge)
else:
unchanged_badges.append(badge)
log_queries(instance)
return (updated_badges, unchanged_badges) | 0.001481 |
def save(self, *args, **kwargs):
"""Override the default ``save`` method."""
if not self.status:
self.status = self.DRAFT
# Published pages should always have a publication date
if self.publication_date is None and self.status == self.PUBLISHED:
self.publication_date = get_now()
# Drafts should not, unless they have been set to the future
if self.status == self.DRAFT:
if settings.PAGE_SHOW_START_DATE:
if (self.publication_date and
self.publication_date <= get_now()):
self.publication_date = None
else:
self.publication_date = None
self.last_modification_date = get_now()
super(Page, self).save(*args, **kwargs)
        # fix sites many-to-many link when they're hidden from the form
if settings.PAGE_HIDE_SITES and self.sites.count() == 0:
self.sites.add(Site.objects.get(pk=global_settings.SITE_ID)) | 0.001976 |
def add_argument(self, *args, parser=None, autoenv=False, env=None,
complete=None, **kwargs):
""" Allow cleaner action supplementation. Autoenv will generate an
environment variable to be usable as a defaults setter based on the
command name and the dest property of the action. """
if parser is None:
parser = self.argparser
action = parser.add_argument(*args, **kwargs)
if autoenv:
if env is not None:
raise TypeError('Arguments `env` and `autoenv` are mutually '
'exclusive')
env = self._make_autoenv(action)
if env:
self.argparser.bind_env(action, env)
if autoenv:
self._autoenv_actions.add(action)
if complete:
action.complete = complete
return action | 0.003386 |
def _numToTwoByteString(value, numberOfDecimals=0, LsbFirst=False, signed=False):
"""Convert a numerical value to a two-byte string, possibly scaling it.
Args:
* value (float or int): The numerical value to be converted.
* numberOfDecimals (int): Number of decimals, 0 or more, for scaling.
    * LsbFirst (bool): Whether the least significant byte should be first in the resulting string.
    * signed (bool): Whether negative values should be accepted.
Returns:
A two-byte string.
Raises:
TypeError, ValueError. Gives DeprecationWarning instead of ValueError
for some values in Python 2.6.
Use ``numberOfDecimals=1`` to multiply ``value`` by 10 before sending it to the slave register.
Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.
Use the parameter ``signed=True`` if making a bytestring that can hold
negative values. Then negative input will be automatically converted into
upper range data (two's complement).
The byte order is controlled by the ``LsbFirst`` parameter, as seen here:
====================== ============= ====================================
``LsbFirst`` parameter Endianness Description
====================== ============= ====================================
False (default) Big-endian Most significant byte is sent first
True Little-endian Least significant byte is sent first
====================== ============= ====================================
For example:
To store for example value=77.0, use ``numberOfDecimals = 1`` if the register will hold it as 770 internally.
The value 770 (dec) is 0302 (hex), where the most significant byte is 03 (hex) and the
    least significant byte is 02 (hex). With ``LsbFirst = False``, the most significant byte is given first,
    which is why the resulting string is ``\\x03\\x02``, which has length 2.
"""
_checkNumerical(value, description='inputvalue')
_checkInt(numberOfDecimals, minvalue=0, description='number of decimals')
_checkBool(LsbFirst, description='LsbFirst')
_checkBool(signed, description='signed parameter')
multiplier = 10 ** numberOfDecimals
integer = int(float(value) * multiplier)
if LsbFirst:
formatcode = '<' # Little-endian
else:
formatcode = '>' # Big-endian
if signed:
formatcode += 'h' # (Signed) short (2 bytes)
else:
formatcode += 'H' # Unsigned short (2 bytes)
outstring = _pack(formatcode, integer)
assert len(outstring) == 2
return outstring | 0.002996 |
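The scaling-and-packing at the heart of the function is plain struct usage; a sketch of the 77.0 example from the docstring:

import struct

value, number_of_decimals = 77.0, 1
integer = int(value * 10 ** number_of_decimals)  # 770 == 0x0302
big_endian = struct.pack('>H', integer)          # most significant byte first
little_endian = struct.pack('<H', integer)       # least significant byte first
print(big_endian, little_endian)                 # b'\x03\x02' b'\x02\x03'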
def host_delete(hostids, **kwargs):
'''
Delete hosts.
.. versionadded:: 2016.3.0
:param hostids: Hosts (hostids) to delete.
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: IDs of the deleted hosts.
CLI Example:
.. code-block:: bash
salt '*' zabbix.host_delete 10106
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'host.delete'
if not isinstance(hostids, list):
params = [hostids]
else:
params = hostids
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['hostids']
else:
raise KeyError
except KeyError:
return ret | 0.003724 |
def _GetDisplayPath(self, path_spec, full_path, data_stream_name):
"""Retrieves a path to display.
Args:
path_spec (dfvfs.PathSpec): path specification of the file entry.
full_path (str): full path of the file entry.
data_stream_name (str): name of the data stream.
Returns:
str: path to display.
"""
display_path = ''
if path_spec.HasParent():
parent_path_spec = path_spec.parent
if parent_path_spec and parent_path_spec.type_indicator == (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
display_path = ''.join([display_path, parent_path_spec.location])
display_path = ''.join([display_path, full_path])
if data_stream_name:
display_path = ':'.join([display_path, data_stream_name])
return display_path | 0.006203 |
def get_forks(self, repository_name_or_id, collection_id, project=None, include_links=None):
"""GetForks.
[Preview API] Retrieve all forks of a repository in the collection.
:param str repository_name_or_id: The name or ID of the repository.
:param str collection_id: Team project collection ID.
:param str project: Project ID or project name
:param bool include_links: True to include links.
:rtype: [GitRepositoryRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_name_or_id is not None:
route_values['repositoryNameOrId'] = self._serialize.url('repository_name_or_id', repository_name_or_id, 'str')
if collection_id is not None:
route_values['collectionId'] = self._serialize.url('collection_id', collection_id, 'str')
query_parameters = {}
if include_links is not None:
query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool')
response = self._send(http_method='GET',
location_id='158c0340-bf6f-489c-9625-d572a1480d57',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitRepositoryRef]', self._unwrap_collection(response)) | 0.005941 |
def from_xml(cls, xml_bytes):
"""
Create an instance of this from XML bytes.
@param xml_bytes: C{str} bytes of XML to parse
@return: an instance of L{MultipartInitiationResponse}
"""
root = XML(xml_bytes)
return cls(root.findtext('Bucket'),
root.findtext('Key'),
root.findtext('UploadId')) | 0.005208 |
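A standalone sketch of the same ElementTree pattern; the XML payload below is made up and the surrounding response class is not needed to show it:

from xml.etree.ElementTree import XML

xml_bytes = (b"<InitiateMultipartUploadResult>"
             b"<Bucket>mybucket</Bucket>"
             b"<Key>some/key</Key>"
             b"<UploadId>abc123</UploadId>"
             b"</InitiateMultipartUploadResult>")

root = XML(xml_bytes)
print(root.findtext('Bucket'), root.findtext('Key'), root.findtext('UploadId'))
# mybucket some/key abc123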
def id(self, id):
"""
Sets the id of this Shift.
UUID for this object
:param id: The id of this Shift.
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
if len(id) > 255:
raise ValueError("Invalid value for `id`, length must be less than `255`")
self._id = id | 0.007481 |
def full_path(self):
""" Return a full path to a current session directory. A result is made by joining a start path with
current session directory
:return: str
"""
return self.normalize_path(self.directory_sep().join((self.start_path(), self.session_path()))) | 0.03321 |
def check(self, var):
"""Return True if the variable matches the specified type."""
return (isinstance(var, _int_type) and
(self._lower_bound is None or var >= self._lower_bound) and
(self._upper_bound is None or var <= self._upper_bound)) | 0.006969 |
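A self-contained sketch of the same bounded-integer check, assuming _int_type is simply int on Python 3; the class name is illustrative:

class BoundedInt:
    def __init__(self, lower=None, upper=None):
        self._lower_bound = lower
        self._upper_bound = upper

    def check(self, var):
        # True only for ints (not floats) that fall inside the optional bounds.
        return (isinstance(var, int) and
                (self._lower_bound is None or var >= self._lower_bound) and
                (self._upper_bound is None or var <= self._upper_bound))

print(BoundedInt(0, 10).check(5), BoundedInt(0, 10).check(42))  # True False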
def reverse(
self,
query,
exactly_one=DEFAULT_SENTINEL,
timeout=DEFAULT_SENTINEL,
kind=None,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
.. versionchanged:: 1.14.0
Default value for ``exactly_one`` was ``False``, which differs
from the conventional default across geopy. Please always pass
this argument explicitly, otherwise you would get a warning.
In geopy 2.0 the default value will become ``True``.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str kind: Type of toponym. Allowed values: `house`, `street`, `metro`,
`district`, `locality`.
.. versionadded:: 1.14.0
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if exactly_one is DEFAULT_SENTINEL:
warnings.warn('%s.reverse: default value for `exactly_one` '
'argument will become True in geopy 2.0. '
'Specify `exactly_one=False` as the argument '
'explicitly to get rid of this warning.' % type(self).__name__,
DeprecationWarning, stacklevel=2)
exactly_one = False
try:
point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'geocode': point,
'format': 'json'
}
if self.api_key:
params['apikey'] = self.api_key
if self.lang:
params['lang'] = self.lang
if kind:
params['kind'] = kind
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
) | 0.002258 |
def _validate(self, validator, data, key, position=None, includes=None):
"""
Run through a schema and a data structure,
validating along the way.
Ignores fields that are in the data structure, but not in the schema.
Returns an array of errors.
"""
errors = []
if position:
position = '%s.%s' % (position, key)
else:
position = key
try: # Pull value out of data. Data can be a map or a list/sequence
data_item = util.get_value(data, key)
except KeyError: # Oops, that field didn't exist.
if validator.is_optional: # Optional? Who cares.
return errors
            # SHUT DOWN EVERYTHING
errors.append('%s: Required field missing' % position)
return errors
return self._validate_item(validator, data_item, position, includes) | 0.002188 |
def starter(comm_q, *args, **kwargs):
"""Start the interchange process
    The executor is expected to call this function. The args and kwargs match those of Interchange.__init__
"""
# logger = multiprocessing.get_logger()
ic = Interchange(*args, **kwargs)
comm_q.put(ic.worker_port)
ic.start()
logger.debug("Port information sent back to client") | 0.005319 |
def get_cert_profile_kwargs(name=None):
"""Get kwargs suitable for get_cert X509 keyword arguments from the given profile."""
if name is None:
name = ca_settings.CA_DEFAULT_PROFILE
profile = deepcopy(ca_settings.CA_PROFILES[name])
kwargs = {
'cn_in_san': profile['cn_in_san'],
'subject': get_default_subject(name=name),
}
key_usage = profile.get('keyUsage')
if key_usage and key_usage.get('value'):
kwargs['key_usage'] = KeyUsage(key_usage)
ext_key_usage = profile.get('extendedKeyUsage')
if ext_key_usage and ext_key_usage.get('value'):
kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage)
tls_feature = profile.get('TLSFeature')
if tls_feature and tls_feature.get('value'):
kwargs['tls_feature'] = TLSFeature(tls_feature)
if profile.get('ocsp_no_check'):
kwargs['ocsp_no_check'] = profile['ocsp_no_check']
return kwargs | 0.002121 |