text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def calculate_z1pt0(vs30):
'''
Reads an array of vs30 values (in m/s) and
returns the depth to the 1.0 km/s velocity horizon (in m)
Ref: Chiou & Youngs (2014) California model
:param vs30: the time-averaged shear-wave velocity (in m/s) over the top 30 m
'''
c1 = 571 ** 4.
c2 = 1360.0 ** 4.
return numpy.exp((-7.15 / 4.0) * numpy.log((vs30 ** 4. + c1) / (c2 + c1))) | 0.002584 |
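As a quick sanity check of the closed-form expression above, the minimal sketch below (it only assumes numpy is installed; the sample vs30 values are hypothetical) evaluates it for a few sites:

import numpy

def calculate_z1pt0(vs30):
    # Same Chiou & Youngs (2014) California relation as above.
    c1 = 571 ** 4.
    c2 = 1360.0 ** 4.
    return numpy.exp((-7.15 / 4.0) * numpy.log((vs30 ** 4. + c1) / (c2 + c1)))

vs30 = numpy.array([180.0, 360.0, 760.0])  # soft soil, stiff soil, rock (hypothetical)
print(calculate_z1pt0(vs30))               # z1.0 in metres, decreasing as vs30 increases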
def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, )) | 0.009532 |
def filter_genes_only(stmts_in, **kwargs):
"""Filter to statements containing genes only.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
specific_only : Optional[bool]
If True, only elementary genes/proteins will be kept and families
will be filtered out. If False, families are also included in the
output. Default: False
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
    remove_bound : Optional[bool]
        If True, removes bound conditions that are not genes.
        If False (default), filters out statements with non-gene bound
        conditions.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
remove_bound = 'remove_bound' in kwargs and kwargs['remove_bound']
specific_only = kwargs.get('specific_only')
logger.info('Filtering %d statements for ones containing genes only...' %
len(stmts_in))
stmts_out = []
for st in stmts_in:
genes_only = True
for agent in st.agent_list():
if agent is not None:
criterion = lambda a: _agent_is_gene(a, specific_only)
if not criterion(agent):
genes_only = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
else:
if _any_bound_condition_fails_criterion(agent, criterion):
genes_only = False
break
if genes_only:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | 0.001591 |
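A self-contained sketch of the same filtering pattern, keeping a statement only when every non-None agent passes the criterion; the Stmt stand-in and is_gene criterion below are hypothetical, not INDRA objects:

from collections import namedtuple

Stmt = namedtuple('Stmt', 'agents')           # hypothetical stand-in for a Statement
is_gene = lambda a: a in {'TP53', 'EGFR'}     # hypothetical criterion

def filter_all_agents(statements, criterion):
    # Keep a statement only if every non-None agent satisfies the criterion.
    return [s for s in statements
            if all(criterion(a) for a in s.agents if a is not None)]

stmts = [Stmt(('TP53', 'EGFR')), Stmt(('TP53', 'aspirin'))]
print(filter_all_agents(stmts, is_gene))      # keeps only the first statement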
def data(self, column, role):
"""Return the data for the column and role
:param column: the data column
:type column: int
:param role: the data role
:type role: QtCore.Qt.ItemDataRole
:returns: data depending on the role
:rtype:
:raises: None
"""
        if self._data is not None and (0 <= column < self._data.column_count()):
return self._data.data(column, role) | 0.006536 |
def extractRunInto(javaLogText):
"""
    This function will extract the various operation times for GLRM model building iterations.
    :param javaLogText: path to the Java log file to parse
    :return: None; the run result summary is printed
"""
global g_initialXY
global g_reguarlize_Y
global g_regularize_X_objective
global g_updateX
global g_updateY
global g_objective
global g_stepsize
global g_history
if os.path.isfile(javaLogText):
run_result = dict()
run_result["total time (ms)"] = []
run_result["initialXY (ms)"] = []
run_result["regularize Y (ms)"] = []
run_result["regularize X and objective (ms)"] = []
run_result["update X (ms)"] = []
run_result["update Y (ms)"] = []
run_result["objective (ms)"] = []
run_result["step size (ms)"] = []
run_result["update history (ms)"] = []
total_run_time = -1
val = 0.0
with open(javaLogText, 'r') as thefile: # go into tempfile and grab test run info
for each_line in thefile:
temp_string = each_line.split()
if len(temp_string) > 0:
val = temp_string[-1].replace('\\','')
if g_initialXY in each_line: # start of a new file
if total_run_time > 0: # update total run time
run_result["total time (ms)"].append(total_run_time)
total_run_time = 0.0
else:
total_run_time = 0.0
run_result["initialXY (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_reguarlize_Y in each_line:
run_result["regularize Y (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_regularize_X_objective in each_line:
run_result["regularize X and objective (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_updateX in each_line:
run_result["update X (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_updateY in each_line:
run_result["update Y (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_objective in each_line:
run_result["objective (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_stepsize in each_line:
run_result["step size (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
if g_history in each_line:
run_result["update history (ms)"].append(float(val))
total_run_time = total_run_time+float(val)
run_result["total time (ms)"].append(total_run_time) # save the last one
print("Run result summary: \n {0}".format(run_result))
else:
print("Cannot find your java log file. Nothing is done.\n") | 0.002236 |
def init_session(self, get_token=True):
"""
init a new oauth2 session that is required to access the cloud
:param bool get_token: if True, a token will be obtained, after
the session has been created
"""
if (self._client_id is None) or (self._client_secret is None):
sys.exit(
"Please make sure to set the client id and client secret "
"via the constructor, the environment variables or the config "
"file; otherwise, the LaMetric cloud cannot be accessed. "
"Abort!"
)
self._session = OAuth2Session(
client=BackendApplicationClient(client_id=self._client_id)
)
if get_token is True:
# get oauth token
self.get_token() | 0.002381 |
def make_logging_handlers_and_tools(self, multiproc=False):
"""Creates logging handlers and redirects stdout."""
log_stdout = self.log_stdout
if sys.stdout is self._stdout_to_logger:
            # If we already redirected stdout we don't need to redo it
log_stdout = False
if self.log_config:
if multiproc:
proc_log_config = self._mp_config
else:
proc_log_config = self._sp_config
if proc_log_config:
if isinstance(proc_log_config, dict):
new_dict = self._handle_dict_config(proc_log_config)
dictConfig(new_dict)
else:
parser = self._handle_config_parsing(proc_log_config)
memory_file = self._parser_to_string_io(parser)
fileConfig(memory_file, disable_existing_loggers=False)
if log_stdout:
# Create a logging mock for stdout
std_name, std_level = self.log_stdout
stdout = StdoutToLogger(std_name, log_level=std_level)
stdout.start()
self._tools.append(stdout) | 0.001688 |
def pickAttachment(self):
"""
Prompts the user to select an attachment to add to this edit.
"""
filename = QFileDialog.getOpenFileName(self.window(),
'Select Attachment',
'',
'All Files (*.*)')
if type(filename) == tuple:
filename = nativestring(filename[0])
filename = nativestring(filename)
if filename:
self.addAttachment(os.path.basename(filename), filename) | 0.006504 |
def decrease_weight(self, proxy):
        """Decrease the weight of a proxy by multiplying it by dec_ratio"""
new_weight = proxy.weight * self.dec_ratio
if new_weight < self.weight_thr:
self.remove_proxy(proxy)
else:
proxy.weight = new_weight | 0.006993 |
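A minimal, self-contained sketch of the same multiplicative weight decay with a removal threshold; the Proxy class and the dec_ratio/weight_thr values are hypothetical:

class Proxy:
    def __init__(self, addr, weight=1.0):
        self.addr, self.weight = addr, weight

pool = [Proxy('10.0.0.1'), Proxy('10.0.0.2')]
dec_ratio, weight_thr = 0.5, 0.3              # hypothetical settings

def decrease_weight(proxy):
    # Multiply the weight down; drop the proxy once it falls below the threshold.
    new_weight = proxy.weight * dec_ratio
    if new_weight < weight_thr:
        pool.remove(proxy)
    else:
        proxy.weight = new_weight

decrease_weight(pool[0])                      # 1.0 -> 0.5, kept
decrease_weight(pool[0])                      # 0.5 -> 0.25 < 0.3, removed
print([(p.addr, p.weight) for p in pool])     # only 10.0.0.2 remains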
def get_pattern_formatter(cls, location):
"""
Fragment from aiohttp.web_urldispatcher.UrlDispatcher#add_resource
        :param location: the route path template, possibly containing ``{var}`` or ``{var:regex}`` parts
        :return: tuple of (compiled pattern, formatter string, canonical path)
"""
pattern = ''
formatter = ''
canon = ''
for part in cls.ROUTE_RE.split(location):
match = cls.DYN.match(part)
if match:
pattern += '(?P<{}>{})'.format(match.group('var'), cls.GOOD)
formatter += '{' + match.group('var') + '}'
continue
match = cls.DYN_WITH_RE.match(part)
if match:
pattern += '(?P<{var}>{re})'.format(**match.groupdict())
formatter += '{' + match.group('var') + '}'
canon += match.group('re')
continue
if '{' in part or '}' in part:
raise ValueError("Invalid path '{}'['{}']".format(
location, part))
formatter += part
pattern += re.escape(part)
canon += part
try:
return re.compile(pattern), formatter, canon
except re.error as exc:
raise ValueError(
"Bad pattern '{}': {}".format(pattern, exc)) from None | 0.001614 |
def remove_keywords_from_list(self, keyword_list):
"""To remove keywords present in list
Args:
keyword_list (list(str)): List of keywords to remove
Examples:
            >>> keyword_processor.remove_keywords_from_list(["java", "python"])
Raises:
AttributeError: If `keyword_list` is not a list.
"""
if not isinstance(keyword_list, list):
raise AttributeError("keyword_list should be a list")
for keyword in keyword_list:
self.remove_keyword(keyword) | 0.007117 |
def update_content_item(access_token, content_item_id, payload):
'''
Name: update_content_item
Parameters: access_token, content_item_id, payload (dict)
Return: dictionary
'''
headers = {'Authorization': 'Bearer ' + str(access_token)}
content_item_url =\
construct_content_item_url(enrichment_url, content_item_id)
payload = create_random_payload(payload)
request = requests.put(content_item_url, json=payload, headers=headers)
if request.status_code == 200:
content_item = request.json()
return content_item
return {'status': request.status_code, "message": request.text} | 0.025253 |
def template2regex(template, ranges=None):
"""Convert a URL template to a regular expression.
Converts a template, such as /{name}/ to a regular expression, e.g.
/(?P<name>[^/]+)/ and a list of the named parameters found in the template
(e.g. ['name']). Ranges are given after a colon in a template name to
indicate a restriction on the characters that can appear there. For
example, in the template:
"/user/{id:alpha}"
The `id` must contain only characters from a-zA-Z. Other characters there
will cause the pattern not to match.
The ranges parameter is an optional dictionary that maps range names to
regular expressions. New range names can be added, or old range names can
be redefined using this parameter.
Example:
>>> import rhino.mapper
>>> rhino.mapper.template2regex("{fred}")
('^(?P<fred>[^/]+)$', ['fred'])
"""
if len(template) and -1 < template.find('|') < len(template) - 1:
raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template))
if ranges is None:
ranges = DEFAULT_RANGES
anchor = True
state = S_PATH
if len(template) and template[-1] == '|':
anchor = False
params = []
bracketdepth = 0
result = ['^']
name = ""
pattern = "[^/]+"
rangename = None
for c in template_splitter.split(template):
if state == S_PATH:
if len(c) > 1:
result.append(re.escape(c))
elif c == '[':
result.append("(")
bracketdepth += 1
elif c == ']':
bracketdepth -= 1
if bracketdepth < 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
result.append(")?")
elif c == '{':
name = ""
state = S_TEMPLATE
elif c == '}':
raise InvalidTemplateError("Mismatched braces in %s" % template)
elif c == '|':
pass
else:
result.append(re.escape(c))
else:
if c == '}':
if rangename and rangename in ranges:
result.append("(?P<%s>%s)" % (name, ranges[rangename]))
else:
result.append("(?P<%s>%s)" % (name, pattern))
params.append(name)
state = S_PATH
rangename = None
else:
name = c
if name.find(":") > -1:
name, rangename = name.split(":")
if bracketdepth != 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
if state == S_TEMPLATE:
raise InvalidTemplateError("Mismatched braces in %s" % template)
if anchor:
result.append('$')
return "".join(result), params | 0.00137 |
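The function above relies on module-level helpers (template_splitter, DEFAULT_RANGES, the S_* states) that are not shown in this snippet. The standalone sketch below reproduces the core template-to-regex idea with a single substitution pass; the range names are hypothetical:

import re

RANGES = {"word": r"[^/]+", "alpha": r"[a-zA-Z]+", "digits": r"\d+"}   # hypothetical ranges

def simple_template2regex(template):
    # Replace {name} or {name:range} segments with named groups and escape the rest.
    params, pattern, pos = [], "^", 0
    for m in re.finditer(r"\{(\w+)(?::(\w+))?\}", template):
        pattern += re.escape(template[pos:m.start()])
        name, rng = m.group(1), m.group(2) or "word"
        pattern += "(?P<%s>%s)" % (name, RANGES[rng])
        params.append(name)
        pos = m.end()
    pattern += re.escape(template[pos:]) + "$"
    return pattern, params

pattern, params = simple_template2regex("/user/{id:alpha}")
print(params, re.match(pattern, "/user/Alice").groupdict())   # ['id'] {'id': 'Alice'}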
def publish(
self, resource_group_name, automation_account_name, runbook_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Publish runbook draft.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
        :param runbook_name: The name of the runbook to publish.
:type runbook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.automation.models.ErrorResponseException>`
"""
raw_result = self._publish_initial(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
runbook_name=runbook_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'location': 'str',
})
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | 0.003462 |
def create_date_from_text(self, text):
"""
Parse a text in the form dd/mm/yyyy, dd/mm/yy or yyyy/mm/dd and return a corresponding :class:`datetime.date`
object. If no date can be extracted from the given text, a :exc:`ValueError` will be raised.
"""
# Try to match dd/mm/yyyy format
date_matches = re.match(self.DATE_LINE_REGEXP, text)
# If no match, try with yyyy/mm/dd format
if date_matches is None:
date_matches = re.match(self.US_DATE_LINE_REGEXP, text)
if date_matches is None:
raise ValueError("No date could be extracted from the given value")
# yyyy/mm/dd
if len(date_matches.group(1)) == 4:
return datetime.date(int(date_matches.group(1)), int(date_matches.group(2)), int(date_matches.group(3)))
# dd/mm/yy
if len(date_matches.group(3)) == 2:
current_year = datetime.date.today().year
current_millennium = current_year - (current_year % 1000)
year = current_millennium + int(date_matches.group(3))
# dd/mm/yyyy
else:
year = int(date_matches.group(3))
return datetime.date(year, int(date_matches.group(2)), int(date_matches.group(1))) | 0.004762 |
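The class-level regexes (DATE_LINE_REGEXP, US_DATE_LINE_REGEXP) are not shown in this snippet; the standalone sketch below uses a hypothetical dd/mm/yyyy pattern to illustrate the same two-digit-year handling:

import datetime
import re

DATE_RE = re.compile(r"(\d{1,2})/(\d{1,2})/(\d{2,4})")   # hypothetical dd/mm/yy(yy) pattern

def parse_ddmmyyyy(text):
    m = DATE_RE.match(text)
    if m is None:
        raise ValueError("No date could be extracted from the given value")
    day, month, year = int(m.group(1)), int(m.group(2)), int(m.group(3))
    if year < 100:
        # Two-digit year: assume the current millennium, as in the method above.
        year += datetime.date.today().year // 1000 * 1000
    return datetime.date(year, month, day)

print(parse_ddmmyyyy("25/12/2021"), parse_ddmmyyyy("25/12/21"))   # 2021-12-25 2021-12-25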
def restore_from_checkpoint(sess, input_checkpoint):
"""Return a TensorFlow saver from a checkpoint containing the metagraph."""
saver = tf.train.import_meta_graph('{}.meta'.format(input_checkpoint))
saver.restore(sess, input_checkpoint)
return saver | 0.003759 |
def selfSimilarityMatrix(featureVectors):
'''
This function computes the self-similarity matrix for a sequence
of feature vectors.
ARGUMENTS:
- featureVectors: a numpy matrix (nDims x nVectors) whose i-th column
corresponds to the i-th feature vector
RETURNS:
- S: the self-similarity matrix (nVectors x nVectors)
'''
[nDims, nVectors] = featureVectors.shape
[featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])
featureVectors2 = featureVectors2[0].T
S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))
return S | 0.001522 |
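A self-contained sketch of the core computation (it skips the aT.normalizeFeatures step and assumes numpy and scipy are installed; the random feature matrix is hypothetical):

import numpy as np
from scipy.spatial import distance

feature_vectors = np.random.rand(10, 20)            # nDims x nVectors (hypothetical)
S = 1.0 - distance.squareform(
    distance.pdist(feature_vectors.T, 'cosine'))    # nVectors x nVectors
print(S.shape, np.allclose(np.diag(S), 1.0))        # (20, 20) True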
def close(self):
"""Close the connection
:raises: ConnectionBusyError
"""
LOGGER.debug('Connection %s closing', self.id)
if self.busy:
raise ConnectionBusyError(self)
with self._lock:
if not self.handle.closed:
try:
self.handle.close()
except psycopg2.InterfaceError as error:
LOGGER.error('Error closing socket: %s', error) | 0.004255 |
def hardcodeRomIntoProcess(cls, rom):
"""
        Due to Verilog restrictions it is not possible to use array constants,
        so ROM memories have to be hardcoded as processes
"""
processes = []
signals = []
for e in rom.endpoints:
assert isinstance(e, Operator) and e.operator == AllOps.INDEX, e
me, index = e.operands
assert me is rom
# construct output of the rom
romValSig = rom.ctx.sig(rom.name, dtype=e.result._dtype)
signals.append(romValSig)
romValSig.hidden = False
# construct process which will represent content of the rom
cases = [(toHVal(i), [romValSig(v), ])
for i, v in enumerate(rom.defVal.val)]
statements = [SwitchContainer(index, cases), ]
for (_, (stm, )) in cases:
stm.parentStm = statements[0]
p = HWProcess(rom.name, statements, {index, },
{index, }, {romValSig, })
processes.append(p)
# override usage of original index operator on rom
# to use signal generated from this process
def replaceOrigRomIndexExpr(x):
if x is e.result:
return romValSig
else:
return x
for _e in e.result.endpoints:
_e.operands = tuple(map(replaceOrigRomIndexExpr, _e.operands))
e.result = romValSig
return processes, signals | 0.001934 |
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
serial = obj.isoformat()
return serial
from ..time_interval import TimeInterval, TimeIntervals
if isinstance(obj, (TimeInterval, TimeIntervals)):
return obj.to_json()
from ..stream import StreamId
if isinstance(obj, StreamId):
return obj.to_json()
from ..channels import BaseChannel
if isinstance(obj, BaseChannel):
return json.dumps({'channel_id': obj.channel_id})
raise TypeError("Type %s not serializable" % type(obj)) | 0.001577 |
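The usual way to hook such a serializer into the standard library is via json.dumps(default=...); a minimal sketch covering only the datetime branch:

import json
from datetime import date, datetime

def json_serial(obj):
    # Minimal version of the datetime/date branch above.
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    raise TypeError("Type %s not serializable" % type(obj))

print(json.dumps({"created": datetime(2020, 1, 2, 3, 4, 5)}, default=json_serial))
# {"created": "2020-01-02T03:04:05"}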
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
        # Score an evenly-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples) | 0.002077 |
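A self-contained sketch of the scikit-learn call pattern used above, with hypothetical term offsets along a 10,000-token text:

import numpy as np
from sklearn.neighbors import KernelDensity

offsets = np.array([120, 150, 400, 4200, 4300, 9000])[:, np.newaxis]   # hypothetical offsets
kde = KernelDensity(kernel='gaussian', bandwidth=2000).fit(offsets)
x_axis = np.linspace(0, 10000, 1000)[:, np.newaxis]
density = np.exp(kde.score_samples(x_axis)) * (10000 / 1000)           # same scaling as above
print(density.shape)                                                   # (1000,)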
def format_sync_stats(self, cnt):
'''
Format stats of the sync output.
        :param cnt: dict with the raw sync command output
        :return: the cleaned-up dict with a ``transfer`` stats section
'''
stats = salt.utils.odict.OrderedDict()
if cnt.get('retcode') == salt.defaults.exitcodes.EX_OK:
for line in cnt.get('stdout', '').split(os.linesep):
line = line.split(': ')
if len(line) == 2:
stats[line[0].lower().replace(' ', '_')] = line[1]
cnt['transfer'] = stats
del cnt['stdout']
# Remove empty
empty_sections = []
for section in cnt:
if not cnt[section] and section != 'retcode':
empty_sections.append(section)
for section in empty_sections:
del cnt[section]
return cnt | 0.002509 |
def set_col_name(self, index, name):
"""
Sets the column name.
        :param index: the 0-based column index
:type index: int
:param name: the name of the column
:type name: str
"""
javabridge.call(self.jobject, "setColName", "(ILjava/lang/String;)V", index, name) | 0.009404 |
def get_protein_data(peptide, pdata, headerfields, accfield):
"""These fields are currently not pool dependent so headerfields
is ignored"""
report = get_proteins(peptide, pdata, headerfields)
return get_cov_descriptions(peptide, pdata, report) | 0.003846 |
def add(self, p_src):
"""
Given a todo string, parse it and put it to the end of the list.
"""
todos = self.add_list([p_src])
return todos[0] if len(todos) else None | 0.009709 |
def _set_cir(self, v, load=False):
"""
Setter method for cir, mapped from YANG variable /policy_map/class/police/cir (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cir is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cir() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cir must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)""",
})
self.__cir = t
if hasattr(self, '_set'):
self._set() | 0.004637 |
def getparams(self, param):
"""Get parameters which match with input param.
:param Parameter param: parameter to compare with this parameters.
:rtype: list
"""
return list(cparam for cparam in self.values() if cparam == param) | 0.007463 |
def set_meta_refresh_enabled(self, enabled):
"""
        *Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and :attr:`~.Cluster.token_metadata_enabled` instead
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh must be enabled for the driver to become aware of any cluster
topology changes or schema updates.
"""
warn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0. Set "
"Cluster.schema_metadata_enabled and Cluster.token_metadata_enabled instead.", DeprecationWarning)
self.schema_metadata_enabled = enabled
self.token_metadata_enabled = enabled | 0.008373 |
async def close(self) -> None:
"""
Explicit exit. If so configured, populate cache to prove all creds in
wallet offline if need be, archive cache, and purge prior cache archives.
"""
LOGGER.debug('HolderProver.close >>>')
if self.cfg.get('archive-cache-on-close', False):
await self.load_cache(True)
Caches.purge_archives(self.dir_cache, True)
await super().close()
for path_rr_id in Tails.links(self._dir_tails):
rr_id = basename(path_rr_id)
try:
await self._sync_revoc(rr_id)
except ClosedPool:
LOGGER.warning('HolderProver sync-revoc on close required ledger for %s but pool was closed', rr_id)
LOGGER.debug('HolderProver.close <<<') | 0.004779 |
def get_user_profile(self, user_id):
"""
Get user profile.
Returns user profile data, including user id, name, and profile pic.
When requesting the profile for the user accessing the API, the user's
calendar feed URL will be returned as well.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
self.logger.debug("GET /api/v1/users/{user_id}/profile with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/profile".format(**path), data=data, params=params, single_item=True) | 0.006579 |
def model_getattr():
"""
Creates a getter that will drop the current value
and retrieve the model's attribute with the context key as name.
"""
def model_getattr(_value, context, **_params):
value = getattr(context["model"], context["key"])
return _attr(value)
return model_getattr | 0.003096 |
def list_forwarding_addresses(api_key, offset=None, coin_symbol='btc'):
'''
List the forwarding addresses for a certain api key
(and on a specific blockchain)
'''
assert is_valid_coin_symbol(coin_symbol)
assert api_key
url = make_url(coin_symbol, 'payments')
params = {'token': api_key}
if offset:
params['start'] = offset
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r) | 0.004149 |
def datetime_from_iso_format(string):
"""
    Return a datetime object from an ISO 8601 representation.
    Return None if string is non-conforming.
"""
match = DATE_ISO_REGEX.match(string)
if match:
date = datetime.datetime(year=int(match.group(DATE_ISO_YEAR_GRP)),
month=int(match.group(DATE_ISO_MONTH_GRP)),
day=int(match.group(DATE_ISO_DAY_GRP)),
hour=int(match.group(DATE_ISO_HOUR_GRP)),
second=int(match.group(DATE_ISO_SEC_GRP)),
minute=int(match.group(DATE_ISO_MIN_GRP)))
return date
else:
return None | 0.001395 |
def send_patch_document(self, event):
""" Sends a PATCH-DOC message, returning a Future that's completed when it's written out. """
msg = self.protocol.create('PATCH-DOC', [event])
return self._socket.send_message(msg) | 0.012397 |
def load_json_dct(
dct,
record_store=None,
schema=None,
loader=from_json_compatible
):
""" Create a Record instance from a json-compatible dictionary
The dictionary values should have types that are json compatible,
as if just loaded from a json serialized record string.
:param dct:
Python dictionary with key/value pairs for the record
:param record_store:
Record store to use for schema lookups (when $schema field is present)
:param schema:
PySchema Record class for the record to load.
This will override any $schema fields specified in `dct`
"""
if schema is None:
if record_store is None:
record_store = auto_store
try:
schema_name = dct.pop(SCHEMA_FIELD_NAME)
except KeyError:
raise ParseError((
"Serialized record missing '{0}' "
"record identifier and no schema supplied")
.format(SCHEMA_FIELD_NAME)
)
try:
schema = record_store.get(schema_name)
except KeyError:
raise ParseError(
"Can't recognize record type %r"
% (schema_name,), schema_name)
# if schema is explicit, use that instead of SCHEMA_FIELD_NAME
elif SCHEMA_FIELD_NAME in dct:
dct.pop(SCHEMA_FIELD_NAME)
record = loader(schema, dct)
return record | 0.000696 |
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.') | 0.008065 |
def get_separator_words(toks1):
"""
Finds the words that separate a list of tokens from a background corpus
Basically this generates a list of informative/interesting words in a set
toks1 is a list of words
Returns a list of separator words
"""
tab_toks1 = nltk.FreqDist(word.lower() for word in toks1)
if(os.path.isfile(ESSAY_COR_TOKENS_PATH)):
toks2 = pickle.load(open(ESSAY_COR_TOKENS_PATH, 'rb'))
else:
essay_corpus = open(ESSAY_CORPUS_PATH).read()
essay_corpus = sub_chars(essay_corpus)
toks2 = nltk.FreqDist(word.lower() for word in nltk.word_tokenize(essay_corpus))
pickle.dump(toks2, open(ESSAY_COR_TOKENS_PATH, 'wb'))
sep_words = []
for word in tab_toks1.keys():
tok1_present = tab_toks1[word]
if(tok1_present > 2):
tok1_total = tab_toks1._N
tok2_present = toks2[word]
tok2_total = toks2._N
fish_val = pvalue(tok1_present, tok2_present, tok1_total, tok2_total).two_tail
if(fish_val < .001 and tok1_present / float(tok1_total) > (tok2_present / float(tok2_total)) * 2):
sep_words.append(word)
sep_words = [w for w in sep_words if not w in nltk.corpus.stopwords.words("english") and len(w) > 5]
return sep_words | 0.004608 |
def create(self, request):
"""Create a new object."""
form = (self.form or generate_form(self.model))(request.POST)
if form.is_valid():
object = form.save()
return self._render(
request = request,
template = 'show',
context = {
cc2us(self.model.__name__): object
},
status = 201
)
else:
return self._render(
request = request,
template = 'new',
context = {
'form': form
},
status = 400
) | 0.026432 |
def send_data(self):
"""Send data packets from the local file to the server"""
if not self.connection._sock:
raise err.InterfaceError("(0, '')")
conn = self.connection
try:
with open(self.filename, 'rb') as open_file:
packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename))
finally:
# send the empty packet to signify we are done sending data
conn.write_packet(b'') | 0.004981 |
def _get_host_disks(host_reference):
'''
Helper function that returns a dictionary containing a list of SSD and Non-SSD disks.
'''
storage_system = host_reference.configManager.storageSystem
disks = storage_system.storageDeviceInfo.scsiLun
ssds = []
non_ssds = []
for disk in disks:
try:
has_ssd_attr = disk.ssd
except AttributeError:
has_ssd_attr = False
if has_ssd_attr:
ssds.append(disk)
else:
non_ssds.append(disk)
return {'SSDs': ssds, 'Non-SSDs': non_ssds} | 0.003448 |
def parse_timestamp(x):
"""Parse ISO8601 formatted timestamp."""
dt = dateutil.parser.parse(x)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=pytz.utc)
return dt | 0.005464 |
def printInfo(obj):
"""Print information about a vtk object."""
def printvtkactor(actor, tab=""):
if not actor.GetPickable():
return
if hasattr(actor, "polydata"):
poly = actor.polydata()
else:
poly = actor.GetMapper().GetInput()
pro = actor.GetProperty()
pos = actor.GetPosition()
bnds = actor.GetBounds()
col = pro.GetColor()
colr = precision(col[0], 3)
colg = precision(col[1], 3)
colb = precision(col[2], 3)
alpha = pro.GetOpacity()
npt = poly.GetNumberOfPoints()
ncl = poly.GetNumberOfCells()
print(tab, end="")
colors.printc("vtkActor", c="g", bold=1, invert=1, dim=1, end=" ")
if hasattr(actor, "_legend") and actor._legend:
colors.printc("legend: ", c="g", bold=1, end="")
colors.printc(actor._legend, c="g", bold=0)
else:
print()
if hasattr(actor, "filename") and actor.filename:
colors.printc(tab + " file: ", c="g", bold=1, end="")
colors.printc(actor.filename, c="g", bold=0)
colors.printc(tab + " color: ", c="g", bold=1, end="")
if actor.GetMapper().GetScalarVisibility():
colors.printc("defined by point or cell data", c="g", bold=0)
else:
colors.printc(colors.getColorName(col) + ', rgb=('+colr+', '
+ colg+', '+colb+'), alpha='+str(alpha), c='g', bold=0)
if actor.GetBackfaceProperty():
bcol = actor.GetBackfaceProperty().GetDiffuseColor()
bcolr = precision(bcol[0], 3)
bcolg = precision(bcol[1], 3)
bcolb = precision(bcol[2], 3)
colors.printc(tab+' back color: ', c='g', bold=1, end='')
colors.printc(colors.getColorName(bcol) + ', rgb=('+bcolr+', '
+ bcolg+', ' + bcolb+')', c='g', bold=0)
colors.printc(tab + " points: ", c="g", bold=1, end="")
colors.printc(npt, c="g", bold=0)
colors.printc(tab + " cells: ", c="g", bold=1, end="")
colors.printc(ncl, c="g", bold=0)
colors.printc(tab + " position: ", c="g", bold=1, end="")
colors.printc(pos, c="g", bold=0)
if hasattr(actor, "polydata"):
colors.printc(tab + " c. of mass: ", c="g", bold=1, end="")
colors.printc(actor.centerOfMass(), c="g", bold=0)
colors.printc(tab + " ave. size: ", c="g", bold=1, end="")
colors.printc(precision(actor.averageSize(), 4), c="g", bold=0)
colors.printc(tab + " diag. size: ", c="g", bold=1, end="")
colors.printc(actor.diagonalSize(), c="g", bold=0)
colors.printc(tab + " area: ", c="g", bold=1, end="")
colors.printc(precision(actor.area(), 8), c="g", bold=0)
colors.printc(tab + " volume: ", c="g", bold=1, end="")
colors.printc(precision(actor.volume(), 8), c="g", bold=0)
colors.printc(tab + " bounds: ", c="g", bold=1, end="")
bx1, bx2 = precision(bnds[0], 3), precision(bnds[1], 3)
colors.printc("x=(" + bx1 + ", " + bx2 + ")", c="g", bold=0, end="")
by1, by2 = precision(bnds[2], 3), precision(bnds[3], 3)
colors.printc(" y=(" + by1 + ", " + by2 + ")", c="g", bold=0, end="")
bz1, bz2 = precision(bnds[4], 3), precision(bnds[5], 3)
colors.printc(" z=(" + bz1 + ", " + bz2 + ")", c="g", bold=0)
arrtypes = dict()
arrtypes[vtk.VTK_UNSIGNED_CHAR] = "VTK_UNSIGNED_CHAR"
arrtypes[vtk.VTK_UNSIGNED_INT] = "VTK_UNSIGNED_INT"
arrtypes[vtk.VTK_FLOAT] = "VTK_FLOAT"
arrtypes[vtk.VTK_DOUBLE] = "VTK_DOUBLE"
if poly.GetPointData():
ptdata = poly.GetPointData()
for i in range(ptdata.GetNumberOfArrays()):
name = ptdata.GetArrayName(i)
if name:
colors.printc(tab + " point data: ", c="g", bold=1, end="")
try:
tt = arrtypes[ptdata.GetArray(i).GetDataType()]
colors.printc("name=" + name, "type=" + tt, c="g", bold=0)
except:
tt = ptdata.GetArray(i).GetDataType()
colors.printc("name=" + name, "type=", tt, c="g", bold=0)
if poly.GetCellData():
cldata = poly.GetCellData()
for i in range(cldata.GetNumberOfArrays()):
name = cldata.GetArrayName(i)
if name:
colors.printc(tab + " cell data: ", c="g", bold=1, end="")
try:
tt = arrtypes[cldata.GetArray(i).GetDataType()]
colors.printc("name=" + name, "type=" + tt, c="g", bold=0)
except:
tt = cldata.GetArray(i).GetDataType()
colors.printc("name=" + name, "type=", tt, c="g", bold=0)
if not obj:
return
elif isinstance(obj, vtk.vtkActor):
colors.printc("_" * 60, c="g", bold=0)
printvtkactor(obj)
elif isinstance(obj, vtk.vtkAssembly):
colors.printc("_" * 60, c="g", bold=0)
colors.printc("vtkAssembly", c="g", bold=1, invert=1, end=" ")
if hasattr(obj, "_legend"):
colors.printc("legend: ", c="g", bold=1, end="")
colors.printc(obj._legend, c="g", bold=0)
else:
print()
pos = obj.GetPosition()
bnds = obj.GetBounds()
colors.printc(" position: ", c="g", bold=1, end="")
colors.printc(pos, c="g", bold=0)
colors.printc(" bounds: ", c="g", bold=1, end="")
bx1, bx2 = precision(bnds[0], 3), precision(bnds[1], 3)
colors.printc("x=(" + bx1 + ", " + bx2 + ")", c="g", bold=0, end="")
by1, by2 = precision(bnds[2], 3), precision(bnds[3], 3)
colors.printc(" y=(" + by1 + ", " + by2 + ")", c="g", bold=0, end="")
bz1, bz2 = precision(bnds[4], 3), precision(bnds[5], 3)
colors.printc(" z=(" + bz1 + ", " + bz2 + ")", c="g", bold=0)
cl = vtk.vtkPropCollection()
obj.GetActors(cl)
cl.InitTraversal()
for i in range(obj.GetNumberOfPaths()):
act = vtk.vtkActor.SafeDownCast(cl.GetNextProp())
if isinstance(act, vtk.vtkActor):
printvtkactor(act, tab=" ")
elif hasattr(obj, "interactor"): # dumps Plotter info
        axtype = {
            0: "(no axes)",
            1: "(three gray grid walls)",
            2: "(cartesian axes from origin)",
            3: "(positive range of cartesian axes from origin)",
4: "(axes triad at bottom left)",
5: "(oriented cube at bottom left)",
6: "(mark the corners of the bounding box)",
7: "(ruler at the bottom of the window)",
8: "(the vtkCubeAxesActor object)",
9: "(the bounding box outline)",
10: "(circles of maximum bounding box range)",
}
bns, totpt = [], 0
for a in obj.actors:
b = a.GetBounds()
if a.GetBounds() is not None:
if isinstance(a, vtk.vtkActor):
totpt += a.GetMapper().GetInput().GetNumberOfPoints()
bns.append(b)
if len(bns) == 0:
return
acts = obj.getActors()
colors.printc("_" * 60, c="c", bold=0)
colors.printc("Plotter", invert=1, dim=1, c="c", end=" ")
otit = obj.title
if not otit:
otit = None
colors.printc(" title:", otit, bold=0, c="c")
colors.printc(" active renderer:", obj.renderers.index(obj.renderer), bold=0, c="c")
colors.printc(" nr. of actors:", len(acts), bold=0, c="c", end="")
colors.printc(" (" + str(totpt), "vertices)", bold=0, c="c")
max_bns = np.max(bns, axis=0)
min_bns = np.min(bns, axis=0)
colors.printc(" max bounds: ", c="c", bold=0, end="")
bx1, bx2 = precision(min_bns[0], 3), precision(max_bns[1], 3)
colors.printc("x=(" + bx1 + ", " + bx2 + ")", c="c", bold=0, end="")
by1, by2 = precision(min_bns[2], 3), precision(max_bns[3], 3)
colors.printc(" y=(" + by1 + ", " + by2 + ")", c="c", bold=0, end="")
bz1, bz2 = precision(min_bns[4], 3), precision(max_bns[5], 3)
colors.printc(" z=(" + bz1 + ", " + bz2 + ")", c="c", bold=0)
colors.printc(" axes type:", obj.axes, axtype[obj.axes], bold=0, c="c")
for a in obj.actors:
if a.GetBounds() is not None:
if isinstance(a, vtk.vtkVolume): # dumps Volume info
img = a.GetMapper().GetDataSetInput()
colors.printc('_'*60, c='b', bold=0)
colors.printc('Volume', invert=1, dim=1, c='b')
colors.printc(' scalar range:',
np.round(img.GetScalarRange(), 4), c='b', bold=0)
bnds = a.GetBounds()
colors.printc(" bounds: ", c="b", bold=0, end="")
bx1, bx2 = precision(bnds[0], 3), precision(bnds[1], 3)
colors.printc("x=(" + bx1 + ", " + bx2 + ")", c="b", bold=0, end="")
by1, by2 = precision(bnds[2], 3), precision(bnds[3], 3)
colors.printc(" y=(" + by1 + ", " + by2 + ")", c="b", bold=0, end="")
bz1, bz2 = precision(bnds[4], 3), precision(bnds[5], 3)
colors.printc(" z=(" + bz1 + ", " + bz2 + ")", c="b", bold=0)
colors.printc(" Click actor and press i for Actor info.", c="c")
else:
colors.printc("_" * 60, c="g", bold=0)
colors.printc(obj, c="g")
colors.printc(type(obj), c="g", invert=1) | 0.001703 |
def resources(argv=sys.argv[1:]):
"""
Juju CLI subcommand for dispatching resources subcommands.
"""
eps = iter_entry_points('jujuresources.subcommands')
ep_map = {ep.name: ep.load() for ep in eps}
parser = argparse.ArgumentParser()
if '--description' in argv:
print('Manage and mirror charm resources')
return 0
subparsers = {}
subparser_factory = parser.add_subparsers()
subparsers['help'] = subparser_factory.add_parser('help', help='Display help for a subcommand')
subparsers['help'].add_argument('command', nargs='?')
subparsers['help'].set_defaults(subcommand='help')
for name, subcommand in ep_map.items():
subparsers[name] = subparser_factory.add_parser(name, help=subcommand.__doc__)
subparsers[name].set_defaults(subcommand=subcommand)
for args, kwargs in getattr(subcommand, '_subcommand_args', []):
subparsers[name].add_argument(*args, **kwargs)
for argset in getattr(subcommand, '_subcommand_argsets', {}).values():
group = subparsers[name].add_mutually_exclusive_group(required=True)
for args, kwargs in argset:
group.add_argument(*args, **kwargs)
opts = parser.parse_args(argv)
if opts.subcommand == 'help':
if opts.command:
subparsers[opts.command].print_help()
else:
parser.print_help()
else:
return _exit(opts.subcommand(opts) or 0) | 0.002732 |
def altitudes(self):
'''
A list of the altitudes of each vertex [AltA, AltB, AltC], list of
floats.
An altitude is the shortest distance from a vertex to the side
opposite of it.
'''
a = self.area * 2
return [a / self.a, a / self.b, a / self.c] | 0.006431 |
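A quick hand check of the formula (each altitude is 2 * area / opposite side) with a 3-4-5 right triangle, whose area is 6:

area = 6.0
sides = [3.0, 4.0, 5.0]
print([2 * area / s for s in sides])   # [4.0, 3.0, 2.4]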
def custom_getter(self, activation_dtype=tf.bfloat16):
"""A custom getter that uses the encoding for bfloat16 and float32 vars.
    When a bfloat16 or float32 variable is requested, an encoded float16
    variable is created, which is then decoded and cast to a bfloat16
activation.
Args:
activation_dtype: a dtype to which to convert the decoded value.
Returns:
a function.
"""
def getter_fn(getter, *args, **kwargs):
requested_dtype = kwargs["dtype"]
if requested_dtype in (tf.bfloat16, tf.float32):
kwargs["dtype"] = tf.bfloat16
kwargs["initializer"] = _EncodingInitializer(
kwargs["initializer"], self)
ret = self._decode_with_identity_gradient(getter(*args, **kwargs))
return tf.cast(ret, activation_dtype)
return getter(*args, **kwargs)
return getter_fn | 0.004635 |
def aliases(self):
"""Returns a dictionary of the aliases, or "titles", of the field names
in self. An alias can be specified by passing a tuple in the name
part of the dtype. For example, if an array is created with
``dtype=[(('foo', 'bar'), float)]``, the array will have a field
called `bar` that has alias `foo` that can be accessed using
either `arr['foo']` or `arr['bar']`. Note that the first string
in the dtype is the alias, the second the name. This function returns
a dictionary in which the aliases are the keys and the names are the
values. Only fields that have aliases are returned.
"""
return dict(c[0] for c in self.dtype.descr if isinstance(c[0], tuple)) | 0.002632 |
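A short, self-contained illustration of the title/alias mechanism with a plain numpy structured array:

import numpy as np

arr = np.zeros(2, dtype=[(('foo', 'bar'), float)])   # 'foo' is the alias, 'bar' the name
print(arr['foo'], arr['bar'])                        # both resolve to the same field
print(dict(c[0] for c in arr.dtype.descr if isinstance(c[0], tuple)))   # {'foo': 'bar'}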
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) | 0.001672 |
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs) | 0.003831 |
def _set_reverse_metric_info(self, v, load=False):
"""
Setter method for reverse_metric_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/reverse_metric_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_info() directly.
YANG Description: ISIS interface reverse-metric configuration
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=reverse_metric_info.reverse_metric_info, is_container='container', presence=False, yang_name="reverse-metric-info", rest_name="reverse-metric-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-reverse-metric-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reverse_metric_info must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=reverse_metric_info.reverse_metric_info, is_container='container', presence=False, yang_name="reverse-metric-info", rest_name="reverse-metric-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-reverse-metric-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__reverse_metric_info = t
if hasattr(self, '_set'):
self._set() | 0.005074 |
def import_machines(self, options):
"""Imports the appliance into VirtualBox by creating instances of :py:class:`IMachine`
and other interfaces that match the information contained in the appliance as
closely as possible, as represented by the import instructions in the
:py:func:`virtual_system_descriptions` array.
Calling this method is the final step of importing an appliance into VirtualBox;
see :py:class:`IAppliance` for an overview.
Since importing the appliance will most probably involve copying and converting
disk images, which can take a long time, this method operates asynchronously and
returns an IProgress object to allow the caller to monitor the progress.
After the import succeeded, the UUIDs of the IMachine instances created can be
retrieved from the :py:func:`machines` array attribute.
in options of type :class:`ImportOptions`
Options for the importing operation.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
"""
if not isinstance(options, list):
raise TypeError("options can only be an instance of type list")
for a in options[:10]:
if not isinstance(a, ImportOptions):
raise TypeError(
"array can only contain objects of type ImportOptions")
progress = self._call("importMachines",
in_p=[options])
progress = IProgress(progress)
return progress | 0.008663 |
def eval_algorithm(curr, prev):
""" Evaluates OBV
Args:
curr: Dict of current volume and close
prev: Dict of previous OBV and close
Returns:
Float of OBV
"""
if curr['close'] > prev['close']:
v = curr['volume']
elif curr['close'] < prev['close']:
v = curr['volume'] * -1
else:
v = 0
return prev['obv'] + v | 0.004515 |
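A hand-worked OBV run over a few hypothetical bars, following the same rules (close up adds volume, close down subtracts it, flat leaves it unchanged):

bars = [
    {'close': 10.0, 'volume': 100},
    {'close': 10.5, 'volume': 150},   # up:   +150
    {'close': 10.2, 'volume': 120},   # down: -120
    {'close': 10.2, 'volume': 300},   # flat:   +0
]
obv, prev = 0, bars[0]
for curr in bars[1:]:
    if curr['close'] > prev['close']:
        obv += curr['volume']
    elif curr['close'] < prev['close']:
        obv -= curr['volume']
    prev = curr
print(obv)   # 30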
def ports(self):
'''The list of ports involved in this connection.
The result is a list of tuples, (port name, port object). Each port
name is a full path to the port (e.g. /localhost/Comp0.rtc:in) if
this Connection object is owned by a Port, which is in turn owned by
a Component in the tree. Otherwise, only the port's name will be used
(in which case it will be the full port name, which will include the
component name, e.g. 'ConsoleIn0.in'). The full path can be used to
find ports in the tree.
If, for some reason, the owner node of a port cannot be found, that
entry in the list will contain ('Unknown', None). This typically means
that a component's name has been clobbered on the name server.
This list will be created at the first reference to this property.
This means that the first reference may be delayed by CORBA calls,
but others will return quickly (unless a delayed reparse has been
triggered).
'''
def has_port(node, args):
if node.get_port_by_ref(args):
return node
return None
with self._mutex:
if not self._ports:
self._ports = []
for p in self._obj.ports:
# My owner's owner is a component node in the tree
if self.owner and self.owner.owner:
root = self.owner.owner.root
owner_nodes = [n for n in root.iterate(has_port,
args=p, filter=['is_component']) if n]
if not owner_nodes:
self._ports.append(('Unknown', None))
else:
port_owner = owner_nodes[0]
port_owner_path = port_owner.full_path_str
port_name = p.get_port_profile().name
prefix = port_owner.instance_name + '.'
if port_name.startswith(prefix):
port_name = port_name[len(prefix):]
self._ports.append((port_owner_path + ':' + \
port_name, parse_port(p, self.owner.owner)))
else:
self._ports.append((p.get_port_profile().name,
parse_port(p, None)))
return self._ports | 0.001994 |
def watch(cams, path=None, delay=10):
    """Get screenshots from all cams at a defined interval."""
while True:
for c in cams:
c.snap(path)
time.sleep(delay) | 0.005291 |
def cause_mip(self, mechanism, purview):
"""Return the irreducibility analysis for the cause MIP.
Alias for |find_mip()| with ``direction`` set to |CAUSE|.
"""
return self.find_mip(Direction.CAUSE, mechanism, purview) | 0.008 |
def recursive_children(self):
"""
Generator returning all recursive children elements.
"""
for child in self.children:
yield child
for recursive_child in child.recursive_children:
yield recursive_child | 0.007273 |
def create_gre_tunnel_no_encryption(cls, name, local_endpoint, remote_endpoint,
mtu=0, pmtu_discovery=True, ttl=0,
enabled=True, comment=None):
"""
Create a GRE Tunnel with no encryption. See `create_gre_tunnel_mode` for
constructor descriptions.
"""
return cls.create_gre_tunnel_mode(
name, local_endpoint, remote_endpoint, policy_vpn=None,
mtu=mtu, pmtu_discovery=pmtu_discovery, ttl=ttl,
enabled=enabled, comment=comment) | 0.009058 |
def reverse(self, lon, lat, types=None, limit=None):
"""Returns a Requests response object that contains a GeoJSON
collection of places near the given longitude and latitude.
`response.geojson()` returns the geocoding result as GeoJSON.
`response.status_code` returns the HTTP API status code.
See: https://www.mapbox.com/api-documentation/search/#reverse-geocoding."""
uri = URITemplate(self.baseuri + '/{dataset}/{lon},{lat}.json').expand(
dataset=self.name,
lon=str(round(float(lon), self.precision.get('reverse', 5))),
lat=str(round(float(lat), self.precision.get('reverse', 5))))
params = {}
if types:
types = list(types)
params.update(self._validate_place_types(types))
if limit is not None:
if not types or len(types) != 1:
raise InvalidPlaceTypeError(
'Specify a single type when using limit with reverse geocoding')
params.update(limit='{0}'.format(limit))
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
# for consistency with other services
def geojson():
return resp.json()
resp.geojson = geojson
return resp | 0.003067 |
def check_extracted_paths(namelist, subdir=None):
"""
Check whether zip file paths are all relative, and optionally in a
specified subdirectory, raises an exception if not
namelist: A list of paths from the zip file
subdir: If specified then check whether all paths in the zip file are
under this subdirectory
Python docs are unclear about the security of extract/extractall:
https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall
https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
"""
def relpath(p):
# relpath strips a trailing sep
# Windows paths may also use unix sep
q = os.path.relpath(p)
if p.endswith(os.path.sep) or p.endswith('/'):
q += os.path.sep
return q
parent = os.path.abspath('.')
if subdir:
if os.path.isabs(subdir):
raise FileException('subdir must be a relative path', subdir)
subdir = relpath(subdir + os.path.sep)
for name in namelist:
if os.path.commonprefix([parent, os.path.abspath(name)]) != parent:
raise FileException('Insecure path in zipfile', name)
if subdir and os.path.commonprefix(
[subdir, relpath(name)]) != subdir:
raise FileException(
'Path in zipfile is not in required subdir', name) | 0.000726 |
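A standalone sketch of the core safety check (the example entry names are hypothetical): only paths that stay under the extraction directory pass.

import os

parent = os.path.abspath('.')
for name in ['data/ok.txt', '../escape.txt', '/etc/passwd']:
    safe = os.path.commonprefix([parent, os.path.abspath(name)]) == parent
    print(name, safe)   # only the first entry stays inside parent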
def convert_frame(frame, body_encoding=None):
"""
Convert a frame to a list of lines separated by newlines.
:param Frame frame: the Frame object to convert
:rtype: list(str)
"""
lines = []
body = None
if frame.body:
if body_encoding:
body = encode(frame.body, body_encoding)
else:
body = encode(frame.body)
if HDR_CONTENT_LENGTH in frame.headers:
frame.headers[HDR_CONTENT_LENGTH] = len(body)
if frame.cmd:
lines.append(encode(frame.cmd))
lines.append(ENC_NEWLINE)
for key, vals in sorted(frame.headers.items()):
if vals is None:
continue
if type(vals) != tuple:
vals = (vals,)
for val in vals:
lines.append(encode("%s:%s\n" % (key, val)))
lines.append(ENC_NEWLINE)
if body:
lines.append(body)
if frame.cmd:
lines.append(ENC_NULL)
return lines | 0.001045 |
def get_genetic_profiles(study_id, profile_filter=None):
"""Return all the genetic profiles (data sets) for a given study.
Genetic profiles are different types of data for a given study. For
instance the study 'cellline_ccle_broad' has profiles such as
'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA'
for copy number alterations, etc.
Parameters
----------
study_id : str
The ID of the cBio study.
Example: 'paad_icgc'
profile_filter : Optional[str]
A string used to filter the profiles to return.
Will be one of:
- MUTATION
- MUTATION_EXTENDED
- COPY_NUMBER_ALTERATION
- MRNA_EXPRESSION
- METHYLATION
The genetic profiles can include "mutation", "CNA", "rppa",
"methylation", etc.
Returns
-------
genetic_profiles : list[str]
A list of genetic profiles available for the given study.
"""
data = {'cmd': 'getGeneticProfiles',
'cancer_study_id': study_id}
df = send_request(**data)
res = _filter_data_frame(df, ['genetic_profile_id'],
'genetic_alteration_type', profile_filter)
genetic_profiles = list(res['genetic_profile_id'].values())
return genetic_profiles | 0.00077 |
def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
'''Copy directory from local to HDFS'''
if not os.path.exists(localDirectory):
raise Exception('Local Directory does not exist!')
hdfsClient.mkdirs(hdfsDirectory)
result = True
for file in os.listdir(localDirectory):
file_path = os.path.join(localDirectory, file)
if os.path.isdir(file_path):
hdfs_directory = os.path.join(hdfsDirectory, file)
try:
result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception)))
result = False
else:
hdfs_file_path = os.path.join(hdfsDirectory, file)
try:
result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception)))
result = False
return result | 0.004119 |
def _conf(cls, opts):
"""Setup logging via ini-file from logging_conf_file option."""
if not opts.logging_conf_file:
return False
if not os.path.exists(opts.logging_conf_file):
# FileNotFoundError added only in Python 3.3
# https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy
raise OSError("Error: Unable to locate specified logging configuration file!")
logging.config.fileConfig(opts.logging_conf_file, disable_existing_loggers=False)
return True | 0.006861 |
def convex_hull(self):
"""Return an array of vertex indexes representing the convex hull.
If faces have not been computed for this mesh, the function
computes them.
If no vertices or faces are specified, the function returns None.
"""
if self._faces is None:
if self._vertices is None:
return None
self.triangulate()
return self._convex_hull | 0.004566 |
def find_geometry(self, physics):
r"""
Find the Geometry associated with a given Physics
Parameters
----------
physics : OpenPNM Physics Object
Must be a Physics object
Returns
-------
An OpenPNM Geometry object
Raises
------
If no Geometry object can be found, then an Exception is raised.
"""
# If geometry happens to be in settings, look it up directly
if 'geometry' in physics.settings.keys():
geom = self.geometries()[physics.settings['geometry']]
return geom
# Otherwise, use the bottom-up approach
for geo in self.geometries().values():
if physics in self.find_physics(geometry=geo):
return geo
# If all else fails, throw an exception
raise Exception('Cannot find a geometry associated with '+physics.name) | 0.002162 |
def get_file_format(self):
"""Get the file format description. This describes the type of
data stored on disk.
"""
# Have cached file format?
if self._file_fmt is not None:
return self._file_fmt
# Make the call to retrieve it.
desc = AudioStreamBasicDescription()
size = ctypes.c_int(ctypes.sizeof(desc))
check(_coreaudio.ExtAudioFileGetProperty(
self._obj, PROP_FILE_DATA_FORMAT, ctypes.byref(size),
ctypes.byref(desc)
))
# Cache result.
self._file_fmt = desc
return desc | 0.003257 |
def popmin_compat(self, count=1):
"""
Atomically remove the lowest-scoring item(s) in the set. Compatible
with Redis versions < 5.0.
:returns: a list of item, score tuples or ``None`` if the set is empty.
"""
pipe = self.database.pipeline()
r1, r2 = (pipe
.zrange(self.key, 0, count - 1, withscores=True)
.zremrangebyrank(self.key, 0, count - 1)
.execute())
return r1 | 0.004115 |
def rotateInDeclination(v1, theta_deg):
"""Rotation is chosen so a rotation of 90 degrees from zenith
ends up at ra=0, dec=0"""
axis = np.array([0,-1,0])
return rotateAroundVector(v1, axis, theta_deg) | 0.013889 |
def update_vrf_table(self, route_dist, prefix=None, next_hop=None,
route_family=None, route_type=None, tunnel_type=None,
is_withdraw=False, redundancy_mode=None,
pmsi_tunnel_type=None, **kwargs):
"""Update a BGP route in the VRF table identified by `route_dist`
with the given `next_hop`.
If `is_withdraw` is False, which is the default, add a BGP route
to the VRF table identified by `route_dist` with the given
`next_hop`.
If `is_withdraw` is True, remove a BGP route from the VRF table
and the given `next_hop` is ignored.
If `route_family` is VRF_RF_L2_EVPN, `route_type` and `kwargs`
are required to construct EVPN NLRI and `prefix` is ignored.
``redundancy_mode`` specifies a redundancy mode type.
        ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute
used to encode the multicast tunnel identifier.
This field is advertised only if route_type is
EVPN_MULTICAST_ETAG_ROUTE.
Returns assigned VPN label.
"""
from ryu.services.protocols.bgp.core import BgpCoreError
assert route_dist
if is_withdraw:
gen_lbl = False
next_hop = None
else:
gen_lbl = True
if not (is_valid_ipv4(next_hop) or is_valid_ipv6(next_hop)):
raise BgpCoreError(
desc='Invalid IPv4/IPv6 nexthop: %s' % next_hop)
vrf_table = self._tables.get((route_dist, route_family))
if vrf_table is None:
raise BgpCoreError(
desc='VRF table does not exist: route_dist=%s, '
'route_family=%s' % (route_dist, route_family))
vni = kwargs.get('vni', None)
if route_family == VRF_RF_IPV4:
if not is_valid_ipv4_prefix(prefix):
raise BgpCoreError(desc='Invalid IPv4 prefix: %s' % prefix)
ip, masklen = prefix.split('/')
prefix = IPAddrPrefix(int(masklen), ip)
elif route_family == VRF_RF_IPV6:
if not is_valid_ipv6_prefix(prefix):
raise BgpCoreError(desc='Invalid IPv6 prefix: %s' % prefix)
ip6, masklen = prefix.split('/')
prefix = IP6AddrPrefix(int(masklen), ip6)
elif route_family == VRF_RF_L2_EVPN:
assert route_type
if route_type == EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME:
# MPLS labels will be assigned automatically
kwargs['mpls_labels'] = []
if route_type == EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME:
# Inclusive Multicast Ethernet Tag Route does not have "vni",
# omit "vni" from "kwargs" here.
vni = kwargs.pop('vni', None)
subclass = EvpnNLRI._lookup_type_name(route_type)
kwargs['route_dist'] = route_dist
esi = kwargs.get('esi', None)
if esi is not None:
if isinstance(esi, dict):
esi_type = esi.get('type', 0)
esi_class = EvpnEsi._lookup_type(esi_type)
kwargs['esi'] = esi_class.from_jsondict(esi)
else: # isinstance(esi, numbers.Integral)
kwargs['esi'] = EvpnArbitraryEsi(
type_desc.Int9.from_user(esi))
if vni is not None:
# Disable to generate MPLS labels,
# because encapsulation type is not MPLS.
from ryu.services.protocols.bgp.api.prefix import (
TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE)
assert tunnel_type in [
None, TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]
gen_lbl = False
prefix = subclass(**kwargs)
else:
raise BgpCoreError(
desc='Unsupported route family %s' % route_family)
        # We do not check whether we have a path to the given prefix before
        # issuing a withdrawal, hence multiple withdrawals have no side effect.
return vrf_table.insert_vrf_path(
nlri=prefix, next_hop=next_hop, gen_lbl=gen_lbl,
is_withdraw=is_withdraw, redundancy_mode=redundancy_mode,
vni=vni, tunnel_type=tunnel_type,
pmsi_tunnel_type=pmsi_tunnel_type) | 0.001364 |
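The IPv4/IPv6 branches above validate the prefix and split it into address and mask length with ryu's helpers. As a standalone illustration of that parsing step only (it does not cover the L2 EVPN branch), here is a sketch using the standard-library `ipaddress` module rather than the ryu validators:

```python
import ipaddress

def split_prefix(prefix):
    """Validate an IPv4/IPv6 prefix string and return (address, masklen, version).

    Mirrors the `prefix.split('/')` plus validity checks above, but with the
    stdlib ipaddress module instead of is_valid_ipv4_prefix/is_valid_ipv6_prefix.
    """
    network = ipaddress.ip_network(prefix, strict=True)  # raises ValueError if invalid
    return str(network.network_address), network.prefixlen, network.version

print(split_prefix("10.1.0.0/16"))    # ('10.1.0.0', 16, 4)
print(split_prefix("2001:db8::/32"))  # ('2001:db8::', 32, 6)
```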
def thin(image, mask=None, iterations=1):
'''Thin an image to lines, preserving Euler number
Implements thinning as described in algorithm # 1 from
Guo, "Parallel Thinning with Two Subiteration Algorithms",
Communications of the ACM, Vol 32 #3 page 359.
'''
global thin_table, eight_connect
if thin_table is None:
thin_table = np.zeros((2,512),bool)
for i in range(512):
if (i & 16) == 0:
# All zeros -> 0
continue
pat = pattern_of(i & ~ 16)
ipat = pat.astype(int)
if scind.label(pat, eight_connect)[1] != 1:
thin_table[:,i] = True
continue
n1 = ((ipat[0,0] or ipat[0,1]) + (ipat[0,2] or ipat[1,2])+
(ipat[2,2] or ipat[2,1]) + (ipat[2,0] or ipat[1,0]))
n2 = ((ipat[0,1] or ipat[0,2]) + (ipat[1,2] or ipat[2,2])+
(ipat[2,1] or ipat[2,0]) + (ipat[1,0] or ipat[0,0]))
if min(n1,n2) not in (2,3):
thin_table[:,i] = True
continue
thin_table[0,i] = ((pat[0,1] or pat[0,2] or not pat[2,2]) and
pat[1,2])
thin_table[1,i] = ((pat[2,1] or pat[2,0] or not pat[0,0]) and
pat[1,0])
    masked_image = image.copy()
    if mask is not None:
        masked_image[~mask] = False
index_i, index_j, masked_image = prepare_for_index_lookup(masked_image, False)
if iterations is None:
iterations = len(index_i)
for i in range(iterations):
hit_count = len(index_i)
for j in range(2):
index_i, index_j, = index_lookup(index_i, index_j,
masked_image,
thin_table[j], 1)
if hit_count == len(index_i):
break
masked_image = extract_from_image_lookup(image, index_i, index_j)
    if mask is not None:
        # Restore the original values for pixels outside the mask.
        masked_image[~mask] = image[~mask]
return masked_image | 0.018993 |
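The 512-entry `thin_table` above is indexed by a pixel's 3x3 neighborhood packed into 9 bits, and the `(i & 16)` test reads the center pixel, which implies bit k encodes position (k // 3, k % 3). The snippet does not show `pattern_of`, so the sketch below is an assumed reconstruction of that encoding and its inverse, for illustration only:

```python
import numpy as np

def pattern_index(neighborhood):
    """Pack a 3x3 boolean neighborhood into a 9-bit lookup-table index.

    Assumed bit layout: bit k holds pixel (k // 3, k % 3), so the center
    pixel is bit 4 (value 16), consistent with the `(i & 16)` test above.
    """
    flat = np.asarray(neighborhood, dtype=bool).ravel()
    return int(sum(1 << k for k, v in enumerate(flat) if v))

def pattern_of(index):
    """Inverse of pattern_index: rebuild the 3x3 boolean pattern."""
    return np.array([(index >> k) & 1 for k in range(9)], dtype=bool).reshape(3, 3)

pat = np.array([[0, 1, 0],
                [1, 1, 0],
                [0, 0, 0]], dtype=bool)
idx = pattern_index(pat)        # 2 + 8 + 16 = 26
assert idx & 16                 # center pixel set
assert (pattern_of(idx) == pat).all()
```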
def calc_system(self, x, Y, Y_agg=None, L=None, population=None):
""" Calculates the missing part of the extension plus accounts
This method allows to specify an aggregated Y_agg for the
account calculation (see Y_agg below). However, the full Y needs
to be specified for the calculation of FY or SY.
Calculates:
- for each sector and country:
S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector,
D_exp_sector
- for each region:
D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg,
- for each region (if population vector is given):
D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap
Notes
-----
Only attributes which are not None are recalculated (for D_* this is
checked for each group (reg, cap, and w/o appendix)).
Parameters
----------
x : pandas.DataFrame or numpy.array
Industry output column vector
Y : pandas.DataFrame or numpy.arry
Full final demand array
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
L : pandas.DataFrame or numpy.array, optional
Leontief input output table L. If this is not given,
the method recalculates M based on D_cba (must be present in
the extension).
population : pandas.DataFrame or np.array, optional
Row vector with population per region
"""
if Y_agg is None:
try:
Y_agg = Y.sum(level='region',
axis=1).reindex(self.get_regions(),
axis=1)
except (AssertionError, KeyError):
Y_agg = Y.sum(level=0,
axis=1,).reindex(self.get_regions(),
axis=1)
y_vec = Y.sum(axis=0)
if self.F is None:
self.F = calc_F(self.S, x)
logging.debug(
'{} - F calculated'.format(self.name))
if self.S is None:
self.S = calc_S(self.F, x)
logging.debug('{} - S calculated'.format(self.name))
if (self.FY is None) and (self.SY is not None):
self.FY = calc_FY(self.SY, y_vec)
logging.debug('{} - FY calculated'.format(self.name))
if (self.SY is None) and (self.FY is not None):
self.SY = calc_SY(self.FY, y_vec)
logging.debug('{} - SY calculated'.format(self.name))
if self.M is None:
if L is not None:
self.M = calc_M(self.S, L)
logging.debug('{} - M calculated based on L'.format(
self.name))
else:
try:
self.M = recalc_M(self.S, self.D_cba,
Y=Y_agg,
nr_sectors=self.get_sectors().size)
logging.debug(
'{} - M calculated based on '
'D_cba and Y'.format(self.name))
except Exception as ex:
logging.debug(
'Recalculation of M not possible - cause: {}'.
format(ex))
FY_agg = 0
if self.FY is not None:
# FY_agg = ioutil.agg_columns(
# ext['FY'], self.get_Y_categories().size)
try:
FY_agg = (self.FY.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
FY_agg = (self.FY.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
if ((self.D_cba is None) or
(self.D_pba is None) or
(self.D_imp is None) or
(self.D_exp is None)):
if L is None:
logging.debug(
                    'Not possible to calculate D accounts - L not present')
return
else:
self.D_cba, self.D_pba, self.D_imp, self.D_exp = (
calc_accounts(self.S, L, Y_agg, self.get_sectors().size))
logging.debug(
'{} - Accounts D calculated'.format(self.name))
# aggregate to country
if ((self.D_cba_reg is None) or (self.D_pba_reg is None) or
(self.D_imp_reg is None) or (self.D_exp_reg is None)):
try:
self.D_cba_reg = (
self.D_cba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_cba_reg = (
self.D_cba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_pba_reg = (
self.D_pba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_pba_reg = (
self.D_pba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_imp_reg = (
self.D_imp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_imp_reg = (
self.D_imp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
try:
self.D_exp_reg = (
self.D_exp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_exp_reg = (
self.D_exp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
logging.debug(
'{} - Accounts D for regions calculated'.format(self.name))
# calc accounts per capita if population data is available
if population is not None:
if type(population) is pd.DataFrame:
# check for right order:
if (population.columns.tolist() !=
self.D_cba_reg.columns.tolist()):
logging.warning(
'Population regions are inconsistent with IO regions')
population = population.values
if ((self.D_cba_cap is None) or (self.D_pba_cap is None) or
(self.D_imp_cap is None) or (self.D_exp_cap is None)):
self.D_cba_cap = self.D_cba_reg.dot(
np.diagflat(1./population))
self.D_pba_cap = self.D_pba_reg.dot(
np.diagflat(1./population))
self.D_imp_cap = self.D_imp_reg.dot(
np.diagflat(1./population))
self.D_exp_cap = self.D_exp_reg.dot(
np.diagflat(1./population))
self.D_cba_cap.columns = self.D_cba_reg.columns
self.D_pba_cap.columns = self.D_pba_reg.columns
self.D_imp_cap.columns = self.D_imp_reg.columns
self.D_exp_cap.columns = self.D_exp_reg.columns
logging.debug(
'{} - Accounts D per capita calculated'.format(self.name))
return self | 0.000257 |
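To make the account algebra above concrete (multipliers `M = S · L` and a consumption-based account per region from `M` and aggregated final demand), here is a toy NumPy sketch with invented numbers; it shows only the underlying matrix arithmetic, not the pymrio implementation with its pandas indexing.

```python
import numpy as np

# Invented 4-sector system (2 regions x 2 sectors) with a single stressor row.
A = np.array([[0.10, 0.20, 0.05, 0.00],
              [0.05, 0.10, 0.00, 0.05],
              [0.05, 0.00, 0.10, 0.20],
              [0.00, 0.05, 0.05, 0.10]])   # technical coefficients
L = np.linalg.inv(np.eye(4) - A)           # Leontief inverse
S = np.array([[0.8, 1.2, 0.5, 0.9]])       # stressor intensity per unit of output
Y_agg = np.array([[100.0, 10.0],           # final demand aggregated to one
                  [50.0,   5.0],           # column per region
                  [10.0,  80.0],
                  [5.0,   40.0]])

M = S @ L               # multipliers: stressor per unit of final demand
D_cba_reg = M @ Y_agg   # consumption-based account of each region
print(M.round(3))
print(D_cba_reg.round(1))
```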
def update(self, fields=None, async_=None, jira=None, notify=True, **kwargs):
"""Update this resource on the server.
Keyword arguments are marshalled into a dict before being sent. If this
resource doesn't support ``PUT``, a :py:exc:`.JIRAError` will be raised; subclasses that specialize this method
will only raise errors in case of user error.
:param fields: Fields which should be updated for the object.
:type fields: Optional[Dict[str, Any]]
:param async_: If true the request will be added to the queue so it can be executed later using async_run()
:type async_: bool
:param jira: Instance of JIRA Client
:type jira: jira.JIRA
:param notify: Whether or not to notify users about the update. (Default: True)
:type notify: bool
:type kwargs: **Any
"""
if async_ is None:
async_ = self._options['async']
data = {}
if fields is not None:
data.update(fields)
data.update(kwargs)
data = json.dumps(data)
if not notify:
querystring = "?notifyUsers=false"
else:
querystring = ""
r = self._session.put(
self.self + querystring, data=data)
if 'autofix' in self._options and \
r.status_code == 400:
user = None
error_list = get_error_list(r)
logging.error(error_list)
if "The reporter specified is not a user." in error_list:
if 'reporter' not in data['fields']:
logging.warning(
"autofix: setting reporter to '%s' and retrying the update." % self._options['autofix'])
data['fields']['reporter'] = {
'name': self._options['autofix']}
if "Issues must be assigned." in error_list:
if 'assignee' not in data['fields']:
logging.warning("autofix: setting assignee to '%s' for %s and retrying the update." % (
self._options['autofix'], self.key))
data['fields']['assignee'] = {
'name': self._options['autofix']}
# for some reason the above approach fails on Jira 5.2.11
# so we need to change the assignee before
if "Issue type is a sub-task but parent issue key or id not specified." in error_list:
logging.warning(
"autofix: trying to fix sub-task without parent by converting to it to bug")
data['fields']['issuetype'] = {"name": "Bug"}
if "The summary is invalid because it contains newline characters." in error_list:
logging.warning("autofix: trying to fix newline in summary")
data['fields'][
                    'summary'] = self.fields.summary.replace("\n", "")
for error in error_list:
if re.search(r"^User '(.*)' was not found in the system\.", error, re.U):
m = re.search(
r"^User '(.*)' was not found in the system\.", error, re.U)
if m:
user = m.groups()[0]
else:
raise NotImplementedError()
if re.search(r"^User '(.*)' does not exist\.", error):
m = re.search(r"^User '(.*)' does not exist\.", error)
if m:
user = m.groups()[0]
else:
raise NotImplementedError()
if user:
logging.warning(
"Trying to add missing orphan user '%s' in order to complete the previous failed operation." % user)
jira.add_user(user, 'noreply@example.com', 10100, active=False)
# if 'assignee' not in data['fields']:
# logging.warning("autofix: setting assignee to '%s' and retrying the update." % self._options['autofix'])
# data['fields']['assignee'] = {'name': self._options['autofix']}
# EXPERIMENTAL --->
if async_:
if not hasattr(self._session, '_async_jobs'):
self._session._async_jobs = set()
self._session._async_jobs.add(threaded_requests.put(
self.self, data=json.dumps(data)))
else:
r = self._session.put(
self.self, data=json.dumps(data))
time.sleep(self._options['delay_reload'])
self._load(self.self) | 0.003231 |
def HandleForwardedIps(self, interface, forwarded_ips, interface_ip=None):
"""Handle changes to the forwarded IPs on a network interface.
Args:
interface: string, the output device to configure.
forwarded_ips: list, the forwarded IP address strings desired.
interface_ip: string, current interface ip address.
"""
desired = self.ip_forwarding_utils.ParseForwardedIps(forwarded_ips)
configured = self.ip_forwarding_utils.GetForwardedIps(
interface, interface_ip)
to_add = sorted(set(desired) - set(configured))
to_remove = sorted(set(configured) - set(desired))
self._LogForwardedIpChanges(
configured, desired, to_add, to_remove, interface)
self._AddForwardedIps(to_add, interface)
self._RemoveForwardedIps(to_remove, interface) | 0.001245 |
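The core of the method above is plain set arithmetic between the desired and the currently configured addresses; the sketch below isolates that logic with placeholder IP strings so it can be checked on its own:

```python
def diff_forwarded_ips(desired, configured):
    """Return (to_add, to_remove), mirroring the set arithmetic above."""
    desired, configured = set(desired), set(configured)
    to_add = sorted(desired - configured)
    to_remove = sorted(configured - desired)
    return to_add, to_remove

# Placeholder addresses, just to show the behaviour.
to_add, to_remove = diff_forwarded_ips(
    desired=["10.0.0.1", "10.0.0.2"],
    configured=["10.0.0.2", "10.0.0.3"],
)
print(to_add)     # ['10.0.0.1']
print(to_remove)  # ['10.0.0.3']
```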
def _load_config_include(self, include_directory):
"""Load included configuration files.
Args:
include_directory (str): The name of the config include directory.
Returns:
list: A list of all profiles for the current App.
"""
include_directory = os.path.join(self.app_path, include_directory)
if not os.path.isdir(include_directory):
msg = 'Provided include directory does not exist ({}).'.format(include_directory)
sys.exit(msg)
profiles = []
for filename in sorted(os.listdir(include_directory)):
if filename.endswith('.json'):
self.log.info('Loading config: {}'.format(filename))
print('Include File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, filename))
config_file = os.path.join(include_directory, filename)
with open(config_file) as data_file:
try:
profiles.extend(json.load(data_file))
except ValueError as e:
print('Invalid JSON file: {}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))
sys.exit(1)
return profiles | 0.004058 |
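A standalone sketch of the same include-loading pattern (walk a directory in sorted order, parse every `*.json` file, extend one combined profile list), using only the standard library; the directory name in the usage comment is hypothetical.

```python
import json
import sys
from pathlib import Path

def load_profiles(include_directory):
    """Collect profiles from every *.json file in a directory, sorted by name."""
    include_dir = Path(include_directory)
    if not include_dir.is_dir():
        sys.exit('Provided include directory does not exist ({}).'.format(include_dir))
    profiles = []
    for config_file in sorted(include_dir.glob('*.json')):
        try:
            with config_file.open() as data_file:
                profiles.extend(json.load(data_file))
        except ValueError as e:  # json.JSONDecodeError is a ValueError subclass
            sys.exit('Invalid JSON file {}: {}'.format(config_file.name, e))
    return profiles

# Example (directory name is made up):
# profiles = load_profiles('tcex.d')
```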
def _toc_fetch_finished(self):
"""Callback for when the TOC fetching is finished"""
self.cf.remove_port_callback(self.port, self._new_packet_cb)
logger.debug('[%d]: Done!', self.port)
self.finished_callback() | 0.008333 |
def secure_channel(target, credentials, options=None, *, loop=None, executor=None,
standalone_pool_for_streaming=False):
"""Creates a secure Channel to a server.
Args:
target: The server address.
credentials: A ChannelCredentials instance.
options: An optional list of key-value pairs (channel args in gRPC runtime)
to configure the channel.
Returns:
A Channel object.
"""
return Channel(_grpc.secure_channel(target, credentials, options),
loop, executor, standalone_pool_for_streaming) | 0.003571 |
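For context, a sketch of how the underlying secure channel is built with the synchronous `grpc` package before the wrapper above hands it, together with an event loop and executor, to its own `Channel` class; the endpoint and CA file path are placeholders.

```python
import grpc

# Placeholder endpoint and CA certificate path; replace with real values.
target = "example.com:443"
with open("ca.pem", "rb") as f:
    credentials = grpc.ssl_channel_credentials(root_certificates=f.read())

# Channel args are optional key-value pairs, e.g. a larger message size limit.
options = [("grpc.max_receive_message_length", 16 * 1024 * 1024)]
channel = grpc.secure_channel(target, credentials, options)
# ... create stubs against `channel`, make calls, then:
channel.close()
```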
def start_task(self, method, *args, **kwargs):
""" Start a task in a separate thread
Args:
method: the method to start in a separate thread
            args, kwargs: positional and keyword arguments passed through to the method
"""
thread = threading.Thread(target=method, args=args, kwargs=kwargs)
        thread.daemon = False  # non-daemon: the interpreter waits for it at exit
thread.start()
self.threads.append(thread) | 0.004926 |
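A minimal standalone sketch of the same fire-and-forget threading pattern, with the non-daemon flag spelled out; the `greet` task is invented for the example.

```python
import threading

class TaskRunner:
    def __init__(self):
        self.threads = []

    def start_task(self, method, *args, **kwargs):
        thread = threading.Thread(target=method, args=args, kwargs=kwargs)
        thread.daemon = False  # non-daemon: the interpreter waits for it at exit
        thread.start()
        self.threads.append(thread)

def greet(name, punctuation="!"):
    print("hello " + name + punctuation)

runner = TaskRunner()
runner.start_task(greet, "world", punctuation="?")
for t in runner.threads:
    t.join()
```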
def setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs,
outputDir, tags=None):
"""
This function uses the glue.datafind library to obtain the location of all
the frame files that will be needed to cover the analysis of the data
given in scienceSegs. This function will not check if the returned frames
cover the whole time requested, such sanity checks are done in the
pycbc.workflow.setup_datafind_workflow entry function. As opposed to
setup_datafind_runtime_generated this call will only run one call to
datafind per ifo, spanning the whole time. This function will return a list
of files corresponding to the individual frames returned by the datafind
query. This will allow pegasus to more easily identify all the files used
as input, but may cause problems for codes that need to take frame cache
files as input.
Parameters
-----------
cp : ConfigParser.ConfigParser instance
This contains a representation of the information stored within the
workflow configuration files
scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
This contains the times that the workflow is expected to analyse.
outputDir : path
All output files written by datafind processes will be written to this
directory.
tags : list of strings, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
datafindcaches : list of glue.lal.Cache instances
The glue.lal.Cache representations of the various calls to the datafind
server and the returned frame files.
datafindOuts : pycbc.workflow.core.FileList
List of all the datafind output files for use later in the pipeline.
"""
datafindcaches, _ = \
setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs,
outputDir, tags=tags)
datafindouts = convert_cachelist_to_filelist(datafindcaches)
return datafindcaches, datafindouts | 0.000805 |
def handle_command(command):
"""Accepts a string command and performs an action.
Args:
command: the command to run as a string.
"""
try:
cmds = command.split(None, 1)
cmd = cmds[0]
if cmd == 'new':
add_task(get_arg(cmds))
elif cmd == 'done':
mark_done(int(get_arg(cmds)))
elif cmd == 'list':
for task in format_tasks(list_tasks()):
print task
elif cmd == 'delete':
delete_task(int(get_arg(cmds)))
else:
print_usage()
except Exception, e: # pylint: disable=broad-except
print e
print_usage() | 0.015517 |
def _open_interface(self, conn_id, iface, callback):
"""Open an interface on this device
Args:
conn_id (int): the unique identifier for the connection
iface (string): the interface name to open
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
"""
try:
context = self.conns.get_context(conn_id)
except ArgumentError:
callback(conn_id, self.id, False, "Could not find connection information")
return
self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config('default_timeout'))
topics = context['topics']
open_iface_message = {'key': context['key'], 'type': 'command', 'operation': 'open_interface', 'client': self.name, 'interface': iface}
self.client.publish(topics.action, open_iface_message) | 0.006276 |
def dirs(self, path="/", **kwargs):
# type: (Text, **Any) -> Iterator[Text]
"""Walk a filesystem, yielding absolute paths to directories.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator over directory paths
(absolute from the filesystem root).
This method invokes `Walker.dirs` with the bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.dirs(self.fs, path=path) | 0.00189 |
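A rough standard-library approximation of what a dirs-only walk with `exclude_dirs` patterns does; this is plain `os.walk` plus `fnmatch`, not the PyFilesystem walker, and it ignores the `search`, `filter_dirs` and `max_depth` options.

```python
import fnmatch
import os

def walk_dirs(root, exclude_dirs=()):
    """Yield absolute directory paths under root, pruning excluded names."""
    for dirpath, dirnames, _filenames in os.walk(root):
        # Prune in place so os.walk never descends into excluded directories.
        dirnames[:] = [
            d for d in dirnames
            if not any(fnmatch.fnmatch(d, pattern) for pattern in exclude_dirs)
        ]
        for d in dirnames:
            yield os.path.abspath(os.path.join(dirpath, d))

for path in walk_dirs(".", exclude_dirs=["*.git", "__pycache__"]):
    print(path)
```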
def create(provider, count=1, name=None, **kwargs):
r'''
Create one or more cloud servers
Args:
* provider (str): Cloud provider, e.g. ec2, digitalocean
* count (int) =1: Number of instances
* name (str) =None: Name of server(s)
* \**kwargs: Provider-specific flags
'''
count = int(count)
provider = provider_by_name(provider)
options = provider.create_server_defaults
options.update(kwargs)
names = [name] * count
provider.validate_create_options(**options)
return provider.create_servers(count, names, **options) | 0.001692 |
def plogdet(K):
r"""Log of the pseudo-determinant.
It assumes that ``K`` is a positive semi-definite matrix.
Args:
K (array_like): matrix.
Returns:
float: log of the pseudo-determinant.
"""
egvals = eigvalsh(K)
return npsum(log(egvals[egvals > epsilon])) | 0.003322 |
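A quick standalone check of the pseudo-determinant formula above: for a full-rank positive definite matrix it agrees with `numpy.linalg.slogdet`, and for a rank-deficient PSD matrix it simply drops the (near-)zero eigenvalues. The epsilon threshold here is an assumption; the snippet imports its own.

```python
import numpy as np

def plogdet(K, epsilon=1e-12):
    """Log pseudo-determinant of a positive semi-definite matrix."""
    egvals = np.linalg.eigvalsh(K)
    return np.sum(np.log(egvals[egvals > epsilon]))

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))

K_full = A @ A.T + np.eye(4)   # positive definite, full rank
print(np.isclose(plogdet(K_full), np.linalg.slogdet(K_full)[1]))  # True

B = rng.standard_normal((4, 2))
K_rank2 = B @ B.T              # PSD with rank 2: only two eigenvalues contribute
print(plogdet(K_rank2))
```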
def read_float_matrix(rx_specifier):
""" Return float matrix as np array for the given rx specifier. """
path, offset = rx_specifier.strip().split(':', maxsplit=1)
offset = int(offset)
sample_format = 4
with open(path, 'rb') as f:
# move to offset
f.seek(offset)
# assert binary ark
binary = f.read(2)
assert (binary == b'\x00B')
# assert type float 32
format = f.read(3)
assert (format == b'FM ')
        # get the number of frames (first skip the 1-byte integer size marker)
        f.read(1)
        num_frames = struct.unpack('<i', f.read(4))[0]
        # get the feature dimension
        f.read(1)
        feature_size = struct.unpack('<i', f.read(4))[0]
# read feature data
data = f.read(num_frames * feature_size * sample_format)
feature_vector = np.frombuffer(data, dtype='float32')
feature_matrix = np.reshape(feature_vector, (num_frames, feature_size))
return feature_matrix | 0.002796 |
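To make the header layout above concrete, this round-trip sketch writes a tiny matrix in the same binary 'FM ' layout the reader expects (binary marker, type tag, two size-prefixed int32 dimensions, float32 data) and reads it back with `read_float_matrix` as defined above. The writer is reconstructed from the reader, not taken from Kaldi itself.

```python
import struct
import numpy as np

def write_float_matrix(path, matrix):
    """Write `matrix` in the binary layout read_float_matrix expects."""
    matrix = np.asarray(matrix, dtype='<f4')
    num_frames, feature_size = matrix.shape
    with open(path, 'wb') as f:
        f.write(b'\x00B')                    # binary marker
        f.write(b'FM ')                      # float-matrix type tag
        f.write(b'\x04')                     # int size marker (4 bytes)
        f.write(struct.pack('<i', num_frames))
        f.write(b'\x04')
        f.write(struct.pack('<i', feature_size))
        f.write(matrix.tobytes())

mat = np.arange(6, dtype='float32').reshape(2, 3)
write_float_matrix('example.ark', mat)
# rx specifiers are "path:offset"; offset 0 points at the start of the file.
restored = read_float_matrix('example.ark:0')
assert np.allclose(restored, mat)
```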
def events(times, labels=None, base=None, height=None, ax=None, text_kw=None,
**kwargs):
'''Plot event times as a set of vertical lines
Parameters
----------
times : np.ndarray, shape=(n,)
event times, in the format returned by
:func:`mir_eval.io.load_events` or
:func:`mir_eval.io.load_labeled_events`.
labels : list, shape=(n,), optional
event labels, in the format returned by
:func:`mir_eval.io.load_labeled_events`.
base : number
The vertical position of the base of the line.
By default, this will be the bottom of the plot.
height : number
The height of the lines.
By default, this will be the top of the plot (minus `base`).
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
text_kw : dict
If `labels` is provided, the properties of the text
objects can be specified here.
See `matplotlib.pyplot.Text` for valid parameters
kwargs
Additional keyword arguments to pass to
`matplotlib.pyplot.vlines`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# make sure we have an array for times
times = np.asarray(times)
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# If we have fresh axes, set the limits
if new_axes:
# Infer base and height
if base is None:
base = 0
if height is None:
height = 1
ax.set_ylim([base, height])
else:
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
style = next(cycler).copy()
style.update(kwargs)
# If the user provided 'colors', don't override it with 'color'
if 'colors' in style:
style.pop('color', None)
lines = ax.vlines(times, base, base + height, **style)
if labels:
for path, lab in zip(lines.get_paths(), labels):
ax.annotate(lab,
xy=(path.vertices[0][0], height),
xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
if new_axes:
ax.set_yticks([])
__expand_limits(ax, [base, base + height], which='y')
if times.size:
__expand_limits(ax, [times.min(), times.max()], which='x')
return ax | 0.000354 |
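A minimal usage sketch for plotting labeled event times as vertical lines, in the spirit of the helper above but using only public Matplotlib calls; the times and labels are invented.

```python
import numpy as np
import matplotlib
matplotlib.use('Agg')  # render off-screen so the sketch runs headless
import matplotlib.pyplot as plt

times = np.array([0.5, 1.2, 2.8, 4.1])      # made-up event times in seconds
labels = ['kick', 'snare', 'kick', 'crash']

fig, ax = plt.subplots()
ax.vlines(times, 0, 1, colors='C0')
for t, lab in zip(times, labels):
    ax.annotate(lab, xy=(t, 1), xytext=(8, -10), textcoords='offset points',
                va='top', bbox=dict(boxstyle='round', facecolor='white'))
ax.set_yticks([])
ax.set_xlim(0, times.max() + 0.5)
fig.savefig('events.png')
```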
def get_actions(self, commands):
"""Get parameterized actions from command list based on command type and verb."""
actions = []
for type, turn_based, verb in commands:
if len(self.action_filter) != 0 and verb not in self.action_filter:
continue
if type == 'DiscreteMovement':
if verb in {"move", "turn", "look",
"strafe", "jumpmove", "jumpstrafe"}:
actions.append(verb + " 1")
actions.append(verb + " -1")
elif verb in {"jumpeast", "jumpnorth", "jumpsouth",
"jumpwest", "movenorth", "moveeast",
"movesouth", "movewest", "jumpuse",
"use", "attack", "jump"}:
actions.append(verb + " 1")
else:
raise CommandHandlerException("Invalid discrete command")
elif type == 'ContinuousMovement':
# Translate to discrete.
if verb in {"move", "strafe", "pitch", "turn"}:
actions.append(verb + " 1")
actions.append(verb + " -1")
elif verb in {"crouch", "jump", "attack", "use"}:
actions.append(verb + " 1")
actions.append(verb + " 0")
else:
raise CommandHandlerException("Invalid continuous command")
elif type == 'HumanLevel':
if verb == 'moveMouse':
actions.append('mouseMove 0 0')
elif verb in {'forward', 'back', 'left', 'right'}:
actions.append(verb + ' 1')
actions.append(verb + ' 0')
else:
actions.append(verb)
elif type == 'MissionQuit':
if verb != 'quit':
raise CommandHandlerException("Invalid quit command")
actions.append(verb)
elif type == 'Chat':
if verb != 'chat':
raise CommandHandlerException("Invalid chat command")
actions.append(verb)
elif type == 'SimpleCraft':
if verb != 'craft':
raise CommandHandlerException("Invalid craft command")
actions.append(verb)
            elif type in ('AbsoluteMovement', 'Inventory'):
actions.append(verb)
return actions | 0.001199 |
def create_model(self, parent, name, multiplicity='ZERO_MANY', **kwargs):
"""Create a new child model under a given parent.
        In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as an
        additional keyword=value argument to this method. This will improve performance of the backend at the
        trade-off that someone looking at the frontend won't notice any changes unless the page is refreshed.
        :param parent: parent model
        :type parent: :class:`models.Part`
        :param name: new model name
        :type name: basestring
:param multiplicity: choose between ZERO_ONE, ONE, ZERO_MANY, ONE_MANY or M_N
:type multiplicity: basestring
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: :class:`models.Part` with category `MODEL`
:raises IllegalArgumentError: When the provided arguments are incorrect
:raises APIError: if the `Part` could not be created
"""
if parent.category != Category.MODEL:
raise IllegalArgumentError("The parent should be of category 'MODEL'")
data = {
"name": name,
"parent": parent.id,
"multiplicity": multiplicity
}
return self._create_part(action="create_child_model", data=data, **kwargs) | 0.005442 |
def add(self, artifact_type: ArtifactType, src_path: str,
dst_path: str=None):
"""Add an artifact of type `artifact_type` at `src_path`.
`src_path` should be the path of the file relative to project root.
`dst_path`, if given, is the desired path of the artifact in dependent
targets, relative to its base path (by type).
"""
if dst_path is None:
dst_path = src_path
other_src_path = self._artifacts[artifact_type].setdefault(
dst_path, src_path)
if src_path != other_src_path:
raise RuntimeError(
'{} artifact with dest path {} exists with different src '
'path: {} != {}'.format(artifact_type, dst_path, src_path,
other_src_path)) | 0.00612 |
def _has_attr(cls, ds, attr, concept_name, priority=BaseCheck.HIGH):
"""
        Checks for the existence of attr in ds, using concept_name for the result name/message.
"""
val = cls.std_check(ds, attr)
msgs = []
if not val:
msgs.append("Attr '{}' (IOOS concept: '{}') not found in dataset".format(attr, concept_name))
return Result(priority, val, concept_name, msgs) | 0.009456 |
def fcoe_networks(self):
"""
Gets the FcoeNetworks API client.
Returns:
FcoeNetworks:
"""
if not self.__fcoe_networks:
self.__fcoe_networks = FcoeNetworks(self.__connection)
return self.__fcoe_networks | 0.007299 |
def end_compress(codec, stream):
"""End of compressing the current image.
Wraps the openjp2 library function opj_end_compress.
Parameters
----------
codec : CODEC_TYPE
Compressor handle.
stream : STREAM_TYPE_P
Output stream buffer.
Raises
------
RuntimeError
If the OpenJPEG library routine opj_end_compress fails.
"""
OPENJP2.opj_end_compress.argtypes = [CODEC_TYPE, STREAM_TYPE_P]
OPENJP2.opj_end_compress.restype = check_error
OPENJP2.opj_end_compress(codec, stream) | 0.001825 |
def ParseAccountInformation(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses account information.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row with account information.
"""
query_hash = hash(query)
display_name = self._GetRowValue(query_hash, row, 'given_displayname')
fullname = self._GetRowValue(query_hash, row, 'fullname')
# TODO: Move this to the formatter, and ensure username is rendered
# properly when fullname and/or display_name is None.
username = '{0!s} <{1!s}>'.format(fullname, display_name)
event_data = SkypeAccountEventData()
event_data.country = self._GetRowValue(query_hash, row, 'country')
event_data.display_name = display_name
event_data.email = self._GetRowValue(query_hash, row, 'emails')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.username = username
timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Authenticate Request')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Last Online')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Mood Event')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Last Used')
parser_mediator.ProduceEventWithEventData(event, event_data) | 0.006482 |
def dont_load(self, *fields):
        '''Works like :meth:`load_only` to provide a
:ref:`performance boost <increase-performance>` in cases when you need
to load all fields except a subset specified by *fields*.
'''
q = self._clone()
fs = unique_tuple(q.exclude_fields, fields)
q.exclude_fields = fs if fs else None
return q | 0.005479 |
def Parse(self):
"""Iterator returning dict for each entry in history."""
for data in self.Query(self.EVENTS_QUERY):
(timestamp, agent_bundle_identifier, agent_name, url, sender,
sender_address, type_number, title, referrer, referrer_alias) = data
yield [
timestamp, "OSX_QUARANTINE", url, referrer, title, agent_name,
agent_bundle_identifier, sender, sender_address, type_number,
referrer_alias
] | 0.006522 |
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
files = ChangedExistingFiles(ui, repo, pats, opts)
files = gofmt_required(files)
if not files:
ui.status("no modified go files\n")
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if subprocess.call(cmd + files) != 0:
raise hg_util.Abort("gofmt did not exit cleanly")
except hg_error.Abort, e:
raise
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
return | 0.035714 |
def load_alerts(self):
"""
        NOTE: use refresh() instead of this if you just need to refresh the alerts list.
        Gets the raw XML (CAP) from the alerts feed, runs it through the parser
        and ends up with a list of alert objects, which it stores in self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts() | 0.008368 |
def randchoice(seq: Union[str, list, tuple, dict, set]) -> any:
"""Return a randomly chosen element from the given sequence.
    Raises TypeError if *seq* is not str, list, tuple, dict or set, and an
IndexError if it is empty.
>>> randchoice((1, 2, 'a', 'b')) #doctest:+SKIP
'a'
"""
if not isinstance(seq, (str, list, tuple, dict, set)):
raise TypeError('seq must be str, list, tuple, dict or set')
if len(seq) <= 0:
raise IndexError('seq must have at least one element')
if isinstance(seq, set):
values = list(seq)
return randchoice(values)
elif isinstance(seq, dict):
indexes = list(seq)
index = randchoice(indexes)
else:
index = randbelow(len(seq))
return seq[index] | 0.001295 |
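A small sketch exercising `randchoice` as defined above. The snippet relies on a module-level `randbelow` helper that is not shown; here it is assumed to behave like `secrets.randbelow` from the standard library.

```python
import secrets

# Stand-in for the randbelow helper used (but not defined) in the snippet above.
def randbelow(n):
    return secrets.randbelow(n)

print(randchoice('abcdef'))                  # a random character
print(randchoice([10, 20, 30]))              # a random list element
print(randchoice({'red', 'green', 'blue'}))  # a random set member
print(randchoice({'a': 1, 'b': 2}))          # a random dict value (via a random key)

try:
    randchoice([])
except IndexError as exc:
    print(exc)                               # seq must have at least one element
```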
def stop(ctx, description, f):
"""
Use it when you stop working on the current task. You can add a description
    of what you've done.
"""
description = ' '.join(description)
try:
timesheet_collection = get_timesheet_collection_for_context(ctx, f)
current_timesheet = timesheet_collection.latest()
current_timesheet.continue_entry(
datetime.date.today(),
datetime.datetime.now().time(),
description
)
except ParseError as e:
ctx.obj['view'].err(e)
except NoActivityInProgressError as e:
ctx.obj['view'].err(e)
except StopInThePastError as e:
ctx.obj['view'].err(e)
else:
current_timesheet.save() | 0.001366 |
def create(self, ogpgs):
"""
Method to create object group permissions general
        :param ogpgs: List containing the object group permissions general to be created in the database
:return: None
"""
data = {'ogpgs': ogpgs}
return super(ApiObjectGroupPermissionGeneral, self).post('api/v3/object-group-perm-general/', data) | 0.008671 |
def run_id(self):
'''Run name without whitespace
'''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | 0.009615 |
def connection_from_ndb_query(query, args=None, connection_type=None, edge_type=None, pageinfo_type=None,
transform_edges=None, context=None, **kwargs):
'''
    A simple function that accepts an ndb Query and uses an ndb QueryIterator object
    (https://cloud.google.com/appengine/docs/python/ndb/queries#iterators)
    to return a connection object for use in GraphQL.
    It paginates with ndb query cursors, so the `after` argument must be an
    urlsafe cursor returned by a previous page.
'''
args = args or {}
connection_type = connection_type or Connection
edge_type = edge_type or Edge
pageinfo_type = pageinfo_type or PageInfo
full_args = dict(args, **kwargs)
first = full_args.get('first')
after = full_args.get('after')
has_previous_page = bool(after)
keys_only = full_args.get('keys_only', False)
batch_size = full_args.get('batch_size', 20)
page_size = first if first else full_args.get('page_size', 20)
start_cursor = ndb.Cursor(urlsafe=after) if after else None
ndb_iter = query.iter(produce_cursors=True, start_cursor=start_cursor, batch_size=batch_size, keys_only=keys_only, projection=query.projection)
edges = []
while len(edges) < page_size:
missing_edges_count = page_size - len(edges)
edges_page = generate_edges_page(ndb_iter, missing_edges_count, keys_only, edge_type)
edges.extend(transform_edges(edges_page, args, context) if transform_edges else edges_page)
if len(edges_page) < missing_edges_count:
break
try:
end_cursor = ndb_iter.cursor_after().urlsafe()
except BadArgumentError:
end_cursor = None
# Construct the connection
return connection_type(
edges=edges,
page_info=pageinfo_type(
start_cursor=start_cursor.urlsafe() if start_cursor else '',
end_cursor=end_cursor,
has_previous_page=has_previous_page,
has_next_page=ndb_iter.has_next()
)
) | 0.003 |