Unnamed: 0 (int64, 0 to 10k) | repository_name (string, lengths 7 to 54) | func_path_in_repository (string, lengths 5 to 223) | func_name (string, lengths 1 to 134) | whole_func_string (string, lengths 100 to 30.3k) | language (string, 1 class) | func_code_string (string, lengths 100 to 30.3k) | func_code_tokens (string, lengths 138 to 33.2k) | func_documentation_string (string, lengths 1 to 15k) | func_documentation_tokens (string, lengths 5 to 5.14k) | split_name (string, 1 class) | func_code_url (string, lengths 91 to 315) |
---|---|---|---|---|---|---|---|---|---|---|---|
500 | juju/charm-helpers | charmhelpers/core/hookenv.py | log | def log(message, level=None):
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
subprocess.call(command)
except OSError as e:
if e.errno == errno.ENOENT:
if level:
message = "{}: {}".format(level, message)
message = "juju-log: {}".format(message)
print(message, file=sys.stderr)
else:
raise | python | def log(message, level=None):
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
subprocess.call(command)
except OSError as e:
if e.errno == errno.ENOENT:
if level:
message = "{}: {}".format(level, message)
message = "juju-log: {}".format(message)
print(message, file=sys.stderr)
else:
raise | ['def', 'log', '(', 'message', ',', 'level', '=', 'None', ')', ':', 'command', '=', '[', "'juju-log'", ']', 'if', 'level', ':', 'command', '+=', '[', "'-l'", ',', 'level', ']', 'if', 'not', 'isinstance', '(', 'message', ',', 'six', '.', 'string_types', ')', ':', 'message', '=', 'repr', '(', 'message', ')', 'command', '+=', '[', 'message', '[', ':', 'SH_MAX_ARG', ']', ']', '# Missing juju-log should not cause failures in unit tests', '# Send log output to stderr', 'try', ':', 'subprocess', '.', 'call', '(', 'command', ')', 'except', 'OSError', 'as', 'e', ':', 'if', 'e', '.', 'errno', '==', 'errno', '.', 'ENOENT', ':', 'if', 'level', ':', 'message', '=', '"{}: {}"', '.', 'format', '(', 'level', ',', 'message', ')', 'message', '=', '"juju-log: {}"', '.', 'format', '(', 'message', ')', 'print', '(', 'message', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'else', ':', 'raise'] | Write a message to the juju log | ['Write', 'a', 'message', 'to', 'the', 'juju', 'log'] | train | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L100-L119 |
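
A minimal usage sketch for the `log()` helper above. The import path follows the file location shown (`charmhelpers/core/hookenv.py`); outside a Juju hook environment the `juju-log` binary is absent, so the fallback branch prints to stderr instead of failing.

```python
# Usage sketch (assumes the charmhelpers package is importable).
# Without the juju-log binary on PATH, the OSError/ENOENT branch above
# makes these calls print "juju-log: ..." lines on stderr instead.
from charmhelpers.core.hookenv import log

log("unit configured")                         # default level
log("disk usage above 90%", level="WARNING")   # explicit level passed via -l
log({"status": "blocked"})                     # non-strings are repr()'d first
```
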
501 | Chilipp/funcargparse | funcargparse/__init__.py | FuncArgParser.parse_known_chained | def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder | python | def parse_known_chained(self, args=None):
"""
Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known
"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
return self._parse2subparser_funcs(kws), remainder | ['def', 'parse_known_chained', '(', 'self', ',', 'args', '=', 'None', ')', ':', 'ns', ',', 'remainder', '=', 'self', '.', 'parse_known_args', '(', 'args', ')', 'kws', '=', 'vars', '(', 'ns', ')', 'return', 'self', '.', '_parse2subparser_funcs', '(', 'kws', ')', ',', 'remainder'] | Parse the argument directly to the function used for setup
This function parses the command line arguments to the function that
has been used for the :meth:`setup_args` method.
Parameters
----------
args: list
The arguments parsed to the :meth:`parse_args` function
Returns
-------
argparse.Namespace
The namespace with mapping from command name to the function
return
list
The remaining arguments that could not be interpreted
See also
--------
parse_known | ['Parse', 'the', 'argument', 'directly', 'to', 'the', 'function', 'used', 'for', 'setup'] | train | https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L952-L979 |
502 | pymc-devs/pymc | pymc/StepMethods.py | TWalk._g | def _g(self, h, xp, s):
"""Density function for blow and hop moves"""
nphi = sum(self.phi)
return (nphi / 2.0) * log(2 * pi) + nphi * \
log(s) + 0.5 * sum((h - xp) ** 2) / (s ** 2) | python | def _g(self, h, xp, s):
"""Density function for blow and hop moves"""
nphi = sum(self.phi)
return (nphi / 2.0) * log(2 * pi) + nphi * \
log(s) + 0.5 * sum((h - xp) ** 2) / (s ** 2) | ['def', '_g', '(', 'self', ',', 'h', ',', 'xp', ',', 's', ')', ':', 'nphi', '=', 'sum', '(', 'self', '.', 'phi', ')', 'return', '(', 'nphi', '/', '2.0', ')', '*', 'log', '(', '2', '*', 'pi', ')', '+', 'nphi', '*', 'log', '(', 's', ')', '+', '0.5', '*', 'sum', '(', '(', 'h', '-', 'xp', ')', '**', '2', ')', '/', '(', 's', '**', '2', ')'] | Density function for blow and hop moves | ['Density', 'function', 'for', 'blow', 'and', 'hop', 'moves'] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1732-L1738 |
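
In mathematical form, the return value above is (with $n_\phi = \sum_i \phi_i$ the number of coordinates selected by the move mask $\phi$):

$$
g(h \mid x', s) \;=\; \frac{n_\phi}{2}\,\log(2\pi) \;+\; n_\phi \log s \;+\; \frac{1}{2s^{2}}\sum_i \bigl(h_i - x'_i\bigr)^{2},
$$

which reads as the negative log of an isotropic normal density with standard deviation $s$ centred at $x'$. Interpreting $\phi$ as the mask of updated coordinates is an inference from the surrounding T-walk code, not stated in the snippet itself.
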
503 | metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | Signal.prepare_notification | def prepare_notification(self, *, subscribers=None, instance=None,
loop=None, notify_external=True):
"""Sets up a and configures an `~.utils.Executor`:class: instance."""
# merge callbacks added to the class level with those added to the
# instance, giving the formers precedence while preserving overall
# order
self_subscribers = self.subscribers.copy()
# add in callbacks declared in the main class body and marked with
# @handler
if (instance is not None and self.name and
isinstance(instance.__class__, SignalAndHandlerInitMeta)):
class_handlers = type(instance)._get_class_handlers(
self.name, instance)
for ch in class_handlers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if ch not in self_subscribers:
self_subscribers.append(ch)
# add in the other instance level callbacks added at runtime
if subscribers is not None:
for el in subscribers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if el not in self_subscribers:
self_subscribers.append(el)
loop = loop or self.loop
# maybe do a round of external publishing
if notify_external and self.external_signaller is not None:
self_subscribers.append(partial(self.ext_publish, instance, loop))
if self._fnotify is None:
fnotify = None
else:
if instance is None:
fnotify = self._fnotify
else:
fnotify = types.MethodType(self._fnotify, instance)
validator = self._fvalidation
if validator is not None and instance is not None:
validator = types.MethodType(validator, instance)
return Executor(self_subscribers, owner=self,
concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
loop=loop, exec_wrapper=fnotify,
fvalidation=validator) | python | def prepare_notification(self, *, subscribers=None, instance=None,
loop=None, notify_external=True):
"""Sets up a and configures an `~.utils.Executor`:class: instance."""
# merge callbacks added to the class level with those added to the
# instance, giving the formers precedence while preserving overall
# order
self_subscribers = self.subscribers.copy()
# add in callbacks declared in the main class body and marked with
# @handler
if (instance is not None and self.name and
isinstance(instance.__class__, SignalAndHandlerInitMeta)):
class_handlers = type(instance)._get_class_handlers(
self.name, instance)
for ch in class_handlers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if ch not in self_subscribers:
self_subscribers.append(ch)
# add in the other instance level callbacks added at runtime
if subscribers is not None:
for el in subscribers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if el not in self_subscribers:
self_subscribers.append(el)
loop = loop or self.loop
# maybe do a round of external publishing
if notify_external and self.external_signaller is not None:
self_subscribers.append(partial(self.ext_publish, instance, loop))
if self._fnotify is None:
fnotify = None
else:
if instance is None:
fnotify = self._fnotify
else:
fnotify = types.MethodType(self._fnotify, instance)
validator = self._fvalidation
if validator is not None and instance is not None:
validator = types.MethodType(validator, instance)
return Executor(self_subscribers, owner=self,
concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
loop=loop, exec_wrapper=fnotify,
fvalidation=validator) | ['def', 'prepare_notification', '(', 'self', ',', '*', ',', 'subscribers', '=', 'None', ',', 'instance', '=', 'None', ',', 'loop', '=', 'None', ',', 'notify_external', '=', 'True', ')', ':', '# merge callbacks added to the class level with those added to the', '# instance, giving the formers precedence while preserving overall', '# order', 'self_subscribers', '=', 'self', '.', 'subscribers', '.', 'copy', '(', ')', '# add in callbacks declared in the main class body and marked with', '# @handler', 'if', '(', 'instance', 'is', 'not', 'None', 'and', 'self', '.', 'name', 'and', 'isinstance', '(', 'instance', '.', '__class__', ',', 'SignalAndHandlerInitMeta', ')', ')', ':', 'class_handlers', '=', 'type', '(', 'instance', ')', '.', '_get_class_handlers', '(', 'self', '.', 'name', ',', 'instance', ')', 'for', 'ch', 'in', 'class_handlers', ':', '# eventual methods are ephemeral and normally the following', '# condition would always be True for methods but the dict used', '# has logic to take that into account', 'if', 'ch', 'not', 'in', 'self_subscribers', ':', 'self_subscribers', '.', 'append', '(', 'ch', ')', '# add in the other instance level callbacks added at runtime', 'if', 'subscribers', 'is', 'not', 'None', ':', 'for', 'el', 'in', 'subscribers', ':', '# eventual methods are ephemeral and normally the following', '# condition would always be True for methods but the dict used', '# has logic to take that into account', 'if', 'el', 'not', 'in', 'self_subscribers', ':', 'self_subscribers', '.', 'append', '(', 'el', ')', 'loop', '=', 'loop', 'or', 'self', '.', 'loop', '# maybe do a round of external publishing', 'if', 'notify_external', 'and', 'self', '.', 'external_signaller', 'is', 'not', 'None', ':', 'self_subscribers', '.', 'append', '(', 'partial', '(', 'self', '.', 'ext_publish', ',', 'instance', ',', 'loop', ')', ')', 'if', 'self', '.', '_fnotify', 'is', 'None', ':', 'fnotify', '=', 'None', 'else', ':', 'if', 'instance', 'is', 'None', ':', 'fnotify', '=', 'self', '.', '_fnotify', 'else', ':', 'fnotify', '=', 'types', '.', 'MethodType', '(', 'self', '.', '_fnotify', ',', 'instance', ')', 'validator', '=', 'self', '.', '_fvalidation', 'if', 'validator', 'is', 'not', 'None', 'and', 'instance', 'is', 'not', 'None', ':', 'validator', '=', 'types', '.', 'MethodType', '(', 'validator', ',', 'instance', ')', 'return', 'Executor', '(', 'self_subscribers', ',', 'owner', '=', 'self', ',', 'concurrent', '=', 'SignalOptions', '.', 'EXEC_CONCURRENT', 'in', 'self', '.', 'flags', ',', 'loop', '=', 'loop', ',', 'exec_wrapper', '=', 'fnotify', ',', 'fvalidation', '=', 'validator', ')'] | Sets up a and configures an `~.utils.Executor`:class: instance. | ['Sets', 'up', 'a', 'and', 'configures', 'an', '~', '.', 'utils', '.', 'Executor', ':', 'class', ':', 'instance', '.'] | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L339-L383 |
504 | nickmckay/LiPD-utilities | Python/lipd/csvs.py | _reorder_csv | def _reorder_csv(d, filename=""):
"""
Preserve the csv column ordering before writing back out to CSV file. Keep column data consistent with JSONLD
column number alignment.
{ "var1" : {"number": 1, "values": [] }, "var2": {"number": 1, "values": [] } }
:param dict d: csv data
:param str filename: Filename
:return dict: csv data
"""
_ensemble = is_ensemble(d)
_d2 = []
try:
if _ensemble:
# 1 column ensemble: realizations
if len(d) == 1:
for var, data in d.items():
if "values" in data:
_d2 = data["values"]
# 2 column ensemble: depth and realizations
else:
_count = 0
# count up how many columns total, and how many placeholders to make in our list
for var, data in d.items():
if isinstance(data["number"], list):
_curr_count = len(data["number"])
_count += _curr_count
elif isinstance(data["number"], (int, float, str)):
_count += 1
# make a list with X number of placeholders
_d2 = [None for i in range(0, _count)]
# Loop again and start combining all columns into one list of lists
for var, data in d.items():
# realizations: insert at (hopefully) index 1,2...1001
if isinstance(data["number"], list):
for idx, number in enumerate(data["number"]):
# we can't trust the number entries. sometimes they start at "number 1",
# which isn't true, because DEPTH is number 1. Use enumerate index instead.
_insert_at = int(idx) + 1
# Insert at one above the index. Grab values at exact index
_d2[_insert_at] = data["values"][idx-1]
# depth column: insert at (hopefully) index 0
else:
# we can trust to use the number entry as an index placement
_insert_at = int(data["number"]) - 1
# insert at one below number, to compensate for 0-index
_d2[_insert_at] = data["values"]
else:
_count = len(d)
_d2 = [None for i in range(0, _count)]
for key, data in d.items():
_insert_at = int(data["number"]) - 1
_d2[_insert_at] = data["values"]
except Exception as e:
print("Error: Unable to write CSV: There was an error trying to prep the values for file write: {}".format(e))
logger_csvs.error("reorder_csvs: Unable to write CSV file: {}, {}".format(filename, e))
return _d2 | python | def _reorder_csv(d, filename=""):
"""
Preserve the csv column ordering before writing back out to CSV file. Keep column data consistent with JSONLD
column number alignment.
{ "var1" : {"number": 1, "values": [] }, "var2": {"number": 1, "values": [] } }
:param dict d: csv data
:param str filename: Filename
:return dict: csv data
"""
_ensemble = is_ensemble(d)
_d2 = []
try:
if _ensemble:
# 1 column ensemble: realizations
if len(d) == 1:
for var, data in d.items():
if "values" in data:
_d2 = data["values"]
# 2 column ensemble: depth and realizations
else:
_count = 0
# count up how many columns total, and how many placeholders to make in our list
for var, data in d.items():
if isinstance(data["number"], list):
_curr_count = len(data["number"])
_count += _curr_count
elif isinstance(data["number"], (int, float, str)):
_count += 1
# make a list with X number of placeholders
_d2 = [None for i in range(0, _count)]
# Loop again and start combining all columns into one list of lists
for var, data in d.items():
# realizations: insert at (hopefully) index 1,2...1001
if isinstance(data["number"], list):
for idx, number in enumerate(data["number"]):
# we can't trust the number entries. sometimes they start at "number 1",
# which isn't true, because DEPTH is number 1. Use enumerate index instead.
_insert_at = int(idx) + 1
# Insert at one above the index. Grab values at exact index
_d2[_insert_at] = data["values"][idx-1]
# depth column: insert at (hopefully) index 0
else:
# we can trust to use the number entry as an index placement
_insert_at = int(data["number"]) - 1
# insert at one below number, to compensate for 0-index
_d2[_insert_at] = data["values"]
else:
_count = len(d)
_d2 = [None for i in range(0, _count)]
for key, data in d.items():
_insert_at = int(data["number"]) - 1
_d2[_insert_at] = data["values"]
except Exception as e:
print("Error: Unable to write CSV: There was an error trying to prep the values for file write: {}".format(e))
logger_csvs.error("reorder_csvs: Unable to write CSV file: {}, {}".format(filename, e))
return _d2 | ['def', '_reorder_csv', '(', 'd', ',', 'filename', '=', '""', ')', ':', '_ensemble', '=', 'is_ensemble', '(', 'd', ')', '_d2', '=', '[', ']', 'try', ':', 'if', '_ensemble', ':', '# 1 column ensemble: realizations', 'if', 'len', '(', 'd', ')', '==', '1', ':', 'for', 'var', ',', 'data', 'in', 'd', '.', 'items', '(', ')', ':', 'if', '"values"', 'in', 'data', ':', '_d2', '=', 'data', '[', '"values"', ']', '# 2 column ensemble: depth and realizations', 'else', ':', '_count', '=', '0', '# count up how many columns total, and how many placeholders to make in our list', 'for', 'var', ',', 'data', 'in', 'd', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'data', '[', '"number"', ']', ',', 'list', ')', ':', '_curr_count', '=', 'len', '(', 'data', '[', '"number"', ']', ')', '_count', '+=', '_curr_count', 'elif', 'isinstance', '(', 'data', '[', '"number"', ']', ',', '(', 'int', ',', 'float', ',', 'str', ')', ')', ':', '_count', '+=', '1', '# make a list with X number of placeholders', '_d2', '=', '[', 'None', 'for', 'i', 'in', 'range', '(', '0', ',', '_count', ')', ']', '# Loop again and start combining all columns into one list of lists', 'for', 'var', ',', 'data', 'in', 'd', '.', 'items', '(', ')', ':', '# realizations: insert at (hopefully) index 1,2...1001', 'if', 'isinstance', '(', 'data', '[', '"number"', ']', ',', 'list', ')', ':', 'for', 'idx', ',', 'number', 'in', 'enumerate', '(', 'data', '[', '"number"', ']', ')', ':', '# we can\'t trust the number entries. sometimes they start at "number 1",', "# which isn't true, because DEPTH is number 1. Use enumerate index instead.", '_insert_at', '=', 'int', '(', 'idx', ')', '+', '1', '# Insert at one above the index. Grab values at exact index', '_d2', '[', '_insert_at', ']', '=', 'data', '[', '"values"', ']', '[', 'idx', '-', '1', ']', '# depth column: insert at (hopefully) index 0', 'else', ':', '# we can trust to use the number entry as an index placement', '_insert_at', '=', 'int', '(', 'data', '[', '"number"', ']', ')', '-', '1', '# insert at one below number, to compensate for 0-index', '_d2', '[', '_insert_at', ']', '=', 'data', '[', '"values"', ']', 'else', ':', '_count', '=', 'len', '(', 'd', ')', '_d2', '=', '[', 'None', 'for', 'i', 'in', 'range', '(', '0', ',', '_count', ')', ']', 'for', 'key', ',', 'data', 'in', 'd', '.', 'items', '(', ')', ':', '_insert_at', '=', 'int', '(', 'data', '[', '"number"', ']', ')', '-', '1', '_d2', '[', '_insert_at', ']', '=', 'data', '[', '"values"', ']', 'except', 'Exception', 'as', 'e', ':', 'print', '(', '"Error: Unable to write CSV: There was an error trying to prep the values for file write: {}"', '.', 'format', '(', 'e', ')', ')', 'logger_csvs', '.', 'error', '(', '"reorder_csvs: Unable to write CSV file: {}, {}"', '.', 'format', '(', 'filename', ',', 'e', ')', ')', 'return', '_d2'] | Preserve the csv column ordering before writing back out to CSV file. Keep column data consistent with JSONLD
column number alignment.
{ "var1" : {"number": 1, "values": [] }, "var2": {"number": 1, "values": [] } }
:param dict d: csv data
:param str filename: Filename
:return dict: csv data | ['Preserve', 'the', 'csv', 'column', 'ordering', 'before', 'writing', 'back', 'out', 'to', 'CSV', 'file', '.', 'Keep', 'column', 'data', 'consistent', 'with', 'JSONLD', 'column', 'number', 'alignment', '.', '{', 'var1', ':', '{', 'number', ':', '1', 'values', ':', '[]', '}', 'var2', ':', '{', 'number', ':', '1', 'values', ':', '[]', '}', '}'] | train | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L470-L528 |
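
A small illustration of the non-ensemble branch above. The import path is inferred from the file location (`Python/lipd/csvs.py`), and the helper is private, so calling it directly is for demonstration only.

```python
# Demonstration input for the non-ensemble branch of _reorder_csv().
# The import path is an assumption based on the repository layout.
from lipd.csvs import _reorder_csv

csv_data = {
    "d18O":  {"number": 2, "values": [-3.2, -3.4, -3.1]},
    "depth": {"number": 1, "values": [0.5, 1.0, 1.5]},
}

# Columns are placed by their 1-indexed "number" field, so depth comes first:
# [[0.5, 1.0, 1.5], [-3.2, -3.4, -3.1]]
print(_reorder_csv(csv_data, filename="example.csv"))
```
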
505 | KelSolaar/Manager | manager/components_manager.py | Manager.list_components | def list_components(self, dependency_order=True):
"""
Lists the Components by dependency resolving.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool
"""
if dependency_order:
return list(itertools.chain.from_iterable([sorted(list(batch)) for batch in
foundations.common.dependency_resolver(
dict((key, value.require) for (key, value) in self))]))
else:
return [key for (key, value) in self] | python | def list_components(self, dependency_order=True):
"""
Lists the Components by dependency resolving.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool
"""
if dependency_order:
return list(itertools.chain.from_iterable([sorted(list(batch)) for batch in
foundations.common.dependency_resolver(
dict((key, value.require) for (key, value) in self))]))
else:
return [key for (key, value) in self] | ['def', 'list_components', '(', 'self', ',', 'dependency_order', '=', 'True', ')', ':', 'if', 'dependency_order', ':', 'return', 'list', '(', 'itertools', '.', 'chain', '.', 'from_iterable', '(', '[', 'sorted', '(', 'list', '(', 'batch', ')', ')', 'for', 'batch', 'in', 'foundations', '.', 'common', '.', 'dependency_resolver', '(', 'dict', '(', '(', 'key', ',', 'value', '.', 'require', ')', 'for', '(', 'key', ',', 'value', ')', 'in', 'self', ')', ')', ']', ')', ')', 'else', ':', 'return', '[', 'key', 'for', '(', 'key', ',', 'value', ')', 'in', 'self', ']'] | Lists the Components by dependency resolving.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool | ['Lists', 'the', 'Components', 'by', 'dependency', 'resolving', '.'] | train | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1217-L1238 |
506 | twisted/txaws | txaws/wsdl.py | SequenceSchema.create | def create(self, root=None, namespace=None):
"""Create a sequence element with the given root.
@param root: The C{etree.Element} to root the sequence at, if C{None} a
new one will be created..
@result: A L{SequenceItem} with the given root.
@raises L{ECResponseError}: If the given C{root} has a bad tag.
"""
if root is not None:
tag = root.tag
if root.nsmap:
namespace = root.nsmap[None]
tag = tag[len(namespace) + 2:]
if tag != self.tag:
raise WSDLParseError("Expected response with tag '%s', but "
"got '%s' instead" % (self.tag, tag))
return SequenceItem(self, root, namespace) | python | def create(self, root=None, namespace=None):
"""Create a sequence element with the given root.
@param root: The C{etree.Element} to root the sequence at, if C{None} a
new one will be created..
@result: A L{SequenceItem} with the given root.
@raises L{ECResponseError}: If the given C{root} has a bad tag.
"""
if root is not None:
tag = root.tag
if root.nsmap:
namespace = root.nsmap[None]
tag = tag[len(namespace) + 2:]
if tag != self.tag:
raise WSDLParseError("Expected response with tag '%s', but "
"got '%s' instead" % (self.tag, tag))
return SequenceItem(self, root, namespace) | ['def', 'create', '(', 'self', ',', 'root', '=', 'None', ',', 'namespace', '=', 'None', ')', ':', 'if', 'root', 'is', 'not', 'None', ':', 'tag', '=', 'root', '.', 'tag', 'if', 'root', '.', 'nsmap', ':', 'namespace', '=', 'root', '.', 'nsmap', '[', 'None', ']', 'tag', '=', 'tag', '[', 'len', '(', 'namespace', ')', '+', '2', ':', ']', 'if', 'tag', '!=', 'self', '.', 'tag', ':', 'raise', 'WSDLParseError', '(', '"Expected response with tag \'%s\', but "', '"got \'%s\' instead"', '%', '(', 'self', '.', 'tag', ',', 'tag', ')', ')', 'return', 'SequenceItem', '(', 'self', ',', 'root', ',', 'namespace', ')'] | Create a sequence element with the given root.
@param root: The C{etree.Element} to root the sequence at, if C{None} a
new one will be created..
@result: A L{SequenceItem} with the given root.
@raises L{ECResponseError}: If the given C{root} has a bad tag. | ['Create', 'a', 'sequence', 'element', 'with', 'the', 'given', 'root', '.'] | train | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/wsdl.py#L316-L332 |
507 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/module.py | _ModuleContainer.split_label_fuzzy | def split_label_fuzzy(self, label):
"""
Splits a label entered as user input.
It's more flexible in it's syntax parsing than the L{split_label_strict}
method, as it allows the exclamation mark (B{C{!}}) to be omitted. The
ambiguity is resolved by searching the modules in the snapshot to guess
if a label refers to a module or a function. It also tries to rebuild
labels when they contain hardcoded addresses.
@warning: This method only parses the label, it doesn't make sure the
label actually points to a valid memory location.
@type label: str
@param label: Label to split.
@rtype: tuple( str or None, str or int or None, int or None )
@return: Tuple containing the C{module} name,
the C{function} name or ordinal, and the C{offset} value.
If the label doesn't specify a module,
then C{module} is C{None}.
If the label doesn't specify a function,
then C{function} is C{None}.
If the label doesn't specify an offset,
then C{offset} is C{0}.
@raise ValueError: The label is malformed.
"""
module = function = None
offset = 0
# Special case: None
if not label:
label = compat.b("0x0")
else:
# Remove all blanks.
label = label.replace(compat.b(' '), compat.b(''))
label = label.replace(compat.b('\t'), compat.b(''))
label = label.replace(compat.b('\r'), compat.b(''))
label = label.replace(compat.b('\n'), compat.b(''))
# Special case: empty label.
if not label:
label = compat.b("0x0")
# If an exclamation sign is present, we know we can parse it strictly.
if compat.b('!') in label:
return self.split_label_strict(label)
## # Try to parse it strictly, on error do it the fuzzy way.
## try:
## return self.split_label(label)
## except ValueError:
## pass
# * + offset
if compat.b('+') in label:
try:
prefix, offset = label.split(compat.b('+'))
except ValueError:
raise ValueError("Malformed label: %s" % label)
try:
offset = HexInput.integer(offset)
except ValueError:
raise ValueError("Malformed label: %s" % label)
label = prefix
# This parses both filenames and base addresses.
modobj = self.get_module_by_name(label)
if modobj:
# module
# module + offset
module = modobj.get_name()
else:
# TODO
# If 0xAAAAAAAA + 0xBBBBBBBB is given,
# A is interpreted as a module base address,
# and B as an offset.
# If that fails, it'd be good to add A+B and try to
# use the nearest loaded module.
# offset
# base address + offset (when no module has that base address)
try:
address = HexInput.integer(label)
if offset:
# If 0xAAAAAAAA + 0xBBBBBBBB is given,
# A is interpreted as a module base address,
# and B as an offset.
# If that fails, we get here, meaning no module was found
# at A. Then add up A+B and work with that as a hardcoded
# address.
offset = address + offset
else:
# If the label is a hardcoded address, we get here.
offset = address
# If only a hardcoded address is given,
# rebuild the label using get_label_at_address.
# Then parse it again, but this time strictly,
# both because there is no need for fuzzy syntax and
# to prevent an infinite recursion if there's a bug here.
try:
new_label = self.get_label_at_address(offset)
module, function, offset = \
self.split_label_strict(new_label)
except ValueError:
pass
# function
# function + offset
except ValueError:
function = label
# Convert function ordinal strings into integers.
if function and function.startswith(compat.b('#')):
try:
function = HexInput.integer(function[1:])
except ValueError:
pass
# Convert null offsets to None.
if not offset:
offset = None
return (module, function, offset) | python | def split_label_fuzzy(self, label):
"""
Splits a label entered as user input.
It's more flexible in it's syntax parsing than the L{split_label_strict}
method, as it allows the exclamation mark (B{C{!}}) to be omitted. The
ambiguity is resolved by searching the modules in the snapshot to guess
if a label refers to a module or a function. It also tries to rebuild
labels when they contain hardcoded addresses.
@warning: This method only parses the label, it doesn't make sure the
label actually points to a valid memory location.
@type label: str
@param label: Label to split.
@rtype: tuple( str or None, str or int or None, int or None )
@return: Tuple containing the C{module} name,
the C{function} name or ordinal, and the C{offset} value.
If the label doesn't specify a module,
then C{module} is C{None}.
If the label doesn't specify a function,
then C{function} is C{None}.
If the label doesn't specify an offset,
then C{offset} is C{0}.
@raise ValueError: The label is malformed.
"""
module = function = None
offset = 0
# Special case: None
if not label:
label = compat.b("0x0")
else:
# Remove all blanks.
label = label.replace(compat.b(' '), compat.b(''))
label = label.replace(compat.b('\t'), compat.b(''))
label = label.replace(compat.b('\r'), compat.b(''))
label = label.replace(compat.b('\n'), compat.b(''))
# Special case: empty label.
if not label:
label = compat.b("0x0")
# If an exclamation sign is present, we know we can parse it strictly.
if compat.b('!') in label:
return self.split_label_strict(label)
## # Try to parse it strictly, on error do it the fuzzy way.
## try:
## return self.split_label(label)
## except ValueError:
## pass
# * + offset
if compat.b('+') in label:
try:
prefix, offset = label.split(compat.b('+'))
except ValueError:
raise ValueError("Malformed label: %s" % label)
try:
offset = HexInput.integer(offset)
except ValueError:
raise ValueError("Malformed label: %s" % label)
label = prefix
# This parses both filenames and base addresses.
modobj = self.get_module_by_name(label)
if modobj:
# module
# module + offset
module = modobj.get_name()
else:
# TODO
# If 0xAAAAAAAA + 0xBBBBBBBB is given,
# A is interpreted as a module base address,
# and B as an offset.
# If that fails, it'd be good to add A+B and try to
# use the nearest loaded module.
# offset
# base address + offset (when no module has that base address)
try:
address = HexInput.integer(label)
if offset:
# If 0xAAAAAAAA + 0xBBBBBBBB is given,
# A is interpreted as a module base address,
# and B as an offset.
# If that fails, we get here, meaning no module was found
# at A. Then add up A+B and work with that as a hardcoded
# address.
offset = address + offset
else:
# If the label is a hardcoded address, we get here.
offset = address
# If only a hardcoded address is given,
# rebuild the label using get_label_at_address.
# Then parse it again, but this time strictly,
# both because there is no need for fuzzy syntax and
# to prevent an infinite recursion if there's a bug here.
try:
new_label = self.get_label_at_address(offset)
module, function, offset = \
self.split_label_strict(new_label)
except ValueError:
pass
# function
# function + offset
except ValueError:
function = label
# Convert function ordinal strings into integers.
if function and function.startswith(compat.b('#')):
try:
function = HexInput.integer(function[1:])
except ValueError:
pass
# Convert null offsets to None.
if not offset:
offset = None
return (module, function, offset) | ['def', 'split_label_fuzzy', '(', 'self', ',', 'label', ')', ':', 'module', '=', 'function', '=', 'None', 'offset', '=', '0', '# Special case: None', 'if', 'not', 'label', ':', 'label', '=', 'compat', '.', 'b', '(', '"0x0"', ')', 'else', ':', '# Remove all blanks.', 'label', '=', 'label', '.', 'replace', '(', 'compat', '.', 'b', '(', "' '", ')', ',', 'compat', '.', 'b', '(', "''", ')', ')', 'label', '=', 'label', '.', 'replace', '(', 'compat', '.', 'b', '(', "'\\t'", ')', ',', 'compat', '.', 'b', '(', "''", ')', ')', 'label', '=', 'label', '.', 'replace', '(', 'compat', '.', 'b', '(', "'\\r'", ')', ',', 'compat', '.', 'b', '(', "''", ')', ')', 'label', '=', 'label', '.', 'replace', '(', 'compat', '.', 'b', '(', "'\\n'", ')', ',', 'compat', '.', 'b', '(', "''", ')', ')', '# Special case: empty label.', 'if', 'not', 'label', ':', 'label', '=', 'compat', '.', 'b', '(', '"0x0"', ')', '# If an exclamation sign is present, we know we can parse it strictly.', 'if', 'compat', '.', 'b', '(', "'!'", ')', 'in', 'label', ':', 'return', 'self', '.', 'split_label_strict', '(', 'label', ')', '## # Try to parse it strictly, on error do it the fuzzy way.', '## try:', '## return self.split_label(label)', '## except ValueError:', '## pass', '# * + offset', 'if', 'compat', '.', 'b', '(', "'+'", ')', 'in', 'label', ':', 'try', ':', 'prefix', ',', 'offset', '=', 'label', '.', 'split', '(', 'compat', '.', 'b', '(', "'+'", ')', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', '"Malformed label: %s"', '%', 'label', ')', 'try', ':', 'offset', '=', 'HexInput', '.', 'integer', '(', 'offset', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', '"Malformed label: %s"', '%', 'label', ')', 'label', '=', 'prefix', '# This parses both filenames and base addresses.', 'modobj', '=', 'self', '.', 'get_module_by_name', '(', 'label', ')', 'if', 'modobj', ':', '# module', '# module + offset', 'module', '=', 'modobj', '.', 'get_name', '(', ')', 'else', ':', '# TODO', '# If 0xAAAAAAAA + 0xBBBBBBBB is given,', '# A is interpreted as a module base address,', '# and B as an offset.', "# If that fails, it'd be good to add A+B and try to", '# use the nearest loaded module.', '# offset', '# base address + offset (when no module has that base address)', 'try', ':', 'address', '=', 'HexInput', '.', 'integer', '(', 'label', ')', 'if', 'offset', ':', '# If 0xAAAAAAAA + 0xBBBBBBBB is given,', '# A is interpreted as a module base address,', '# and B as an offset.', '# If that fails, we get here, meaning no module was found', '# at A. 
Then add up A+B and work with that as a hardcoded', '# address.', 'offset', '=', 'address', '+', 'offset', 'else', ':', '# If the label is a hardcoded address, we get here.', 'offset', '=', 'address', '# If only a hardcoded address is given,', '# rebuild the label using get_label_at_address.', '# Then parse it again, but this time strictly,', '# both because there is no need for fuzzy syntax and', "# to prevent an infinite recursion if there's a bug here.", 'try', ':', 'new_label', '=', 'self', '.', 'get_label_at_address', '(', 'offset', ')', 'module', ',', 'function', ',', 'offset', '=', 'self', '.', 'split_label_strict', '(', 'new_label', ')', 'except', 'ValueError', ':', 'pass', '# function', '# function + offset', 'except', 'ValueError', ':', 'function', '=', 'label', '# Convert function ordinal strings into integers.', 'if', 'function', 'and', 'function', '.', 'startswith', '(', 'compat', '.', 'b', '(', "'#'", ')', ')', ':', 'try', ':', 'function', '=', 'HexInput', '.', 'integer', '(', 'function', '[', '1', ':', ']', ')', 'except', 'ValueError', ':', 'pass', '# Convert null offsets to None.', 'if', 'not', 'offset', ':', 'offset', '=', 'None', 'return', '(', 'module', ',', 'function', ',', 'offset', ')'] | Splits a label entered as user input.
It's more flexible in it's syntax parsing than the L{split_label_strict}
method, as it allows the exclamation mark (B{C{!}}) to be omitted. The
ambiguity is resolved by searching the modules in the snapshot to guess
if a label refers to a module or a function. It also tries to rebuild
labels when they contain hardcoded addresses.
@warning: This method only parses the label, it doesn't make sure the
label actually points to a valid memory location.
@type label: str
@param label: Label to split.
@rtype: tuple( str or None, str or int or None, int or None )
@return: Tuple containing the C{module} name,
the C{function} name or ordinal, and the C{offset} value.
If the label doesn't specify a module,
then C{module} is C{None}.
If the label doesn't specify a function,
then C{function} is C{None}.
If the label doesn't specify an offset,
then C{offset} is C{0}.
@raise ValueError: The label is malformed. | ['Splits', 'a', 'label', 'entered', 'as', 'user', 'input', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L1317-L1450 |
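
An illustrative sketch of the fuzzy label syntax. winappdbg is Windows-only and needs a live process snapshot, and the PID below is made up, so this shows the shape of the API rather than something guaranteed to run unchanged.

```python
# Illustrative only: requires Windows, winappdbg, and a real PID.
from winappdbg import Process

process = Process(4242)       # hypothetical PID
process.scan_modules()        # populate the module snapshot

# No exclamation mark needed; the parser guesses module vs. function.
print(process.split_label_fuzzy("kernel32 + 0x100"))   # (module, None, offset)
print(process.split_label_fuzzy("CreateFileW"))        # (None, function, None)
print(process.split_label_fuzzy("0x7FF600001000"))     # rebuilt from the address
```
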
508 | odlgroup/odl | odl/ufunc_ops/ufunc_ops.py | find_min_signature | def find_min_signature(ufunc, dtypes_in):
"""Determine the minimum matching ufunc signature for given dtypes.
Parameters
----------
ufunc : str or numpy.ufunc
Ufunc whose signatures are to be considered.
dtypes_in :
Sequence of objects specifying input dtypes. Its length must match
the number of inputs of ``ufunc``, and its entries must be understood
by `numpy.dtype`.
Returns
-------
signature : str
Minimum matching ufunc signature, see, e.g., ``np.add.types``
for examples.
Raises
------
TypeError
If no valid signature is found.
"""
if not isinstance(ufunc, np.ufunc):
ufunc = getattr(np, str(ufunc))
dtypes_in = [np.dtype(dt_in) for dt_in in dtypes_in]
tcs_in = [dt.base.char for dt in dtypes_in]
if len(tcs_in) != ufunc.nin:
raise ValueError('expected {} input dtype(s) for {}, got {}'
''.format(ufunc.nin, ufunc, len(tcs_in)))
valid_sigs = []
for sig in ufunc.types:
sig_tcs_in, sig_tcs_out = sig.split('->')
if all(np.dtype(tc_in) <= np.dtype(sig_tc_in) and
sig_tc_in in SUPP_TYPECODES
for tc_in, sig_tc_in in zip(tcs_in, sig_tcs_in)):
valid_sigs.append(sig)
if not valid_sigs:
raise TypeError('no valid signature found for {} and input dtypes {}'
''.format(ufunc, tuple(dt.name for dt in dtypes_in)))
def in_dtypes(sig):
"""Comparison key function for input dtypes of a signature."""
sig_tcs_in = sig.split('->')[0]
return tuple(np.dtype(tc) for tc in sig_tcs_in)
return min(valid_sigs, key=in_dtypes) | python | def find_min_signature(ufunc, dtypes_in):
"""Determine the minimum matching ufunc signature for given dtypes.
Parameters
----------
ufunc : str or numpy.ufunc
Ufunc whose signatures are to be considered.
dtypes_in :
Sequence of objects specifying input dtypes. Its length must match
the number of inputs of ``ufunc``, and its entries must be understood
by `numpy.dtype`.
Returns
-------
signature : str
Minimum matching ufunc signature, see, e.g., ``np.add.types``
for examples.
Raises
------
TypeError
If no valid signature is found.
"""
if not isinstance(ufunc, np.ufunc):
ufunc = getattr(np, str(ufunc))
dtypes_in = [np.dtype(dt_in) for dt_in in dtypes_in]
tcs_in = [dt.base.char for dt in dtypes_in]
if len(tcs_in) != ufunc.nin:
raise ValueError('expected {} input dtype(s) for {}, got {}'
''.format(ufunc.nin, ufunc, len(tcs_in)))
valid_sigs = []
for sig in ufunc.types:
sig_tcs_in, sig_tcs_out = sig.split('->')
if all(np.dtype(tc_in) <= np.dtype(sig_tc_in) and
sig_tc_in in SUPP_TYPECODES
for tc_in, sig_tc_in in zip(tcs_in, sig_tcs_in)):
valid_sigs.append(sig)
if not valid_sigs:
raise TypeError('no valid signature found for {} and input dtypes {}'
''.format(ufunc, tuple(dt.name for dt in dtypes_in)))
def in_dtypes(sig):
"""Comparison key function for input dtypes of a signature."""
sig_tcs_in = sig.split('->')[0]
return tuple(np.dtype(tc) for tc in sig_tcs_in)
return min(valid_sigs, key=in_dtypes) | ['def', 'find_min_signature', '(', 'ufunc', ',', 'dtypes_in', ')', ':', 'if', 'not', 'isinstance', '(', 'ufunc', ',', 'np', '.', 'ufunc', ')', ':', 'ufunc', '=', 'getattr', '(', 'np', ',', 'str', '(', 'ufunc', ')', ')', 'dtypes_in', '=', '[', 'np', '.', 'dtype', '(', 'dt_in', ')', 'for', 'dt_in', 'in', 'dtypes_in', ']', 'tcs_in', '=', '[', 'dt', '.', 'base', '.', 'char', 'for', 'dt', 'in', 'dtypes_in', ']', 'if', 'len', '(', 'tcs_in', ')', '!=', 'ufunc', '.', 'nin', ':', 'raise', 'ValueError', '(', "'expected {} input dtype(s) for {}, got {}'", "''", '.', 'format', '(', 'ufunc', '.', 'nin', ',', 'ufunc', ',', 'len', '(', 'tcs_in', ')', ')', ')', 'valid_sigs', '=', '[', ']', 'for', 'sig', 'in', 'ufunc', '.', 'types', ':', 'sig_tcs_in', ',', 'sig_tcs_out', '=', 'sig', '.', 'split', '(', "'->'", ')', 'if', 'all', '(', 'np', '.', 'dtype', '(', 'tc_in', ')', '<=', 'np', '.', 'dtype', '(', 'sig_tc_in', ')', 'and', 'sig_tc_in', 'in', 'SUPP_TYPECODES', 'for', 'tc_in', ',', 'sig_tc_in', 'in', 'zip', '(', 'tcs_in', ',', 'sig_tcs_in', ')', ')', ':', 'valid_sigs', '.', 'append', '(', 'sig', ')', 'if', 'not', 'valid_sigs', ':', 'raise', 'TypeError', '(', "'no valid signature found for {} and input dtypes {}'", "''", '.', 'format', '(', 'ufunc', ',', 'tuple', '(', 'dt', '.', 'name', 'for', 'dt', 'in', 'dtypes_in', ')', ')', ')', 'def', 'in_dtypes', '(', 'sig', ')', ':', '"""Comparison key function for input dtypes of a signature."""', 'sig_tcs_in', '=', 'sig', '.', 'split', '(', "'->'", ')', '[', '0', ']', 'return', 'tuple', '(', 'np', '.', 'dtype', '(', 'tc', ')', 'for', 'tc', 'in', 'sig_tcs_in', ')', 'return', 'min', '(', 'valid_sigs', ',', 'key', '=', 'in_dtypes', ')'] | Determine the minimum matching ufunc signature for given dtypes.
Parameters
----------
ufunc : str or numpy.ufunc
Ufunc whose signatures are to be considered.
dtypes_in :
Sequence of objects specifying input dtypes. Its length must match
the number of inputs of ``ufunc``, and its entries must be understood
by `numpy.dtype`.
Returns
-------
signature : str
Minimum matching ufunc signature, see, e.g., ``np.add.types``
for examples.
Raises
------
TypeError
If no valid signature is found. | ['Determine', 'the', 'minimum', 'matching', 'ufunc', 'signature', 'for', 'given', 'dtypes', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/ufunc_ops/ufunc_ops.py#L27-L77 |
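
A short sketch of calling `find_min_signature()`. The import path is inferred from the file location, and the exact signatures returned depend on odl's `SUPP_TYPECODES`, so the results in the comments are indicative only.

```python
# Indicative only: actual output depends on odl's SUPP_TYPECODES.
import numpy as np
from odl.ufunc_ops.ufunc_ops import find_min_signature

print(find_min_signature(np.add, [np.float32, np.float32]))  # e.g. 'ff->f'
print(find_min_signature('add', ['float32', 'int64']))       # e.g. 'dd->d'
```
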
509 | knipknap/exscript | Exscript/util/mail.py | from_template_string | def from_template_string(string, **kwargs):
"""
Reads the given SMTP formatted template, and creates a new Mail object
using the information.
:type string: str
:param string: The SMTP formatted template.
:type kwargs: str
:param kwargs: Variables to replace in the template.
:rtype: Mail
:return: The resulting mail.
"""
tmpl = _render_template(string, **kwargs)
mail = Mail()
mail.set_from_template_string(tmpl)
return mail | python | def from_template_string(string, **kwargs):
"""
Reads the given SMTP formatted template, and creates a new Mail object
using the information.
:type string: str
:param string: The SMTP formatted template.
:type kwargs: str
:param kwargs: Variables to replace in the template.
:rtype: Mail
:return: The resulting mail.
"""
tmpl = _render_template(string, **kwargs)
mail = Mail()
mail.set_from_template_string(tmpl)
return mail | ['def', 'from_template_string', '(', 'string', ',', '*', '*', 'kwargs', ')', ':', 'tmpl', '=', '_render_template', '(', 'string', ',', '*', '*', 'kwargs', ')', 'mail', '=', 'Mail', '(', ')', 'mail', '.', 'set_from_template_string', '(', 'tmpl', ')', 'return', 'mail'] | Reads the given SMTP formatted template, and creates a new Mail object
using the information.
:type string: str
:param string: The SMTP formatted template.
:type kwargs: str
:param kwargs: Variables to replace in the template.
:rtype: Mail
:return: The resulting mail. | ['Reads', 'the', 'given', 'SMTP', 'formatted', 'template', 'and', 'creates', 'a', 'new', 'Mail', 'object', 'using', 'the', 'information', '.'] | train | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/mail.py#L399-L414 |
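
A hedged usage sketch for `from_template_string()`. The header/body template below is illustrative, and the placeholder syntax that `**kwargs` substitutes into the template is not shown in the snippet, so no variables are used here.

```python
# Illustrative template: To/Subject headers followed by a blank line and body.
# Keyword substitution is supported via **kwargs, but the placeholder syntax
# is not visible in the snippet above, so it is left out of this sketch.
from Exscript.util.mail import from_template_string

template = """To: ops@example.com
Subject: Nightly backup report

The nightly backup completed without errors.
"""

mail = from_template_string(template)
print(mail.get_subject())   # assuming Mail exposes a get_subject() accessor
```
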
510 | inasafe/inasafe | safe/impact_function/provenance_utilities.py | get_multi_exposure_analysis_question | def get_multi_exposure_analysis_question(hazard, exposures):
"""Construct analysis question based on hazard and exposures.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: A list of exposure definition.
:type exposure: list
:returns: Analysis question based on reporting standards.
:rtype: str
"""
exposures_question = ''
exposure_format = '{exposure_measure} {exposure_name}'
for index, exposure in enumerate(exposures):
if index + 1 == len(exposures):
if len(exposures) > 2:
exposures_question += tr(', and ')
else:
exposures_question += tr(' and ')
elif index != 0:
exposures_question += ', '
exposures_question += exposure_format.format(
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
if hazard == hazard_generic:
question = tr(
'In each of the hazard zones, {exposures_question} '
'might be affected?').format(exposures_question=exposures_question)
else:
question = tr(
'In the event of a {hazard_name}, {exposures_question} '
'might be affected?').format(
hazard_name=hazard['name'], exposures_question=exposures_question)
return question | python | def get_multi_exposure_analysis_question(hazard, exposures):
"""Construct analysis question based on hazard and exposures.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: A list of exposure definition.
:type exposure: list
:returns: Analysis question based on reporting standards.
:rtype: str
"""
exposures_question = ''
exposure_format = '{exposure_measure} {exposure_name}'
for index, exposure in enumerate(exposures):
if index + 1 == len(exposures):
if len(exposures) > 2:
exposures_question += tr(', and ')
else:
exposures_question += tr(' and ')
elif index != 0:
exposures_question += ', '
exposures_question += exposure_format.format(
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
if hazard == hazard_generic:
question = tr(
'In each of the hazard zones, {exposures_question} '
'might be affected?').format(exposures_question=exposures_question)
else:
question = tr(
'In the event of a {hazard_name}, {exposures_question} '
'might be affected?').format(
hazard_name=hazard['name'], exposures_question=exposures_question)
return question | ['def', 'get_multi_exposure_analysis_question', '(', 'hazard', ',', 'exposures', ')', ':', 'exposures_question', '=', "''", 'exposure_format', '=', "'{exposure_measure} {exposure_name}'", 'for', 'index', ',', 'exposure', 'in', 'enumerate', '(', 'exposures', ')', ':', 'if', 'index', '+', '1', '==', 'len', '(', 'exposures', ')', ':', 'if', 'len', '(', 'exposures', ')', '>', '2', ':', 'exposures_question', '+=', 'tr', '(', "', and '", ')', 'else', ':', 'exposures_question', '+=', 'tr', '(', "' and '", ')', 'elif', 'index', '!=', '0', ':', 'exposures_question', '+=', "', '", 'exposures_question', '+=', 'exposure_format', '.', 'format', '(', 'exposure_measure', '=', 'exposure', '[', "'measure_question'", ']', ',', 'exposure_name', '=', 'exposure', '[', "'name'", ']', ')', 'if', 'hazard', '==', 'hazard_generic', ':', 'question', '=', 'tr', '(', "'In each of the hazard zones, {exposures_question} '", "'might be affected?'", ')', '.', 'format', '(', 'exposures_question', '=', 'exposures_question', ')', 'else', ':', 'question', '=', 'tr', '(', "'In the event of a {hazard_name}, {exposures_question} '", "'might be affected?'", ')', '.', 'format', '(', 'hazard_name', '=', 'hazard', '[', "'name'", ']', ',', 'exposures_question', '=', 'exposures_question', ')', 'return', 'question'] | Construct analysis question based on hazard and exposures.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: A list of exposure definition.
:type exposure: list
:returns: Analysis question based on reporting standards.
:rtype: str | ['Construct', 'analysis', 'question', 'based', 'on', 'hazard', 'and', 'exposures', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/provenance_utilities.py#L84-L120 |
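
A worked example of the string assembly above. The hazard and exposure dictionaries are hypothetical stand-ins carrying only the keys the function reads (`name`, `measure_question`), and the import assumes an InaSAFE/QGIS environment.

```python
# Hypothetical minimal definitions; real InaSAFE definition dicts carry many more keys.
from safe.impact_function.provenance_utilities import (
    get_multi_exposure_analysis_question)

hazard = {'name': 'flood'}
exposures = [
    {'name': 'population', 'measure_question': 'how many'},
    {'name': 'roads', 'measure_question': 'what length of'},
]

print(get_multi_exposure_analysis_question(hazard, exposures))
# "In the event of a flood, how many population and what length of roads
#  might be affected?"
```
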
511 | wummel/linkchecker | third_party/dnspython/dns/tsig.py | get_algorithm | def get_algorithm(algorithm):
"""Returns the wire format string and the hash module to use for the
specified TSIG algorithm
@rtype: (string, hash constructor)
@raises NotImplementedError: I{algorithm} is not supported
"""
global _hashes
if _hashes is None:
_setup_hashes()
if isinstance(algorithm, (str, unicode)):
algorithm = dns.name.from_text(algorithm)
if sys.hexversion < 0x02050200 and \
(algorithm == HMAC_SHA384 or algorithm == HMAC_SHA512):
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" requires Python 2.5.2 or later")
try:
return (algorithm.to_digestable(), _hashes[algorithm])
except KeyError:
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" is not supported") | python | def get_algorithm(algorithm):
"""Returns the wire format string and the hash module to use for the
specified TSIG algorithm
@rtype: (string, hash constructor)
@raises NotImplementedError: I{algorithm} is not supported
"""
global _hashes
if _hashes is None:
_setup_hashes()
if isinstance(algorithm, (str, unicode)):
algorithm = dns.name.from_text(algorithm)
if sys.hexversion < 0x02050200 and \
(algorithm == HMAC_SHA384 or algorithm == HMAC_SHA512):
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" requires Python 2.5.2 or later")
try:
return (algorithm.to_digestable(), _hashes[algorithm])
except KeyError:
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" is not supported") | ['def', 'get_algorithm', '(', 'algorithm', ')', ':', 'global', '_hashes', 'if', '_hashes', 'is', 'None', ':', '_setup_hashes', '(', ')', 'if', 'isinstance', '(', 'algorithm', ',', '(', 'str', ',', 'unicode', ')', ')', ':', 'algorithm', '=', 'dns', '.', 'name', '.', 'from_text', '(', 'algorithm', ')', 'if', 'sys', '.', 'hexversion', '<', '0x02050200', 'and', '(', 'algorithm', '==', 'HMAC_SHA384', 'or', 'algorithm', '==', 'HMAC_SHA512', ')', ':', 'raise', 'NotImplementedError', '(', '"TSIG algorithm "', '+', 'str', '(', 'algorithm', ')', '+', '" requires Python 2.5.2 or later"', ')', 'try', ':', 'return', '(', 'algorithm', '.', 'to_digestable', '(', ')', ',', '_hashes', '[', 'algorithm', ']', ')', 'except', 'KeyError', ':', 'raise', 'NotImplementedError', '(', '"TSIG algorithm "', '+', 'str', '(', 'algorithm', ')', '+', '" is not supported"', ')'] | Returns the wire format string and the hash module to use for the
specified TSIG algorithm
@rtype: (string, hash constructor)
@raises NotImplementedError: I{algorithm} is not supported | ['Returns', 'the', 'wire', 'format', 'string', 'and', 'the', 'hash', 'module', 'to', 'use', 'for', 'the', 'specified', 'TSIG', 'algorithm'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/tsig.py#L199-L223 |
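
A small sketch of `get_algorithm()`. The algorithm can be passed as text or as a `dns.name.Name`; `hmac-sha256` is used on the assumption that this dnspython build registers it in `_hashes`.

```python
# Assumes dnspython registers hmac-sha256 among its supported TSIG algorithms.
import dns.tsig

wire_name, hasher = dns.tsig.get_algorithm("hmac-sha256")
print(repr(wire_name))   # wire-format (digestable) form of the algorithm name
print(hasher)            # hash constructor handed to HMAC
```
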
512 | gem/oq-engine | openquake/baselib/datastore.py | extract_calc_id_datadir | def extract_calc_id_datadir(filename, datadir=None):
"""
Extract the calculation ID from the given filename or integer:
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/calc_25.hdf5')
(25, '/mnt/ssd/oqdata')
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/wrong_name.hdf5')
Traceback (most recent call last):
...
ValueError: Cannot extract calc_id from /mnt/ssd/oqdata/wrong_name.hdf5
"""
datadir = datadir or get_datadir()
try:
calc_id = int(filename)
except ValueError:
filename = os.path.abspath(filename)
datadir = os.path.dirname(filename)
mo = re.match(CALC_REGEX, os.path.basename(filename))
if mo is None:
raise ValueError('Cannot extract calc_id from %s' % filename)
calc_id = int(mo.group(2))
return calc_id, datadir | python | def extract_calc_id_datadir(filename, datadir=None):
"""
Extract the calculation ID from the given filename or integer:
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/calc_25.hdf5')
(25, '/mnt/ssd/oqdata')
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/wrong_name.hdf5')
Traceback (most recent call last):
...
ValueError: Cannot extract calc_id from /mnt/ssd/oqdata/wrong_name.hdf5
"""
datadir = datadir or get_datadir()
try:
calc_id = int(filename)
except ValueError:
filename = os.path.abspath(filename)
datadir = os.path.dirname(filename)
mo = re.match(CALC_REGEX, os.path.basename(filename))
if mo is None:
raise ValueError('Cannot extract calc_id from %s' % filename)
calc_id = int(mo.group(2))
return calc_id, datadir | ['def', 'extract_calc_id_datadir', '(', 'filename', ',', 'datadir', '=', 'None', ')', ':', 'datadir', '=', 'datadir', 'or', 'get_datadir', '(', ')', 'try', ':', 'calc_id', '=', 'int', '(', 'filename', ')', 'except', 'ValueError', ':', 'filename', '=', 'os', '.', 'path', '.', 'abspath', '(', 'filename', ')', 'datadir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', 'mo', '=', 're', '.', 'match', '(', 'CALC_REGEX', ',', 'os', '.', 'path', '.', 'basename', '(', 'filename', ')', ')', 'if', 'mo', 'is', 'None', ':', 'raise', 'ValueError', '(', "'Cannot extract calc_id from %s'", '%', 'filename', ')', 'calc_id', '=', 'int', '(', 'mo', '.', 'group', '(', '2', ')', ')', 'return', 'calc_id', ',', 'datadir'] | Extract the calculation ID from the given filename or integer:
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/calc_25.hdf5')
(25, '/mnt/ssd/oqdata')
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/wrong_name.hdf5')
Traceback (most recent call last):
...
ValueError: Cannot extract calc_id from /mnt/ssd/oqdata/wrong_name.hdf5 | ['Extract', 'the', 'calculation', 'ID', 'from', 'the', 'given', 'filename', 'or', 'integer', ':'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/datastore.py#L87-L108 |
513 | gwastro/pycbc | pycbc/workflow/configuration.py | WorkflowConfigParser.perform_exe_expansion | def perform_exe_expansion(self):
"""
This function will look through the executables section of the
ConfigParser object and replace any values using macros with full paths.
For any values that look like
${which:lalapps_tmpltbank}
will be replaced with the equivalent of which(lalapps_tmpltbank)
Otherwise values will be unchanged.
"""
# Only works on executables section
if self.has_section('executables'):
for option, value in self.items('executables'):
# Check the value
newStr = self.interpolate_exe(value)
if newStr != value:
self.set('executables', option, newStr) | python | def perform_exe_expansion(self):
"""
This function will look through the executables section of the
ConfigParser object and replace any values using macros with full paths.
For any values that look like
${which:lalapps_tmpltbank}
will be replaced with the equivalent of which(lalapps_tmpltbank)
Otherwise values will be unchanged.
"""
# Only works on executables section
if self.has_section('executables'):
for option, value in self.items('executables'):
# Check the value
newStr = self.interpolate_exe(value)
if newStr != value:
self.set('executables', option, newStr) | ['def', 'perform_exe_expansion', '(', 'self', ')', ':', '# Only works on executables section', 'if', 'self', '.', 'has_section', '(', "'executables'", ')', ':', 'for', 'option', ',', 'value', 'in', 'self', '.', 'items', '(', "'executables'", ')', ':', '# Check the value', 'newStr', '=', 'self', '.', 'interpolate_exe', '(', 'value', ')', 'if', 'newStr', '!=', 'value', ':', 'self', '.', 'set', '(', "'executables'", ',', 'option', ',', 'newStr', ')'] | This function will look through the executables section of the
ConfigParser object and replace any values using macros with full paths.
For any values that look like
${which:lalapps_tmpltbank}
will be replaced with the equivalent of which(lalapps_tmpltbank)
Otherwise values will be unchanged. | ['This', 'function', 'will', 'look', 'through', 'the', 'executables', 'section', 'of', 'the', 'ConfigParser', 'object', 'and', 'replace', 'any', 'values', 'using', 'macros', 'with', 'full', 'paths', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/configuration.py#L520-L539 |
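
A hypothetical before/after of the `[executables]` section that `perform_exe_expansion()` rewrites; the executable names and resolved paths are illustrative.

```ini
; Hypothetical input (names and paths are illustrative)
[executables]
tmpltbank = ${which:lalapps_tmpltbank}
inspiral  = /opt/lalsuite/bin/lalapps_inspiral

; After perform_exe_expansion() only the macro value changes, e.g.:
; tmpltbank = /usr/bin/lalapps_tmpltbank
; inspiral  = /opt/lalsuite/bin/lalapps_inspiral   (unchanged)
```
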
514 | miguelgrinberg/slam | slam/cli.py | on_unexpected_error | def on_unexpected_error(e): # pragma: no cover
"""Catch-all error handler
Unexpected errors will be handled by this function.
"""
sys.stderr.write('Unexpected error: {} ({})\n'.format(
str(e), e.__class__.__name__))
sys.stderr.write('See file slam_error.log for additional details.\n')
sys.exit(1) | python | def on_unexpected_error(e): # pragma: no cover
"""Catch-all error handler
Unexpected errors will be handled by this function.
"""
sys.stderr.write('Unexpected error: {} ({})\n'.format(
str(e), e.__class__.__name__))
sys.stderr.write('See file slam_error.log for additional details.\n')
sys.exit(1) | ['def', 'on_unexpected_error', '(', 'e', ')', ':', '# pragma: no cover', 'sys', '.', 'stderr', '.', 'write', '(', "'Unexpected error: {} ({})\\n'", '.', 'format', '(', 'str', '(', 'e', ')', ',', 'e', '.', '__class__', '.', '__name__', ')', ')', 'sys', '.', 'stderr', '.', 'write', '(', "'See file slam_error.log for additional details.\\n'", ')', 'sys', '.', 'exit', '(', '1', ')'] | Catch-all error handler
Unexpected errors will be handled by this function. | ['Catch', '-', 'all', 'error', 'handler'] | train | https://github.com/miguelgrinberg/slam/blob/cf68a4bbc16d909718f8a9e71072b822e0a3d94b/slam/cli.py#L60-L68 |
515 | maartenbreddels/ipyvolume | ipyvolume/pylab.py | figure | def figure(
key=None,
width=400,
height=500,
lighting=True,
controls=True,
controls_vr=False,
controls_light=False,
debug=False,
**kwargs
):
"""Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
:return: :any:`Figure`
"""
if key is not None and key in current.figures:
current.figure = current.figures[key]
current.container = current.containers[key]
elif isinstance(key, ipv.Figure) and key in current.figures.values():
key_index = list(current.figures.values()).index(key)
key = list(current.figures.keys())[key_index]
current.figure = current.figures[key]
current.container = current.containers[key]
else:
current.figure = ipv.Figure(width=width, height=height, **kwargs)
current.container = ipywidgets.VBox()
current.container.children = [current.figure]
if key is None:
key = uuid.uuid4().hex
current.figures[key] = current.figure
current.containers[key] = current.container
if controls:
# stereo = ipywidgets.ToggleButton(value=current.figure.stereo, description='stereo', icon='eye')
# l1 = ipywidgets.jslink((current.figure, 'stereo'), (stereo, 'value'))
# current.container.children += (ipywidgets.HBox([stereo, ]),)
pass # stereo and fullscreen are now include in the js code (per view)
if controls_vr:
eye_separation = ipywidgets.FloatSlider(value=current.figure.eye_separation, min=-10, max=10, icon='eye')
ipywidgets.jslink((eye_separation, 'value'), (current.figure, 'eye_separation'))
current.container.children += (eye_separation,)
if controls_light:
globals()['controls_light']()
if debug:
show = ipywidgets.ToggleButtons(options=["Volume", "Back", "Front", "Coordinate"])
current.container.children += (show,)
# ipywidgets.jslink((current.figure, 'show'), (show, 'value'))
traitlets.link((current.figure, 'show'), (show, 'value'))
return current.figure | python | def figure(
key=None,
width=400,
height=500,
lighting=True,
controls=True,
controls_vr=False,
controls_light=False,
debug=False,
**kwargs
):
"""Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
:return: :any:`Figure`
"""
if key is not None and key in current.figures:
current.figure = current.figures[key]
current.container = current.containers[key]
elif isinstance(key, ipv.Figure) and key in current.figures.values():
key_index = list(current.figures.values()).index(key)
key = list(current.figures.keys())[key_index]
current.figure = current.figures[key]
current.container = current.containers[key]
else:
current.figure = ipv.Figure(width=width, height=height, **kwargs)
current.container = ipywidgets.VBox()
current.container.children = [current.figure]
if key is None:
key = uuid.uuid4().hex
current.figures[key] = current.figure
current.containers[key] = current.container
if controls:
# stereo = ipywidgets.ToggleButton(value=current.figure.stereo, description='stereo', icon='eye')
# l1 = ipywidgets.jslink((current.figure, 'stereo'), (stereo, 'value'))
# current.container.children += (ipywidgets.HBox([stereo, ]),)
pass # stereo and fullscreen are now include in the js code (per view)
if controls_vr:
eye_separation = ipywidgets.FloatSlider(value=current.figure.eye_separation, min=-10, max=10, icon='eye')
ipywidgets.jslink((eye_separation, 'value'), (current.figure, 'eye_separation'))
current.container.children += (eye_separation,)
if controls_light:
globals()['controls_light']()
if debug:
show = ipywidgets.ToggleButtons(options=["Volume", "Back", "Front", "Coordinate"])
current.container.children += (show,)
# ipywidgets.jslink((current.figure, 'show'), (show, 'value'))
traitlets.link((current.figure, 'show'), (show, 'value'))
return current.figure | ['def', 'figure', '(', 'key', '=', 'None', ',', 'width', '=', '400', ',', 'height', '=', '500', ',', 'lighting', '=', 'True', ',', 'controls', '=', 'True', ',', 'controls_vr', '=', 'False', ',', 'controls_light', '=', 'False', ',', 'debug', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'if', 'key', 'is', 'not', 'None', 'and', 'key', 'in', 'current', '.', 'figures', ':', 'current', '.', 'figure', '=', 'current', '.', 'figures', '[', 'key', ']', 'current', '.', 'container', '=', 'current', '.', 'containers', '[', 'key', ']', 'elif', 'isinstance', '(', 'key', ',', 'ipv', '.', 'Figure', ')', 'and', 'key', 'in', 'current', '.', 'figures', '.', 'values', '(', ')', ':', 'key_index', '=', 'list', '(', 'current', '.', 'figures', '.', 'values', '(', ')', ')', '.', 'index', '(', 'key', ')', 'key', '=', 'list', '(', 'current', '.', 'figures', '.', 'keys', '(', ')', ')', '[', 'key_index', ']', 'current', '.', 'figure', '=', 'current', '.', 'figures', '[', 'key', ']', 'current', '.', 'container', '=', 'current', '.', 'containers', '[', 'key', ']', 'else', ':', 'current', '.', 'figure', '=', 'ipv', '.', 'Figure', '(', 'width', '=', 'width', ',', 'height', '=', 'height', ',', '*', '*', 'kwargs', ')', 'current', '.', 'container', '=', 'ipywidgets', '.', 'VBox', '(', ')', 'current', '.', 'container', '.', 'children', '=', '[', 'current', '.', 'figure', ']', 'if', 'key', 'is', 'None', ':', 'key', '=', 'uuid', '.', 'uuid4', '(', ')', '.', 'hex', 'current', '.', 'figures', '[', 'key', ']', '=', 'current', '.', 'figure', 'current', '.', 'containers', '[', 'key', ']', '=', 'current', '.', 'container', 'if', 'controls', ':', "# stereo = ipywidgets.ToggleButton(value=current.figure.stereo, description='stereo', icon='eye')", "# l1 = ipywidgets.jslink((current.figure, 'stereo'), (stereo, 'value'))", '# current.container.children += (ipywidgets.HBox([stereo, ]),)', 'pass', '# stereo and fullscreen are now include in the js code (per view)', 'if', 'controls_vr', ':', 'eye_separation', '=', 'ipywidgets', '.', 'FloatSlider', '(', 'value', '=', 'current', '.', 'figure', '.', 'eye_separation', ',', 'min', '=', '-', '10', ',', 'max', '=', '10', ',', 'icon', '=', "'eye'", ')', 'ipywidgets', '.', 'jslink', '(', '(', 'eye_separation', ',', "'value'", ')', ',', '(', 'current', '.', 'figure', ',', "'eye_separation'", ')', ')', 'current', '.', 'container', '.', 'children', '+=', '(', 'eye_separation', ',', ')', 'if', 'controls_light', ':', 'globals', '(', ')', '[', "'controls_light'", ']', '(', ')', 'if', 'debug', ':', 'show', '=', 'ipywidgets', '.', 'ToggleButtons', '(', 'options', '=', '[', '"Volume"', ',', '"Back"', ',', '"Front"', ',', '"Coordinate"', ']', ')', 'current', '.', 'container', '.', 'children', '+=', '(', 'show', ',', ')', "# ipywidgets.jslink((current.figure, 'show'), (show, 'value'))", 'traitlets', '.', 'link', '(', '(', 'current', '.', 'figure', ',', "'show'", ')', ',', '(', 'show', ',', "'value'", ')', ')', 'return', 'current', '.', 'figure'] | Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
:return: :any:`Figure` | ['Create', 'a', 'new', 'figure', 'if', 'no', 'key', 'is', 'given', 'or', 'return', 'the', 'figure', 'associated', 'with', 'key', '.'] | train | https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/pylab.py#L168-L222 |
516 | wummel/linkchecker | third_party/dnspython/dns/tokenizer.py | Tokenizer.get_string | def get_string(self, origin=None):
"""Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
return token.value | python | def get_string(self, origin=None):
"""Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
return token.value | ['def', 'get_string', '(', 'self', ',', 'origin', '=', 'None', ')', ':', 'token', '=', 'self', '.', 'get', '(', ')', '.', 'unescape', '(', ')', 'if', 'not', '(', 'token', '.', 'is_identifier', '(', ')', 'or', 'token', '.', 'is_quoted_string', '(', ')', ')', ':', 'raise', 'dns', '.', 'exception', '.', 'SyntaxError', '(', "'expecting a string'", ')', 'return', 'token', '.', 'value'] | Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string | ['Read', 'the', 'next', 'token', 'and', 'interpret', 'it', 'as', 'a', 'string', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/tokenizer.py#L498-L508 |
517 | saltstack/salt | salt/modules/pw_user.py | info | def info(name):
'''
Return user information
CLI Example:
.. code-block:: bash
salt '*' user.info root
'''
ret = {}
try:
data = pwd.getpwnam(name)
ret['gid'] = data.pw_gid
ret['groups'] = list_groups(name)
ret['home'] = data.pw_dir
ret['name'] = data.pw_name
ret['passwd'] = data.pw_passwd
ret['shell'] = data.pw_shell
ret['uid'] = data.pw_uid
# Put GECOS info into a list
gecos_field = data.pw_gecos.split(',', 3)
# Assign empty strings for any unspecified GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
ret['fullname'] = gecos_field[0]
ret['roomnumber'] = gecos_field[1]
ret['workphone'] = gecos_field[2]
ret['homephone'] = gecos_field[3]
except KeyError:
return {}
return ret | python | def info(name):
'''
Return user information
CLI Example:
.. code-block:: bash
salt '*' user.info root
'''
ret = {}
try:
data = pwd.getpwnam(name)
ret['gid'] = data.pw_gid
ret['groups'] = list_groups(name)
ret['home'] = data.pw_dir
ret['name'] = data.pw_name
ret['passwd'] = data.pw_passwd
ret['shell'] = data.pw_shell
ret['uid'] = data.pw_uid
# Put GECOS info into a list
gecos_field = data.pw_gecos.split(',', 3)
# Assign empty strings for any unspecified GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
ret['fullname'] = gecos_field[0]
ret['roomnumber'] = gecos_field[1]
ret['workphone'] = gecos_field[2]
ret['homephone'] = gecos_field[3]
except KeyError:
return {}
return ret | ['def', 'info', '(', 'name', ')', ':', 'ret', '=', '{', '}', 'try', ':', 'data', '=', 'pwd', '.', 'getpwnam', '(', 'name', ')', 'ret', '[', "'gid'", ']', '=', 'data', '.', 'pw_gid', 'ret', '[', "'groups'", ']', '=', 'list_groups', '(', 'name', ')', 'ret', '[', "'home'", ']', '=', 'data', '.', 'pw_dir', 'ret', '[', "'name'", ']', '=', 'data', '.', 'pw_name', 'ret', '[', "'passwd'", ']', '=', 'data', '.', 'pw_passwd', 'ret', '[', "'shell'", ']', '=', 'data', '.', 'pw_shell', 'ret', '[', "'uid'", ']', '=', 'data', '.', 'pw_uid', '# Put GECOS info into a list', 'gecos_field', '=', 'data', '.', 'pw_gecos', '.', 'split', '(', "','", ',', '3', ')', '# Assign empty strings for any unspecified GECOS fields', 'while', 'len', '(', 'gecos_field', ')', '<', '4', ':', 'gecos_field', '.', 'append', '(', "''", ')', 'ret', '[', "'fullname'", ']', '=', 'gecos_field', '[', '0', ']', 'ret', '[', "'roomnumber'", ']', '=', 'gecos_field', '[', '1', ']', 'ret', '[', "'workphone'", ']', '=', 'gecos_field', '[', '2', ']', 'ret', '[', "'homephone'", ']', '=', 'gecos_field', '[', '3', ']', 'except', 'KeyError', ':', 'return', '{', '}', 'return', 'ret'] | Return user information
CLI Example:
.. code-block:: bash
salt '*' user.info root | ['Return', 'user', 'information'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pw_user.py#L428-L459 |
518 | GoogleCloudPlatform/compute-image-packages | packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py | ScriptRetriever._DownloadAuthUrl | def _DownloadAuthUrl(self, url, dest_dir):
"""Download a Google Storage URL using an authentication token.
If the token cannot be fetched, fallback to unauthenticated download.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info(
'Downloading url from %s to %s using authentication token.', url, dest)
if not self.token:
response = self.watcher.GetMetadata(
self.token_metadata_key, recursive=False, retry=False)
if not response:
self.logger.info(
'Authentication token not found. Attempting unauthenticated '
'download.')
return self._DownloadUrl(url, dest_dir)
self.token = '%s %s' % (
response.get('token_type', ''), response.get('access_token', ''))
try:
request = urlrequest.Request(url)
request.add_unredirected_header('Metadata-Flavor', 'Google')
request.add_unredirected_header('Authorization', self.token)
content = urlrequest.urlopen(request).read().decode('utf-8')
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
return None
with open(dest, 'wb') as f:
f.write(content)
return dest | python | def _DownloadAuthUrl(self, url, dest_dir):
"""Download a Google Storage URL using an authentication token.
If the token cannot be fetched, fallback to unauthenticated download.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info(
'Downloading url from %s to %s using authentication token.', url, dest)
if not self.token:
response = self.watcher.GetMetadata(
self.token_metadata_key, recursive=False, retry=False)
if not response:
self.logger.info(
'Authentication token not found. Attempting unauthenticated '
'download.')
return self._DownloadUrl(url, dest_dir)
self.token = '%s %s' % (
response.get('token_type', ''), response.get('access_token', ''))
try:
request = urlrequest.Request(url)
request.add_unredirected_header('Metadata-Flavor', 'Google')
request.add_unredirected_header('Authorization', self.token)
content = urlrequest.urlopen(request).read().decode('utf-8')
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
return None
with open(dest, 'wb') as f:
f.write(content)
return dest | ['def', '_DownloadAuthUrl', '(', 'self', ',', 'url', ',', 'dest_dir', ')', ':', 'dest_file', '=', 'tempfile', '.', 'NamedTemporaryFile', '(', 'dir', '=', 'dest_dir', ',', 'delete', '=', 'False', ')', 'dest_file', '.', 'close', '(', ')', 'dest', '=', 'dest_file', '.', 'name', 'self', '.', 'logger', '.', 'info', '(', "'Downloading url from %s to %s using authentication token.'", ',', 'url', ',', 'dest', ')', 'if', 'not', 'self', '.', 'token', ':', 'response', '=', 'self', '.', 'watcher', '.', 'GetMetadata', '(', 'self', '.', 'token_metadata_key', ',', 'recursive', '=', 'False', ',', 'retry', '=', 'False', ')', 'if', 'not', 'response', ':', 'self', '.', 'logger', '.', 'info', '(', "'Authentication token not found. Attempting unauthenticated '", "'download.'", ')', 'return', 'self', '.', '_DownloadUrl', '(', 'url', ',', 'dest_dir', ')', 'self', '.', 'token', '=', "'%s %s'", '%', '(', 'response', '.', 'get', '(', "'token_type'", ',', "''", ')', ',', 'response', '.', 'get', '(', "'access_token'", ',', "''", ')', ')', 'try', ':', 'request', '=', 'urlrequest', '.', 'Request', '(', 'url', ')', 'request', '.', 'add_unredirected_header', '(', "'Metadata-Flavor'", ',', "'Google'", ')', 'request', '.', 'add_unredirected_header', '(', "'Authorization'", ',', 'self', '.', 'token', ')', 'content', '=', 'urlrequest', '.', 'urlopen', '(', 'request', ')', '.', 'read', '(', ')', '.', 'decode', '(', "'utf-8'", ')', 'except', '(', 'httpclient', '.', 'HTTPException', ',', 'socket', '.', 'error', ',', 'urlerror', '.', 'URLError', ')', 'as', 'e', ':', 'self', '.', 'logger', '.', 'warning', '(', "'Could not download %s. %s.'", ',', 'url', ',', 'str', '(', 'e', ')', ')', 'return', 'None', 'with', 'open', '(', 'dest', ',', "'wb'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'content', ')', 'return', 'dest'] | Download a Google Storage URL using an authentication token.
If the token cannot be fetched, fallback to unauthenticated download.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script. | ['Download', 'a', 'Google', 'Storage', 'URL', 'using', 'an', 'authentication', 'token', '.'] | train | https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py#L48-L92 |
519 | Metatab/metapack | metapack/cli/metaaws.py | get_iam_account | def get_iam_account(l, args, user_name):
"""Return the local Account for a user name, by fetching User and looking up
the arn. """
iam = get_resource(args, 'iam')
user = iam.User(user_name)
user.load()
return l.find_or_new_account(user.arn) | python | def get_iam_account(l, args, user_name):
"""Return the local Account for a user name, by fetching User and looking up
the arn. """
iam = get_resource(args, 'iam')
user = iam.User(user_name)
user.load()
return l.find_or_new_account(user.arn) | ['def', 'get_iam_account', '(', 'l', ',', 'args', ',', 'user_name', ')', ':', 'iam', '=', 'get_resource', '(', 'args', ',', "'iam'", ')', 'user', '=', 'iam', '.', 'User', '(', 'user_name', ')', 'user', '.', 'load', '(', ')', 'return', 'l', '.', 'find_or_new_account', '(', 'user', '.', 'arn', ')'] | Return the local Account for a user name, by fetching User and looking up
the arn. | ['Return', 'the', 'local', 'Account', 'for', 'a', 'user', 'name', 'by', 'fetching', 'User', 'and', 'looking', 'up', 'the', 'arn', '.'] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L540-L548 |
520 | codelv/enaml-native-barcode | src/zxing/android/android_barcode.py | IntentIntegrator.scan | def scan(cls, formats=ALL_CODE_TYPES, camera=-1):
""" Shortcut only one at a time will work... """
app = AndroidApplication.instance()
r = app.create_future()
#: Initiate a scan
pkg = BarcodePackage.instance()
pkg.setBarcodeResultListener(pkg.getId())
pkg.onBarcodeResult.connect(r.set_result)
intent = cls(app)
if formats:
intent.setDesiredBarcodeFormats(formats)
if camera != -1:
intent.setCameraId(camera)
intent.initiateScan()
return r | python | def scan(cls, formats=ALL_CODE_TYPES, camera=-1):
""" Shortcut only one at a time will work... """
app = AndroidApplication.instance()
r = app.create_future()
#: Initiate a scan
pkg = BarcodePackage.instance()
pkg.setBarcodeResultListener(pkg.getId())
pkg.onBarcodeResult.connect(r.set_result)
intent = cls(app)
if formats:
intent.setDesiredBarcodeFormats(formats)
if camera != -1:
intent.setCameraId(camera)
intent.initiateScan()
return r | ['def', 'scan', '(', 'cls', ',', 'formats', '=', 'ALL_CODE_TYPES', ',', 'camera', '=', '-', '1', ')', ':', 'app', '=', 'AndroidApplication', '.', 'instance', '(', ')', 'r', '=', 'app', '.', 'create_future', '(', ')', '#: Initiate a scan', 'pkg', '=', 'BarcodePackage', '.', 'instance', '(', ')', 'pkg', '.', 'setBarcodeResultListener', '(', 'pkg', '.', 'getId', '(', ')', ')', 'pkg', '.', 'onBarcodeResult', '.', 'connect', '(', 'r', '.', 'set_result', ')', 'intent', '=', 'cls', '(', 'app', ')', 'if', 'formats', ':', 'intent', '.', 'setDesiredBarcodeFormats', '(', 'formats', ')', 'if', 'camera', '!=', '-', '1', ':', 'intent', '.', 'setCameraId', '(', 'camera', ')', 'intent', '.', 'initiateScan', '(', ')', 'return', 'r'] | Shortcut only one at a time will work... | ['Shortcut', 'only', 'one', 'at', 'a', 'time', 'will', 'work', '...'] | train | https://github.com/codelv/enaml-native-barcode/blob/dc3c4b41980c0f93d7fa828f48a751ae26daf297/src/zxing/android/android_barcode.py#L66-L83 |
521 | Cognexa/cxflow | cxflow/utils/reflection.py | list_submodules | def list_submodules(module_name: str) -> List[str]: # pylint: disable=invalid-sequence-index
"""
List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed
"""
_module = importlib.import_module(module_name)
return [module_name+'.'+submodule_name for _, submodule_name, _ in pkgutil.iter_modules(_module.__path__)] | python | def list_submodules(module_name: str) -> List[str]: # pylint: disable=invalid-sequence-index
"""
List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed
"""
_module = importlib.import_module(module_name)
return [module_name+'.'+submodule_name for _, submodule_name, _ in pkgutil.iter_modules(_module.__path__)] | ['def', 'list_submodules', '(', 'module_name', ':', 'str', ')', '->', 'List', '[', 'str', ']', ':', '# pylint: disable=invalid-sequence-index', '_module', '=', 'importlib', '.', 'import_module', '(', 'module_name', ')', 'return', '[', 'module_name', '+', "'.'", '+', 'submodule_name', 'for', '_', ',', 'submodule_name', ',', '_', 'in', 'pkgutil', '.', 'iter_modules', '(', '_module', '.', '__path__', ')', ']'] | List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed | ['List', 'full', 'names', 'of', 'all', 'the', 'submodules', 'in', 'the', 'given', 'module', '.'] | train | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/reflection.py#L64-L71 |
522 | novopl/peltak | src/peltak/extra/gitflow/logic/common.py | get_base_branch | def get_base_branch():
# type: () -> str
""" Return the base branch for the current branch.
This function will first try to guess the base branch and if it can't it
will let the user choose the branch from the list of all local branches.
Returns:
str: The name of the branch the current branch is based on.
"""
base_branch = git.guess_base_branch()
if base_branch is None:
log.info("Can't guess the base branch, you have to pick one yourself:")
base_branch = choose_branch()
return base_branch | python | def get_base_branch():
# type: () -> str
""" Return the base branch for the current branch.
This function will first try to guess the base branch and if it can't it
will let the user choose the branch from the list of all local branches.
Returns:
str: The name of the branch the current branch is based on.
"""
base_branch = git.guess_base_branch()
if base_branch is None:
log.info("Can't guess the base branch, you have to pick one yourself:")
base_branch = choose_branch()
return base_branch | ['def', 'get_base_branch', '(', ')', ':', '# type: () -> str', 'base_branch', '=', 'git', '.', 'guess_base_branch', '(', ')', 'if', 'base_branch', 'is', 'None', ':', 'log', '.', 'info', '(', '"Can\'t guess the base branch, you have to pick one yourself:"', ')', 'base_branch', '=', 'choose_branch', '(', ')', 'return', 'base_branch'] | Return the base branch for the current branch.
This function will first try to guess the base branch and if it can't it
will let the user choose the branch from the list of all local branches.
Returns:
str: The name of the branch the current branch is based on. | ['Return', 'the', 'base', 'branch', 'for', 'the', 'current', 'branch', '.'] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/common.py#L178-L194 |
523 | wummel/linkchecker | linkcheck/configuration/confparse.py | LCConfigParser.read | def read (self, files):
"""Read settings from given config files.
@raises: LinkCheckerError on syntax errors in the config file(s)
"""
assert isinstance(files, list), "Invalid file list %r" % files
try:
self.read_ok = super(LCConfigParser, self).read(files)
if len(self.read_ok) < len(files):
failed_files = set(files) - set(self.read_ok)
log.warn(LOG_CHECK, "Could not read configuration files %s.", failed_files)
# Read all the configuration parameters from the given files.
self.read_checking_config()
self.read_authentication_config()
self.read_filtering_config()
self.read_output_config()
self.read_plugin_config()
except Exception as msg:
raise LinkCheckerError(
_("Error parsing configuration: %s") % unicode(msg)) | python | def read (self, files):
"""Read settings from given config files.
@raises: LinkCheckerError on syntax errors in the config file(s)
"""
assert isinstance(files, list), "Invalid file list %r" % files
try:
self.read_ok = super(LCConfigParser, self).read(files)
if len(self.read_ok) < len(files):
failed_files = set(files) - set(self.read_ok)
log.warn(LOG_CHECK, "Could not read configuration files %s.", failed_files)
# Read all the configuration parameters from the given files.
self.read_checking_config()
self.read_authentication_config()
self.read_filtering_config()
self.read_output_config()
self.read_plugin_config()
except Exception as msg:
raise LinkCheckerError(
_("Error parsing configuration: %s") % unicode(msg)) | ['def', 'read', '(', 'self', ',', 'files', ')', ':', 'assert', 'isinstance', '(', 'files', ',', 'list', ')', ',', '"Invalid file list %r"', '%', 'files', 'try', ':', 'self', '.', 'read_ok', '=', 'super', '(', 'LCConfigParser', ',', 'self', ')', '.', 'read', '(', 'files', ')', 'if', 'len', '(', 'self', '.', 'read_ok', ')', '<', 'len', '(', 'files', ')', ':', 'failed_files', '=', 'set', '(', 'files', ')', '-', 'set', '(', 'self', '.', 'read_ok', ')', 'log', '.', 'warn', '(', 'LOG_CHECK', ',', '"Could not read configuration files %s."', ',', 'failed_files', ')', '# Read all the configuration parameters from the given files.', 'self', '.', 'read_checking_config', '(', ')', 'self', '.', 'read_authentication_config', '(', ')', 'self', '.', 'read_filtering_config', '(', ')', 'self', '.', 'read_output_config', '(', ')', 'self', '.', 'read_plugin_config', '(', ')', 'except', 'Exception', 'as', 'msg', ':', 'raise', 'LinkCheckerError', '(', '_', '(', '"Error parsing configuration: %s"', ')', '%', 'unicode', '(', 'msg', ')', ')'] | Read settings from given config files.
@raises: LinkCheckerError on syntax errors in the config file(s) | ['Read', 'settings', 'from', 'given', 'config', 'files', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/configuration/confparse.py#L43-L62 |
524 | gem/oq-engine | openquake/calculators/ucerf_base.py | get_rupture_surface | def get_rupture_surface(mag, nodal_plane, hypocenter, msr,
rupture_aspect_ratio, upper_seismogenic_depth,
lower_seismogenic_depth, mesh_spacing=1.0):
"""
Create and return rupture surface object with given properties.
:param mag:
Magnitude value, used to calculate rupture dimensions,
see :meth:`_get_rupture_dimensions`.
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`
describing the rupture orientation.
:param hypocenter:
Point representing rupture's hypocenter.
:returns:
Instance of
:class:`~openquake.hazardlib.geo.surface.planar.PlanarSurface`.
"""
assert (upper_seismogenic_depth <= hypocenter.depth
and lower_seismogenic_depth >= hypocenter.depth)
rdip = math.radians(nodal_plane.dip)
# precalculated azimuth values for horizontal-only and vertical-only
# moves from one point to another on the plane defined by strike
# and dip:
azimuth_right = nodal_plane.strike
azimuth_down = (azimuth_right + 90) % 360
azimuth_left = (azimuth_down + 90) % 360
azimuth_up = (azimuth_left + 90) % 360
rup_length, rup_width = get_rupture_dimensions(
mag, nodal_plane, msr, rupture_aspect_ratio, upper_seismogenic_depth,
lower_seismogenic_depth)
# calculate the height of the rupture being projected
# on the vertical plane:
rup_proj_height = rup_width * math.sin(rdip)
# and it's width being projected on the horizontal one:
rup_proj_width = rup_width * math.cos(rdip)
# half height of the vertical component of rupture width
# is the vertical distance between the rupture geometrical
# center and it's upper and lower borders:
hheight = rup_proj_height / 2
# calculate how much shallower the upper border of the rupture
# is than the upper seismogenic depth:
vshift = upper_seismogenic_depth - hypocenter.depth + hheight
# if it is shallower (vshift > 0) than we need to move the rupture
# by that value vertically.
if vshift < 0:
# the top edge is below upper seismogenic depth. now we need
# to check that we do not cross the lower border.
vshift = lower_seismogenic_depth - hypocenter.depth - hheight
if vshift > 0:
# the bottom edge of the rupture is above the lower sesmogenic
# depth. that means that we don't need to move the rupture
# as it fits inside seismogenic layer.
vshift = 0
# if vshift < 0 than we need to move the rupture up by that value.
# now we need to find the position of rupture's geometrical center.
# in any case the hypocenter point must lie on the surface, however
# the rupture center might be off (below or above) along the dip.
rupture_center = hypocenter
if vshift != 0:
# we need to move the rupture center to make the rupture fit
# inside the seismogenic layer.
hshift = abs(vshift / math.tan(rdip))
rupture_center = rupture_center.point_at(
horizontal_distance=hshift, vertical_increment=vshift,
azimuth=(azimuth_up if vshift < 0 else azimuth_down))
# from the rupture center we can now compute the coordinates of the
# four coorners by moving along the diagonals of the plane. This seems
# to be better then moving along the perimeter, because in this case
# errors are accumulated that induce distorsions in the shape with
# consequent raise of exceptions when creating PlanarSurface objects
# theta is the angle between the diagonal of the surface projection
# and the line passing through the rupture center and parallel to the
# top and bottom edges. Theta is zero for vertical ruptures (because
# rup_proj_width is zero)
theta = math.degrees(
math.atan((rup_proj_width / 2.) / (rup_length / 2.)))
hor_dist = math.sqrt(
(rup_length / 2.) ** 2 + (rup_proj_width / 2.) ** 2)
left_top = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=-rup_proj_height / 2,
azimuth=(nodal_plane.strike + 180 + theta) % 360)
right_top = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=-rup_proj_height / 2,
azimuth=(nodal_plane.strike - theta) % 360)
left_bottom = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=rup_proj_height / 2,
azimuth=(nodal_plane.strike + 180 - theta) % 360)
right_bottom = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=rup_proj_height / 2,
azimuth=(nodal_plane.strike + theta) % 360)
return PlanarSurface(nodal_plane.strike, nodal_plane.dip,
left_top, right_top, right_bottom, left_bottom) | python | def get_rupture_surface(mag, nodal_plane, hypocenter, msr,
rupture_aspect_ratio, upper_seismogenic_depth,
lower_seismogenic_depth, mesh_spacing=1.0):
"""
Create and return rupture surface object with given properties.
:param mag:
Magnitude value, used to calculate rupture dimensions,
see :meth:`_get_rupture_dimensions`.
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`
describing the rupture orientation.
:param hypocenter:
Point representing rupture's hypocenter.
:returns:
Instance of
:class:`~openquake.hazardlib.geo.surface.planar.PlanarSurface`.
"""
assert (upper_seismogenic_depth <= hypocenter.depth
and lower_seismogenic_depth >= hypocenter.depth)
rdip = math.radians(nodal_plane.dip)
# precalculated azimuth values for horizontal-only and vertical-only
# moves from one point to another on the plane defined by strike
# and dip:
azimuth_right = nodal_plane.strike
azimuth_down = (azimuth_right + 90) % 360
azimuth_left = (azimuth_down + 90) % 360
azimuth_up = (azimuth_left + 90) % 360
rup_length, rup_width = get_rupture_dimensions(
mag, nodal_plane, msr, rupture_aspect_ratio, upper_seismogenic_depth,
lower_seismogenic_depth)
# calculate the height of the rupture being projected
# on the vertical plane:
rup_proj_height = rup_width * math.sin(rdip)
# and it's width being projected on the horizontal one:
rup_proj_width = rup_width * math.cos(rdip)
# half height of the vertical component of rupture width
# is the vertical distance between the rupture geometrical
# center and it's upper and lower borders:
hheight = rup_proj_height / 2
# calculate how much shallower the upper border of the rupture
# is than the upper seismogenic depth:
vshift = upper_seismogenic_depth - hypocenter.depth + hheight
# if it is shallower (vshift > 0) than we need to move the rupture
# by that value vertically.
if vshift < 0:
# the top edge is below upper seismogenic depth. now we need
# to check that we do not cross the lower border.
vshift = lower_seismogenic_depth - hypocenter.depth - hheight
if vshift > 0:
# the bottom edge of the rupture is above the lower sesmogenic
# depth. that means that we don't need to move the rupture
# as it fits inside seismogenic layer.
vshift = 0
# if vshift < 0 than we need to move the rupture up by that value.
# now we need to find the position of rupture's geometrical center.
# in any case the hypocenter point must lie on the surface, however
# the rupture center might be off (below or above) along the dip.
rupture_center = hypocenter
if vshift != 0:
# we need to move the rupture center to make the rupture fit
# inside the seismogenic layer.
hshift = abs(vshift / math.tan(rdip))
rupture_center = rupture_center.point_at(
horizontal_distance=hshift, vertical_increment=vshift,
azimuth=(azimuth_up if vshift < 0 else azimuth_down))
# from the rupture center we can now compute the coordinates of the
# four coorners by moving along the diagonals of the plane. This seems
# to be better then moving along the perimeter, because in this case
# errors are accumulated that induce distorsions in the shape with
# consequent raise of exceptions when creating PlanarSurface objects
# theta is the angle between the diagonal of the surface projection
# and the line passing through the rupture center and parallel to the
# top and bottom edges. Theta is zero for vertical ruptures (because
# rup_proj_width is zero)
theta = math.degrees(
math.atan((rup_proj_width / 2.) / (rup_length / 2.)))
hor_dist = math.sqrt(
(rup_length / 2.) ** 2 + (rup_proj_width / 2.) ** 2)
left_top = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=-rup_proj_height / 2,
azimuth=(nodal_plane.strike + 180 + theta) % 360)
right_top = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=-rup_proj_height / 2,
azimuth=(nodal_plane.strike - theta) % 360)
left_bottom = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=rup_proj_height / 2,
azimuth=(nodal_plane.strike + 180 - theta) % 360)
right_bottom = rupture_center.point_at(
horizontal_distance=hor_dist,
vertical_increment=rup_proj_height / 2,
azimuth=(nodal_plane.strike + theta) % 360)
return PlanarSurface(nodal_plane.strike, nodal_plane.dip,
left_top, right_top, right_bottom, left_bottom) | ['def', 'get_rupture_surface', '(', 'mag', ',', 'nodal_plane', ',', 'hypocenter', ',', 'msr', ',', 'rupture_aspect_ratio', ',', 'upper_seismogenic_depth', ',', 'lower_seismogenic_depth', ',', 'mesh_spacing', '=', '1.0', ')', ':', 'assert', '(', 'upper_seismogenic_depth', '<=', 'hypocenter', '.', 'depth', 'and', 'lower_seismogenic_depth', '>=', 'hypocenter', '.', 'depth', ')', 'rdip', '=', 'math', '.', 'radians', '(', 'nodal_plane', '.', 'dip', ')', '# precalculated azimuth values for horizontal-only and vertical-only', '# moves from one point to another on the plane defined by strike', '# and dip:', 'azimuth_right', '=', 'nodal_plane', '.', 'strike', 'azimuth_down', '=', '(', 'azimuth_right', '+', '90', ')', '%', '360', 'azimuth_left', '=', '(', 'azimuth_down', '+', '90', ')', '%', '360', 'azimuth_up', '=', '(', 'azimuth_left', '+', '90', ')', '%', '360', 'rup_length', ',', 'rup_width', '=', 'get_rupture_dimensions', '(', 'mag', ',', 'nodal_plane', ',', 'msr', ',', 'rupture_aspect_ratio', ',', 'upper_seismogenic_depth', ',', 'lower_seismogenic_depth', ')', '# calculate the height of the rupture being projected', '# on the vertical plane:', 'rup_proj_height', '=', 'rup_width', '*', 'math', '.', 'sin', '(', 'rdip', ')', "# and it's width being projected on the horizontal one:", 'rup_proj_width', '=', 'rup_width', '*', 'math', '.', 'cos', '(', 'rdip', ')', '# half height of the vertical component of rupture width', '# is the vertical distance between the rupture geometrical', "# center and it's upper and lower borders:", 'hheight', '=', 'rup_proj_height', '/', '2', '# calculate how much shallower the upper border of the rupture', '# is than the upper seismogenic depth:', 'vshift', '=', 'upper_seismogenic_depth', '-', 'hypocenter', '.', 'depth', '+', 'hheight', '# if it is shallower (vshift > 0) than we need to move the rupture', '# by that value vertically.', 'if', 'vshift', '<', '0', ':', '# the top edge is below upper seismogenic depth. now we need', '# to check that we do not cross the lower border.', 'vshift', '=', 'lower_seismogenic_depth', '-', 'hypocenter', '.', 'depth', '-', 'hheight', 'if', 'vshift', '>', '0', ':', '# the bottom edge of the rupture is above the lower sesmogenic', "# depth. that means that we don't need to move the rupture", '# as it fits inside seismogenic layer.', 'vshift', '=', '0', '# if vshift < 0 than we need to move the rupture up by that value.', "# now we need to find the position of rupture's geometrical center.", '# in any case the hypocenter point must lie on the surface, however', '# the rupture center might be off (below or above) along the dip.', 'rupture_center', '=', 'hypocenter', 'if', 'vshift', '!=', '0', ':', '# we need to move the rupture center to make the rupture fit', '# inside the seismogenic layer.', 'hshift', '=', 'abs', '(', 'vshift', '/', 'math', '.', 'tan', '(', 'rdip', ')', ')', 'rupture_center', '=', 'rupture_center', '.', 'point_at', '(', 'horizontal_distance', '=', 'hshift', ',', 'vertical_increment', '=', 'vshift', ',', 'azimuth', '=', '(', 'azimuth_up', 'if', 'vshift', '<', '0', 'else', 'azimuth_down', ')', ')', '# from the rupture center we can now compute the coordinates of the', '# four coorners by moving along the diagonals of the plane. 
This seems', '# to be better then moving along the perimeter, because in this case', '# errors are accumulated that induce distorsions in the shape with', '# consequent raise of exceptions when creating PlanarSurface objects', '# theta is the angle between the diagonal of the surface projection', '# and the line passing through the rupture center and parallel to the', '# top and bottom edges. Theta is zero for vertical ruptures (because', '# rup_proj_width is zero)', 'theta', '=', 'math', '.', 'degrees', '(', 'math', '.', 'atan', '(', '(', 'rup_proj_width', '/', '2.', ')', '/', '(', 'rup_length', '/', '2.', ')', ')', ')', 'hor_dist', '=', 'math', '.', 'sqrt', '(', '(', 'rup_length', '/', '2.', ')', '**', '2', '+', '(', 'rup_proj_width', '/', '2.', ')', '**', '2', ')', 'left_top', '=', 'rupture_center', '.', 'point_at', '(', 'horizontal_distance', '=', 'hor_dist', ',', 'vertical_increment', '=', '-', 'rup_proj_height', '/', '2', ',', 'azimuth', '=', '(', 'nodal_plane', '.', 'strike', '+', '180', '+', 'theta', ')', '%', '360', ')', 'right_top', '=', 'rupture_center', '.', 'point_at', '(', 'horizontal_distance', '=', 'hor_dist', ',', 'vertical_increment', '=', '-', 'rup_proj_height', '/', '2', ',', 'azimuth', '=', '(', 'nodal_plane', '.', 'strike', '-', 'theta', ')', '%', '360', ')', 'left_bottom', '=', 'rupture_center', '.', 'point_at', '(', 'horizontal_distance', '=', 'hor_dist', ',', 'vertical_increment', '=', 'rup_proj_height', '/', '2', ',', 'azimuth', '=', '(', 'nodal_plane', '.', 'strike', '+', '180', '-', 'theta', ')', '%', '360', ')', 'right_bottom', '=', 'rupture_center', '.', 'point_at', '(', 'horizontal_distance', '=', 'hor_dist', ',', 'vertical_increment', '=', 'rup_proj_height', '/', '2', ',', 'azimuth', '=', '(', 'nodal_plane', '.', 'strike', '+', 'theta', ')', '%', '360', ')', 'return', 'PlanarSurface', '(', 'nodal_plane', '.', 'strike', ',', 'nodal_plane', '.', 'dip', ',', 'left_top', ',', 'right_top', ',', 'right_bottom', ',', 'left_bottom', ')'] | Create and return rupture surface object with given properties.
:param mag:
Magnitude value, used to calculate rupture dimensions,
see :meth:`_get_rupture_dimensions`.
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`
describing the rupture orientation.
:param hypocenter:
Point representing rupture's hypocenter.
:returns:
Instance of
:class:`~openquake.hazardlib.geo.surface.planar.PlanarSurface`. | ['Create', 'and', 'return', 'rupture', 'surface', 'object', 'with', 'given', 'properties', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/ucerf_base.py#L492-L593 |
525 | Yelp/detect-secrets | detect_secrets/core/secrets_collection.py | SecretsCollection.json | def json(self):
"""Custom JSON encoder"""
output = {}
for filename in self.data:
output[filename] = []
for secret_hash in self.data[filename]:
tmp = self.data[filename][secret_hash].json()
del tmp['filename'] # Because filename will map to the secrets
output[filename].append(tmp)
return output | python | def json(self):
"""Custom JSON encoder"""
output = {}
for filename in self.data:
output[filename] = []
for secret_hash in self.data[filename]:
tmp = self.data[filename][secret_hash].json()
del tmp['filename'] # Because filename will map to the secrets
output[filename].append(tmp)
return output | ['def', 'json', '(', 'self', ')', ':', 'output', '=', '{', '}', 'for', 'filename', 'in', 'self', '.', 'data', ':', 'output', '[', 'filename', ']', '=', '[', ']', 'for', 'secret_hash', 'in', 'self', '.', 'data', '[', 'filename', ']', ':', 'tmp', '=', 'self', '.', 'data', '[', 'filename', ']', '[', 'secret_hash', ']', '.', 'json', '(', ')', 'del', 'tmp', '[', "'filename'", ']', '# Because filename will map to the secrets', 'output', '[', 'filename', ']', '.', 'append', '(', 'tmp', ')', 'return', 'output'] | Custom JSON encoder | ['Custom', 'JSON', 'encoder'] | train | https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/secrets_collection.py#L336-L348 |
526 | spyder-ide/spyder | spyder/plugins/explorer/widgets.py | DirView.setup | def setup(self, name_filters=['*.py', '*.pyw'], show_all=False,
single_click_to_open=False):
"""Setup tree widget"""
self.setup_view()
self.set_name_filters(name_filters)
self.show_all = show_all
self.single_click_to_open = single_click_to_open
# Setup context menu
self.menu = QMenu(self)
self.common_actions = self.setup_common_actions() | python | def setup(self, name_filters=['*.py', '*.pyw'], show_all=False,
single_click_to_open=False):
"""Setup tree widget"""
self.setup_view()
self.set_name_filters(name_filters)
self.show_all = show_all
self.single_click_to_open = single_click_to_open
# Setup context menu
self.menu = QMenu(self)
self.common_actions = self.setup_common_actions() | ['def', 'setup', '(', 'self', ',', 'name_filters', '=', '[', "'*.py'", ',', "'*.pyw'", ']', ',', 'show_all', '=', 'False', ',', 'single_click_to_open', '=', 'False', ')', ':', 'self', '.', 'setup_view', '(', ')', 'self', '.', 'set_name_filters', '(', 'name_filters', ')', 'self', '.', 'show_all', '=', 'show_all', 'self', '.', 'single_click_to_open', '=', 'single_click_to_open', '# Setup context menu\r', 'self', '.', 'menu', '=', 'QMenu', '(', 'self', ')', 'self', '.', 'common_actions', '=', 'self', '.', 'setup_common_actions', '(', ')'] | Setup tree widget | ['Setup', 'tree', 'widget'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L238-L249 |
527 | caesar0301/relogger | relogger/syslog.py | HEADER.timestamp | def timestamp(self, value):
"""
The local time when the message was written.
Must follow the format 'Mmm DD HH:MM:SS'. If
the day of the month is less than 10, then it
MUST be represented as a space and then the
number.
"""
if not self._timestamp_is_valid(value):
value = self._calculate_current_timestamp()
self._timestamp = value | python | def timestamp(self, value):
"""
The local time when the message was written.
Must follow the format 'Mmm DD HH:MM:SS'. If
the day of the month is less than 10, then it
MUST be represented as a space and then the
number.
"""
if not self._timestamp_is_valid(value):
value = self._calculate_current_timestamp()
self._timestamp = value | ['def', 'timestamp', '(', 'self', ',', 'value', ')', ':', 'if', 'not', 'self', '.', '_timestamp_is_valid', '(', 'value', ')', ':', 'value', '=', 'self', '.', '_calculate_current_timestamp', '(', ')', 'self', '.', '_timestamp', '=', 'value'] | The local time when the message was written.
Must follow the format 'Mmm DD HH:MM:SS'. If
the day of the month is less than 10, then it
MUST be represented as a space and then the
number. | ['The', 'local', 'time', 'when', 'the', 'message', 'was', 'written', '.'] | train | https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L118-L130 |
528 | ArchiveTeam/wpull | wpull/writer.py | BaseFileWriterSession._process_file_continue_request | def _process_file_continue_request(self, request: BaseRequest):
'''Modify the request to resume downloading file.'''
if os.path.exists(self._filename):
size = os.path.getsize(self._filename)
request.set_continue(size)
self._file_continue_requested = True
_logger.debug('Continue file from {0}.', size)
else:
_logger.debug('No file to continue.') | python | def _process_file_continue_request(self, request: BaseRequest):
'''Modify the request to resume downloading file.'''
if os.path.exists(self._filename):
size = os.path.getsize(self._filename)
request.set_continue(size)
self._file_continue_requested = True
_logger.debug('Continue file from {0}.', size)
else:
_logger.debug('No file to continue.') | ['def', '_process_file_continue_request', '(', 'self', ',', 'request', ':', 'BaseRequest', ')', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'self', '.', '_filename', ')', ':', 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'self', '.', '_filename', ')', 'request', '.', 'set_continue', '(', 'size', ')', 'self', '.', '_file_continue_requested', '=', 'True', '_logger', '.', 'debug', '(', "'Continue file from {0}.'", ',', 'size', ')', 'else', ':', '_logger', '.', 'debug', '(', "'No file to continue.'", ')'] | Modify the request to resume downloading file. | ['Modify', 'the', 'request', 'to', 'resume', 'downloading', 'file', '.'] | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/writer.py#L189-L198 |
529 | DerMitch/fritzbox-smarthome | fritzhome/actor.py | Actor.get_target_temperature | def get_target_temperature(self):
"""
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature | python | def get_target_temperature(self):
"""
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
self.target_temperature = self.__get_temp(value)
return self.target_temperature | ['def', 'get_target_temperature', '(', 'self', ')', ':', 'value', '=', 'self', '.', 'box', '.', 'homeautoswitch', '(', '"gethkrtsoll"', ',', 'self', '.', 'actor_id', ')', 'self', '.', 'target_temperature', '=', 'self', '.', '__get_temp', '(', 'value', ')', 'return', 'self', '.', 'target_temperature'] | Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown. | ['Returns', 'the', 'actual', 'target', 'temperature', '.', 'Attention', ':', 'Returns', 'None', 'if', 'the', 'value', 'can', 't', 'be', 'queried', 'or', 'is', 'unknown', '.'] | train | https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/actor.py#L125-L132 |
530 | OSSOS/MOP | src/ossos/core/ossos/mop_file.py | MOPFile.filename | def filename(self):
"""
Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename
"""
if self._filename is None:
self._filename = storage.get_file(self.basename,
self.ccd,
ext=self.extension,
version=self.type,
prefix=self.prefix)
return self._filename | python | def filename(self):
"""
Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename
"""
if self._filename is None:
self._filename = storage.get_file(self.basename,
self.ccd,
ext=self.extension,
version=self.type,
prefix=self.prefix)
return self._filename | ['def', 'filename', '(', 'self', ')', ':', 'if', 'self', '.', '_filename', 'is', 'None', ':', 'self', '.', '_filename', '=', 'storage', '.', 'get_file', '(', 'self', '.', 'basename', ',', 'self', '.', 'ccd', ',', 'ext', '=', 'self', '.', 'extension', ',', 'version', '=', 'self', '.', 'type', ',', 'prefix', '=', 'self', '.', 'prefix', ')', 'return', 'self', '.', '_filename'] | Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename | ['Name', 'if', 'the', 'MOP', 'formatted', 'file', 'to', 'parse', '.'] | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/mop_file.py#L28-L40 |
531 | nerdvegas/rez | src/rez/utils/filesystem.py | safe_chmod | def safe_chmod(path, mode):
"""Set the permissions mode on path, but only if it differs from the current mode.
"""
if stat.S_IMODE(os.stat(path).st_mode) != mode:
os.chmod(path, mode) | python | def safe_chmod(path, mode):
"""Set the permissions mode on path, but only if it differs from the current mode.
"""
if stat.S_IMODE(os.stat(path).st_mode) != mode:
os.chmod(path, mode) | ['def', 'safe_chmod', '(', 'path', ',', 'mode', ')', ':', 'if', 'stat', '.', 'S_IMODE', '(', 'os', '.', 'stat', '(', 'path', ')', '.', 'st_mode', ')', '!=', 'mode', ':', 'os', '.', 'chmod', '(', 'path', ',', 'mode', ')'] | Set the permissions mode on path, but only if it differs from the current mode. | ['Set', 'the', 'permissions', 'mode', 'on', 'path', 'but', 'only', 'if', 'it', 'differs', 'from', 'the', 'current', 'mode', '.'] | train | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L414-L418 |
532 | google/fleetspeak | fleetspeak/src/server/grpcservice/client/client.py | OutgoingConnection._RetryLoop | def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time() | python | def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time() | ['def', '_RetryLoop', '(', 'self', ',', 'func', ',', 'timeout', '=', 'None', ')', ':', 'timeout', '=', 'timeout', 'or', 'self', '.', 'DEFAULT_TIMEOUT', 'deadline', '=', 'time', '.', 'time', '(', ')', '+', 'timeout', 'sleep', '=', '1', 'while', 'True', ':', 'try', ':', 'return', 'func', '(', 'timeout', ')', 'except', 'grpc', '.', 'RpcError', ':', 'if', 'time', '.', 'time', '(', ')', '+', 'sleep', '>', 'deadline', ':', 'raise', 'time', '.', 'sleep', '(', 'sleep', ')', 'sleep', '*=', '2', 'timeout', '=', 'deadline', '-', 'time', '.', 'time', '(', ')'] | Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed. | ['Retries', 'an', 'operation', 'until', 'success', 'or', 'deadline', '.'] | train | https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L150-L172 |
533 | astrocatalogs/astrocats | astrocats/catalog/spectrum.py | Spectrum._check | def _check(self):
"""Check that spectrum has legal combination of attributes."""
# Run the super method
super(Spectrum, self)._check()
err_str = None
has_data = self._KEYS.DATA in self
has_wave = self._KEYS.WAVELENGTHS in self
has_flux = self._KEYS.FLUXES in self
has_filename = self._KEYS.FILENAME in self
if not has_data:
if (not has_wave or not has_flux) and not has_filename:
err_str = (
"If `{}` not given".format(self._KEYS.DATA) +
"; `{}` or `{}` needed".format(
self._KEYS.WAVELENGTHS, self._KEYS.FLUXES))
if err_str is not None:
raise ValueError(err_str)
return | python | def _check(self):
"""Check that spectrum has legal combination of attributes."""
# Run the super method
super(Spectrum, self)._check()
err_str = None
has_data = self._KEYS.DATA in self
has_wave = self._KEYS.WAVELENGTHS in self
has_flux = self._KEYS.FLUXES in self
has_filename = self._KEYS.FILENAME in self
if not has_data:
if (not has_wave or not has_flux) and not has_filename:
err_str = (
"If `{}` not given".format(self._KEYS.DATA) +
"; `{}` or `{}` needed".format(
self._KEYS.WAVELENGTHS, self._KEYS.FLUXES))
if err_str is not None:
raise ValueError(err_str)
return | ['def', '_check', '(', 'self', ')', ':', '# Run the super method', 'super', '(', 'Spectrum', ',', 'self', ')', '.', '_check', '(', ')', 'err_str', '=', 'None', 'has_data', '=', 'self', '.', '_KEYS', '.', 'DATA', 'in', 'self', 'has_wave', '=', 'self', '.', '_KEYS', '.', 'WAVELENGTHS', 'in', 'self', 'has_flux', '=', 'self', '.', '_KEYS', '.', 'FLUXES', 'in', 'self', 'has_filename', '=', 'self', '.', '_KEYS', '.', 'FILENAME', 'in', 'self', 'if', 'not', 'has_data', ':', 'if', '(', 'not', 'has_wave', 'or', 'not', 'has_flux', ')', 'and', 'not', 'has_filename', ':', 'err_str', '=', '(', '"If `{}` not given"', '.', 'format', '(', 'self', '.', '_KEYS', '.', 'DATA', ')', '+', '"; `{}` or `{}` needed"', '.', 'format', '(', 'self', '.', '_KEYS', '.', 'WAVELENGTHS', ',', 'self', '.', '_KEYS', '.', 'FLUXES', ')', ')', 'if', 'err_str', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', 'err_str', ')', 'return'] | Check that spectrum has legal combination of attributes. | ['Check', 'that', 'spectrum', 'has', 'legal', 'combination', 'of', 'attributes', '.'] | train | https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/spectrum.py#L112-L133 |
534 | shichao-an/115wangpan | u115/api.py | File.reload | def reload(self):
"""
Reload file info and metadata
* name
* sha
* pickcode
"""
res = self.api._req_file(self.fid)
data = res['data'][0]
self.name = data['file_name']
self.sha = data['sha1']
self.pickcode = data['pick_code'] | python | def reload(self):
"""
Reload file info and metadata
* name
* sha
* pickcode
"""
res = self.api._req_file(self.fid)
data = res['data'][0]
self.name = data['file_name']
self.sha = data['sha1']
self.pickcode = data['pick_code'] | ['def', 'reload', '(', 'self', ')', ':', 'res', '=', 'self', '.', 'api', '.', '_req_file', '(', 'self', '.', 'fid', ')', 'data', '=', 'res', '[', "'data'", ']', '[', '0', ']', 'self', '.', 'name', '=', 'data', '[', "'file_name'", ']', 'self', '.', 'sha', '=', 'data', '[', "'sha1'", ']', 'self', '.', 'pickcode', '=', 'data', '[', "'pick_code'", ']'] | Reload file info and metadata
* name
* sha
* pickcode | ['Reload', 'file', 'info', 'and', 'metadata'] | train | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1302-L1315 |
535 | mitsei/dlkit | dlkit/json_/relationship/sessions.py | FamilyHierarchyDesignSession.add_root_family | def add_root_family(self, family_id):
"""Adds a root family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: AlreadyExists - ``family_id`` is already in hierarchy
raise: NotFound - ``family_id`` not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_root_catalog(catalog_id=family_id)
return self._hierarchy_session.add_root(id_=family_id) | python | def add_root_family(self, family_id):
"""Adds a root family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: AlreadyExists - ``family_id`` is already in hierarchy
raise: NotFound - ``family_id`` not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_root_catalog(catalog_id=family_id)
return self._hierarchy_session.add_root(id_=family_id) | ['def', 'add_root_family', '(', 'self', ',', 'family_id', ')', ':', '# Implemented from template for', '# osid.resource.BinHierarchyDesignSession.add_root_bin_template', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'add_root_catalog', '(', 'catalog_id', '=', 'family_id', ')', 'return', 'self', '.', '_hierarchy_session', '.', 'add_root', '(', 'id_', '=', 'family_id', ')'] | Adds a root family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: AlreadyExists - ``family_id`` is already in hierarchy
raise: NotFound - ``family_id`` not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Adds', 'a', 'root', 'family', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/sessions.py#L2456-L2472 |
536 | OCHA-DAP/hdx-python-api | src/hdx/data/hdxobject.py | HDXObject._read_from_hdx | def _read_from_hdx(self, object_type, value, fieldname='id',
action=None, **kwargs):
# type: (str, str, str, Optional[str], Any) -> Tuple[bool, Union[Dict, str]]
"""Makes a read call to HDX passing in given parameter.
Args:
object_type (str): Description of HDX object type (for messages)
value (str): Value of HDX field
fieldname (str): HDX field name. Defaults to id.
action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
**kwargs: Other fields to pass to CKAN.
Returns:
Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
"""
if not fieldname:
raise HDXError('Empty %s field name!' % object_type)
if action is None:
action = self.actions()['show']
data = {fieldname: value}
data.update(kwargs)
try:
result = self.configuration.call_remoteckan(action, data)
return True, result
except NotFound:
return False, '%s=%s: not found!' % (fieldname, value)
except Exception as e:
raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e) | python | def _read_from_hdx(self, object_type, value, fieldname='id',
action=None, **kwargs):
# type: (str, str, str, Optional[str], Any) -> Tuple[bool, Union[Dict, str]]
"""Makes a read call to HDX passing in given parameter.
Args:
object_type (str): Description of HDX object type (for messages)
value (str): Value of HDX field
fieldname (str): HDX field name. Defaults to id.
action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
**kwargs: Other fields to pass to CKAN.
Returns:
Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
"""
if not fieldname:
raise HDXError('Empty %s field name!' % object_type)
if action is None:
action = self.actions()['show']
data = {fieldname: value}
data.update(kwargs)
try:
result = self.configuration.call_remoteckan(action, data)
return True, result
except NotFound:
return False, '%s=%s: not found!' % (fieldname, value)
except Exception as e:
raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e) | ['def', '_read_from_hdx', '(', 'self', ',', 'object_type', ',', 'value', ',', 'fieldname', '=', "'id'", ',', 'action', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# type: (str, str, str, Optional[str], Any) -> Tuple[bool, Union[Dict, str]]', 'if', 'not', 'fieldname', ':', 'raise', 'HDXError', '(', "'Empty %s field name!'", '%', 'object_type', ')', 'if', 'action', 'is', 'None', ':', 'action', '=', 'self', '.', 'actions', '(', ')', '[', "'show'", ']', 'data', '=', '{', 'fieldname', ':', 'value', '}', 'data', '.', 'update', '(', 'kwargs', ')', 'try', ':', 'result', '=', 'self', '.', 'configuration', '.', 'call_remoteckan', '(', 'action', ',', 'data', ')', 'return', 'True', ',', 'result', 'except', 'NotFound', ':', 'return', 'False', ',', "'%s=%s: not found!'", '%', '(', 'fieldname', ',', 'value', ')', 'except', 'Exception', 'as', 'e', ':', 'raisefrom', '(', 'HDXError', ',', "'Failed when trying to read: %s=%s! (POST)'", '%', '(', 'fieldname', ',', 'value', ')', ',', 'e', ')'] | Makes a read call to HDX passing in given parameter.
Args:
object_type (str): Description of HDX object type (for messages)
value (str): Value of HDX field
fieldname (str): HDX field name. Defaults to id.
action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
**kwargs: Other fields to pass to CKAN.
Returns:
Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error) | ['Makes', 'a', 'read', 'call', 'to', 'HDX', 'passing', 'in', 'given', 'parameter', '.'] | train | https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/hdxobject.py#L95-L122 |
537 | InQuest/python-sandboxapi | sandboxapi/fireeye.py | FireEyeAPI.analyze | def analyze(self, handle, filename):
"""Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File ID as a string
"""
# multipart post files.
files = {"file": (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
# add submission options
data = {
#FIXME: These may need to change, see docs page 36
'options': '{"application":"0","timeout":"500","priority":"0","profiles":["%s"],"analysistype":"0","force":"true","prefetch":"1"}' % self.profile,
}
response = self._request("/submissions", method='POST', params=data, files=files)
try:
if response.status_code == 200:
# good response
try:
return response.json()['ID']
except TypeError:
return response.json()[0]['ID']
else:
raise sandboxapi.SandboxError("api error in analyze ({u}): {r}".format(u=response.url, r=response.content))
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | python | def analyze(self, handle, filename):
"""Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File ID as a string
"""
# multipart post files.
files = {"file": (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
# add submission options
data = {
#FIXME: These may need to change, see docs page 36
'options': '{"application":"0","timeout":"500","priority":"0","profiles":["%s"],"analysistype":"0","force":"true","prefetch":"1"}' % self.profile,
}
response = self._request("/submissions", method='POST', params=data, files=files)
try:
if response.status_code == 200:
# good response
try:
return response.json()['ID']
except TypeError:
return response.json()[0]['ID']
else:
raise sandboxapi.SandboxError("api error in analyze ({u}): {r}".format(u=response.url, r=response.content))
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | ['def', 'analyze', '(', 'self', ',', 'handle', ',', 'filename', ')', ':', '# multipart post files.', 'files', '=', '{', '"file"', ':', '(', 'filename', ',', 'handle', ')', '}', '# ensure the handle is at offset 0.', 'handle', '.', 'seek', '(', '0', ')', '# add submission options', 'data', '=', '{', '#FIXME: These may need to change, see docs page 36', "'options'", ':', '\'{"application":"0","timeout":"500","priority":"0","profiles":["%s"],"analysistype":"0","force":"true","prefetch":"1"}\'', '%', 'self', '.', 'profile', ',', '}', 'response', '=', 'self', '.', '_request', '(', '"/submissions"', ',', 'method', '=', "'POST'", ',', 'params', '=', 'data', ',', 'files', '=', 'files', ')', 'try', ':', 'if', 'response', '.', 'status_code', '==', '200', ':', '# good response', 'try', ':', 'return', 'response', '.', 'json', '(', ')', '[', "'ID'", ']', 'except', 'TypeError', ':', 'return', 'response', '.', 'json', '(', ')', '[', '0', ']', '[', "'ID'", ']', 'else', ':', 'raise', 'sandboxapi', '.', 'SandboxError', '(', '"api error in analyze ({u}): {r}"', '.', 'format', '(', 'u', '=', 'response', '.', 'url', ',', 'r', '=', 'response', '.', 'content', ')', ')', 'except', '(', 'ValueError', ',', 'KeyError', ')', 'as', 'e', ':', 'raise', 'sandboxapi', '.', 'SandboxError', '(', '"error in analyze: {e}"', '.', 'format', '(', 'e', '=', 'e', ')', ')'] | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File ID as a string | ['Submit', 'a', 'file', 'for', 'analysis', '.'] | train | https://github.com/InQuest/python-sandboxapi/blob/9bad73f453e25d7d23e7b4b1ae927f44a35a5bc3/sandboxapi/fireeye.py#L79-L114 |
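A hedged usage sketch for the analyze() method above. Only analyze(handle, filename) is taken directly from the record; the FireEyeAPI constructor arguments (credentials, appliance URL, analysis profile) and the sample.exe path are assumptions made for illustration.

from sandboxapi import fireeye

# Assumed constructor: credentials, appliance URL and analysis profile name.
sandbox = fireeye.FireEyeAPI('api_user', 'api_password',
                             'https://fireeye.example.local', 'winxp-sp3')

with open('sample.exe', 'rb') as handle:
    file_id = sandbox.analyze(handle, 'sample.exe')   # returns the submission ID
    print('submitted as', file_id)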
538 | limodou/uliweb | uliweb/core/dispatch.py | bind | def bind(topic, signal=None, kind=MIDDLE, nice=-1):
"""
This is a decorator function, so you should use it as:
@bind('init')
def process_init(a, b):
...
"""
def f(func):
if not topic in _receivers:
receivers = _receivers[topic] = []
else:
receivers = _receivers[topic]
if nice == -1:
if kind == MIDDLE:
n = 500
elif kind == HIGH:
n = 100
else:
n = 900
else:
n = nice
if callable(func):
func_name = func.__module__ + '.' + func.__name__
func = func
else:
func_name = func
func = None
_f = (n, {'func':func, 'signal':signal, 'func_name':func_name})
receivers.append(_f)
return func
return f | python | def bind(topic, signal=None, kind=MIDDLE, nice=-1):
"""
This is a decorator function, so you should use it as:
@bind('init')
def process_init(a, b):
...
"""
def f(func):
if not topic in _receivers:
receivers = _receivers[topic] = []
else:
receivers = _receivers[topic]
if nice == -1:
if kind == MIDDLE:
n = 500
elif kind == HIGH:
n = 100
else:
n = 900
else:
n = nice
if callable(func):
func_name = func.__module__ + '.' + func.__name__
func = func
else:
func_name = func
func = None
_f = (n, {'func':func, 'signal':signal, 'func_name':func_name})
receivers.append(_f)
return func
return f | ['def', 'bind', '(', 'topic', ',', 'signal', '=', 'None', ',', 'kind', '=', 'MIDDLE', ',', 'nice', '=', '-', '1', ')', ':', 'def', 'f', '(', 'func', ')', ':', 'if', 'not', 'topic', 'in', '_receivers', ':', 'receivers', '=', '_receivers', '[', 'topic', ']', '=', '[', ']', 'else', ':', 'receivers', '=', '_receivers', '[', 'topic', ']', 'if', 'nice', '==', '-', '1', ':', 'if', 'kind', '==', 'MIDDLE', ':', 'n', '=', '500', 'elif', 'kind', '==', 'HIGH', ':', 'n', '=', '100', 'else', ':', 'n', '=', '900', 'else', ':', 'n', '=', 'nice', 'if', 'callable', '(', 'func', ')', ':', 'func_name', '=', 'func', '.', '__module__', '+', "'.'", '+', 'func', '.', '__name__', 'func', '=', 'func', 'else', ':', 'func_name', '=', 'func', 'func', '=', 'None', '_f', '=', '(', 'n', ',', '{', "'func'", ':', 'func', ',', "'signal'", ':', 'signal', ',', "'func_name'", ':', 'func_name', '}', ')', 'receivers', '.', 'append', '(', '_f', ')', 'return', 'func', 'return', 'f'] | This is a decorator function, so you should use it as:
@bind('init')
def process_init(a, b):
... | ['This', 'is', 'a', 'decorator', 'function', 'so', 'you', 'should', 'use', 'it', 'as', ':'] | train | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/dispatch.py#L20-L52 |
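A usage sketch for the bind decorator above, assuming uliweb is installed and the decorator is importable from uliweb.core.dispatch (the module path given in the record); the 'init' topic and both handlers are illustrative.

from uliweb.core.dispatch import bind

@bind('init')                 # default kind=MIDDLE, so registered at nice=500
def process_init(a, b):
    return a + b

@bind('init', nice=100)       # an explicit nice value overrides the kind mapping
def early_init(a, b):
    return a - b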
539 | FNNDSC/med2image | med2image/error.py | fatal | def fatal( callingClass, astr_key, astr_extraMsg="" ):
'''
Convenience dispatcher to the error_exit() method.
Will raise "fatal" error, i.e. terminate script.
'''
b_exitToOS = True
report( callingClass, astr_key, b_exitToOS, astr_extraMsg ) | python | def fatal( callingClass, astr_key, astr_extraMsg="" ):
'''
Convenience dispatcher to the error_exit() method.
Will raise "fatal" error, i.e. terminate script.
'''
b_exitToOS = True
report( callingClass, astr_key, b_exitToOS, astr_extraMsg ) | ['def', 'fatal', '(', 'callingClass', ',', 'astr_key', ',', 'astr_extraMsg', '=', '""', ')', ':', 'b_exitToOS', '=', 'True', 'report', '(', 'callingClass', ',', 'astr_key', ',', 'b_exitToOS', ',', 'astr_extraMsg', ')'] | Convenience dispatcher to the error_exit() method.
Will raise "fatal" error, i.e. terminate script. | ['Convenience', 'dispatcher', 'to', 'the', 'error_exit', '()', 'method', '.'] | train | https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/error.py#L76-L83 |
540 | insightindustry/validator-collection | validator_collection/checkers.py | are_equivalent | def are_equivalent(*args, **kwargs):
"""Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
if len(args) == 1:
return True
first_item = args[0]
for item in args[1:]:
if type(item) != type(first_item): # pylint: disable=C0123
return False
if isinstance(item, dict):
if not are_dicts_equivalent(item, first_item):
return False
elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)):
if len(item) != len(first_item):
return False
for value in item:
if value not in first_item:
return False
for value in first_item:
if value not in item:
return False
else:
if item != first_item:
return False
return True | python | def are_equivalent(*args, **kwargs):
"""Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
if len(args) == 1:
return True
first_item = args[0]
for item in args[1:]:
if type(item) != type(first_item): # pylint: disable=C0123
return False
if isinstance(item, dict):
if not are_dicts_equivalent(item, first_item):
return False
elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)):
if len(item) != len(first_item):
return False
for value in item:
if value not in first_item:
return False
for value in first_item:
if value not in item:
return False
else:
if item != first_item:
return False
return True | ['def', 'are_equivalent', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'len', '(', 'args', ')', '==', '1', ':', 'return', 'True', 'first_item', '=', 'args', '[', '0', ']', 'for', 'item', 'in', 'args', '[', '1', ':', ']', ':', 'if', 'type', '(', 'item', ')', '!=', 'type', '(', 'first_item', ')', ':', '# pylint: disable=C0123', 'return', 'False', 'if', 'isinstance', '(', 'item', ',', 'dict', ')', ':', 'if', 'not', 'are_dicts_equivalent', '(', 'item', ',', 'first_item', ')', ':', 'return', 'False', 'elif', 'hasattr', '(', 'item', ',', "'__iter__'", ')', 'and', 'not', 'isinstance', '(', 'item', ',', '(', 'str', ',', 'bytes', ',', 'dict', ')', ')', ':', 'if', 'len', '(', 'item', ')', '!=', 'len', '(', 'first_item', ')', ':', 'return', 'False', 'for', 'value', 'in', 'item', ':', 'if', 'value', 'not', 'in', 'first_item', ':', 'return', 'False', 'for', 'value', 'in', 'first_item', ':', 'if', 'value', 'not', 'in', 'item', ':', 'return', 'False', 'else', ':', 'if', 'item', '!=', 'first_item', ':', 'return', 'False', 'return', 'True'] | Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | ['Indicate', 'if', 'arguments', 'passed', 'to', 'this', 'function', 'are', 'equivalent', '.'] | train | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L89-L148 |
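A usage sketch for are_equivalent, assuming the validator-collection package is installed (the import path follows the record's module, validator_collection/checkers.py).

from validator_collection.checkers import are_equivalent

print(are_equivalent(1, 1, 1, 2))            # False - the last value differs
print(are_equivalent([1, 2, 3], [3, 2, 1]))  # True - same members, order is ignored
print(are_equivalent([1, 1, 1, 2]))          # True - a single argument always passes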
541 | nicolargo/glances | glances/plugins/glances_sensors.py | GlancesGrabSensors.get | def get(self, sensor_type='temperature_core'):
"""Get sensors list."""
self.__update__()
if sensor_type == 'temperature_core':
ret = [s for s in self.sensors_list if s['unit'] == SENSOR_TEMP_UNIT]
elif sensor_type == 'fan_speed':
ret = [s for s in self.sensors_list if s['unit'] == SENSOR_FAN_UNIT]
else:
# Unknown type
logger.debug("Unknown sensor type %s" % sensor_type)
ret = []
return ret | python | def get(self, sensor_type='temperature_core'):
"""Get sensors list."""
self.__update__()
if sensor_type == 'temperature_core':
ret = [s for s in self.sensors_list if s['unit'] == SENSOR_TEMP_UNIT]
elif sensor_type == 'fan_speed':
ret = [s for s in self.sensors_list if s['unit'] == SENSOR_FAN_UNIT]
else:
# Unknown type
logger.debug("Unknown sensor type %s" % sensor_type)
ret = []
return ret | ['def', 'get', '(', 'self', ',', 'sensor_type', '=', "'temperature_core'", ')', ':', 'self', '.', '__update__', '(', ')', 'if', 'sensor_type', '==', "'temperature_core'", ':', 'ret', '=', '[', 's', 'for', 's', 'in', 'self', '.', 'sensors_list', 'if', 's', '[', "'unit'", ']', '==', 'SENSOR_TEMP_UNIT', ']', 'elif', 'sensor_type', '==', "'fan_speed'", ':', 'ret', '=', '[', 's', 'for', 's', 'in', 'self', '.', 'sensors_list', 'if', 's', '[', "'unit'", ']', '==', 'SENSOR_FAN_UNIT', ']', 'else', ':', '# Unknown type', 'logger', '.', 'debug', '(', '"Unknown sensor type %s"', '%', 'sensor_type', ')', 'ret', '=', '[', ']', 'return', 'ret'] | Get sensors list. | ['Get', 'sensors', 'list', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_sensors.py#L311-L322 |
542 | tuomas2/automate | src/automate/system.py | System.register_service | def register_service(self, service):
"""
Register service into the system. Called by Services.
"""
if service not in self.services:
self.services.append(service) | python | def register_service(self, service):
"""
Register service into the system. Called by Services.
"""
if service not in self.services:
self.services.append(service) | ['def', 'register_service', '(', 'self', ',', 'service', ')', ':', 'if', 'service', 'not', 'in', 'self', '.', 'services', ':', 'self', '.', 'services', '.', 'append', '(', 'service', ')'] | Register service into the system. Called by Services. | ['Register', 'service', 'into', 'the', 'system', '.', 'Called', 'by', 'Services', '.'] | train | https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/system.py#L376-L381 |
543 | bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py | Graph.get_hops | def get_hops(self, start, end=None, forward=True):
"""
Computes the hop distance to all nodes centered around a specified node.
        First order neighbours are at hop 1, their neighbours are at hop 2 etc.
Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward
parameter. If the distance between all neighbouring nodes is 1 the hop
number corresponds to the shortest distance between the nodes.
:param start: the starting node
:param end: ending node (optional). When not specified will search the whole graph.
:param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}.
:return: returns a list of tuples where each tuple contains the node and the hop.
Typical usage::
>>> print graph.get_hops(1, 8)
>>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
# node 1 is at 0 hops
# node 2 is at 1 hop
# ...
# node 8 is at 5 hops
"""
if forward:
return list(self._iterbfs(start=start, end=end, forward=True))
else:
return list(self._iterbfs(start=start, end=end, forward=False)) | python | def get_hops(self, start, end=None, forward=True):
"""
Computes the hop distance to all nodes centered around a specified node.
        First order neighbours are at hop 1, their neighbours are at hop 2 etc.
Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward
parameter. If the distance between all neighbouring nodes is 1 the hop
number corresponds to the shortest distance between the nodes.
:param start: the starting node
:param end: ending node (optional). When not specified will search the whole graph.
:param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}.
:return: returns a list of tuples where each tuple contains the node and the hop.
Typical usage::
>>> print graph.get_hops(1, 8)
>>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
# node 1 is at 0 hops
# node 2 is at 1 hop
# ...
# node 8 is at 5 hops
"""
if forward:
return list(self._iterbfs(start=start, end=end, forward=True))
else:
return list(self._iterbfs(start=start, end=end, forward=False)) | ['def', 'get_hops', '(', 'self', ',', 'start', ',', 'end', '=', 'None', ',', 'forward', '=', 'True', ')', ':', 'if', 'forward', ':', 'return', 'list', '(', 'self', '.', '_iterbfs', '(', 'start', '=', 'start', ',', 'end', '=', 'end', ',', 'forward', '=', 'True', ')', ')', 'else', ':', 'return', 'list', '(', 'self', '.', '_iterbfs', '(', 'start', '=', 'start', ',', 'end', '=', 'end', ',', 'forward', '=', 'False', ')', ')'] | Computes the hop distance to all nodes centered around a specified node.
        First order neighbours are at hop 1, their neighbours are at hop 2 etc.
Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward
parameter. If the distance between all neighbouring nodes is 1 the hop
number corresponds to the shortest distance between the nodes.
:param start: the starting node
:param end: ending node (optional). When not specified will search the whole graph.
:param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}.
:return: returns a list of tuples where each tuple contains the node and the hop.
Typical usage::
>>> print graph.get_hops(1, 8)
>>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
# node 1 is at 0 hops
# node 2 is at 1 hop
# ...
# node 8 is at 5 hops | ['Computes', 'the', 'hop', 'distance', 'to', 'all', 'nodes', 'centered', 'around', 'a', 'specified', 'node', '.'] | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L647-L673 |
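A usage sketch reproducing the docstring example above. It assumes the standalone altgraph package provides the same Graph class as the copy vendored in this record, and that the constructor accepts a list of (head, tail) edges.

from altgraph.Graph import Graph

graph = Graph([(1, 2), (1, 3), (2, 4), (3, 4), (4, 5), (5, 7), (7, 8)])
print(graph.get_hops(1, 8))
# e.g. [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
print(graph.get_hops(8, forward=False))  # hop counts walking the edges backwards from node 8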
544 | scnerd/miniutils | miniutils/progress_bar.py | iparallel_progbar | def iparallel_progbar(mapper, iterable, nprocs=None, starmap=False, flatmap=False, shuffle=False,
verbose=True, verbose_flatmap=None, max_cache=-1, **kwargs):
"""Performs a parallel mapping of the given iterable, reporting a progress bar as values get returned. Yields
objects as soon as they're computed, but does not guarantee that they'll be in the correct order.
:param mapper: The mapping function to apply to elements of the iterable
:param iterable: The iterable to map
:param nprocs: The number of processes (defaults to the number of cpu's)
:param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each element of a
tuple as an argument
:param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects
:param shuffle: If true, randomly sort the elements before processing them. This might help provide more uniform
runtimes if processing different objects takes different amounts of time.
:param verbose: Whether or not to print the progress bar
:param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned
:param max_cache: Maximum number of mapped objects to permit in the queue at once
:param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``)
:return: A list of the returned objects, in whatever order they're done being computed
"""
results = _parallel_progbar_launch(mapper, iterable, nprocs, starmap, flatmap, shuffle, verbose,
verbose_flatmap, max_cache, **kwargs)
return (x for i, x in results) | python | def iparallel_progbar(mapper, iterable, nprocs=None, starmap=False, flatmap=False, shuffle=False,
verbose=True, verbose_flatmap=None, max_cache=-1, **kwargs):
"""Performs a parallel mapping of the given iterable, reporting a progress bar as values get returned. Yields
objects as soon as they're computed, but does not guarantee that they'll be in the correct order.
:param mapper: The mapping function to apply to elements of the iterable
:param iterable: The iterable to map
:param nprocs: The number of processes (defaults to the number of cpu's)
:param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each element of a
tuple as an argument
:param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects
:param shuffle: If true, randomly sort the elements before processing them. This might help provide more uniform
runtimes if processing different objects takes different amounts of time.
:param verbose: Whether or not to print the progress bar
:param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned
:param max_cache: Maximum number of mapped objects to permit in the queue at once
:param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``)
:return: A list of the returned objects, in whatever order they're done being computed
"""
results = _parallel_progbar_launch(mapper, iterable, nprocs, starmap, flatmap, shuffle, verbose,
verbose_flatmap, max_cache, **kwargs)
return (x for i, x in results) | ['def', 'iparallel_progbar', '(', 'mapper', ',', 'iterable', ',', 'nprocs', '=', 'None', ',', 'starmap', '=', 'False', ',', 'flatmap', '=', 'False', ',', 'shuffle', '=', 'False', ',', 'verbose', '=', 'True', ',', 'verbose_flatmap', '=', 'None', ',', 'max_cache', '=', '-', '1', ',', '*', '*', 'kwargs', ')', ':', 'results', '=', '_parallel_progbar_launch', '(', 'mapper', ',', 'iterable', ',', 'nprocs', ',', 'starmap', ',', 'flatmap', ',', 'shuffle', ',', 'verbose', ',', 'verbose_flatmap', ',', 'max_cache', ',', '*', '*', 'kwargs', ')', 'return', '(', 'x', 'for', 'i', ',', 'x', 'in', 'results', ')'] | Performs a parallel mapping of the given iterable, reporting a progress bar as values get returned. Yields
objects as soon as they're computed, but does not guarantee that they'll be in the correct order.
:param mapper: The mapping function to apply to elements of the iterable
:param iterable: The iterable to map
:param nprocs: The number of processes (defaults to the number of cpu's)
:param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each element of a
tuple as an argument
:param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects
:param shuffle: If true, randomly sort the elements before processing them. This might help provide more uniform
runtimes if processing different objects takes different amounts of time.
:param verbose: Whether or not to print the progress bar
:param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned
:param max_cache: Maximum number of mapped objects to permit in the queue at once
:param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``)
:return: A list of the returned objects, in whatever order they're done being computed | ['Performs', 'a', 'parallel', 'mapping', 'of', 'the', 'given', 'iterable', 'reporting', 'a', 'progress', 'bar', 'as', 'values', 'get', 'returned', '.', 'Yields', 'objects', 'as', 'soon', 'as', 'they', 're', 'computed', 'but', 'does', 'not', 'guarantee', 'that', 'they', 'll', 'be', 'in', 'the', 'correct', 'order', '.'] | train | https://github.com/scnerd/miniutils/blob/fe927e26afc5877416dead28dabdf6604387f42c/miniutils/progress_bar.py#L144-L166 |
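A usage sketch for iparallel_progbar, assuming the miniutils package is installed; square() is an illustrative worker, and the __main__ guard is there because the mapping runs in separate processes. Results arrive in completion order, so only the set of values is compared.

from miniutils.progress_bar import iparallel_progbar

def square(x):
    return x * x

if __name__ == '__main__':
    results = set(iparallel_progbar(square, range(10), nprocs=2, verbose=False))
    assert results == {x * x for x in range(10)}
    print(sorted(results))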
545 | CivicSpleen/ckcache | ckcache/filesystem.py | FsLimitedCache.get | def get(self, rel_path, cb=None):
'''Return the file path referenced but rel_path, or None if
it can't be found. If an upstream is declared, it will try to get the file
from the upstream before declaring failure.
'''
import shutil
global_logger.debug(
"LC {} get looking for {}".format(
self.repo_id,
rel_path))
path = os.path.join(self.cache_dir, rel_path)
# If is already exists in the repo, just return it.
if os.path.exists(path):
if not os.path.isfile(path):
raise ValueError("Path does not point to a file")
global_logger.debug(
"LC {} get {} found ".format(
self.repo_id,
path))
return path
if not self.upstream:
# If we don't have an upstream, then we are done.
return None
stream = self.upstream.get_stream(rel_path, cb=cb)
if not stream:
global_logger.debug(
"LC {} get not found in upstream ()".format(
self.repo_id,
rel_path))
return None
# Got a stream from upstream, so put the file in this cache.
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
# Copy the file from the lower cache into this cache.
with open(path, 'w') as f:
shutil.copyfileobj(stream, f)
# Since we've added a file, must keep track of the sizes.
size = os.path.getsize(path)
self._free_up_space(size, this_rel_path=rel_path)
self.add_record(rel_path, size)
stream.close()
if not os.path.exists(path):
raise Exception("Failed to copy upstream data to {} ".format(path))
global_logger.debug(
"LC {} got return from upstream {} -> {} ".format(self.repo_id, rel_path, path))
return path | python | def get(self, rel_path, cb=None):
'''Return the file path referenced but rel_path, or None if
it can't be found. If an upstream is declared, it will try to get the file
from the upstream before declaring failure.
'''
import shutil
global_logger.debug(
"LC {} get looking for {}".format(
self.repo_id,
rel_path))
path = os.path.join(self.cache_dir, rel_path)
# If is already exists in the repo, just return it.
if os.path.exists(path):
if not os.path.isfile(path):
raise ValueError("Path does not point to a file")
global_logger.debug(
"LC {} get {} found ".format(
self.repo_id,
path))
return path
if not self.upstream:
# If we don't have an upstream, then we are done.
return None
stream = self.upstream.get_stream(rel_path, cb=cb)
if not stream:
global_logger.debug(
"LC {} get not found in upstream ()".format(
self.repo_id,
rel_path))
return None
# Got a stream from upstream, so put the file in this cache.
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
# Copy the file from the lower cache into this cache.
with open(path, 'w') as f:
shutil.copyfileobj(stream, f)
# Since we've added a file, must keep track of the sizes.
size = os.path.getsize(path)
self._free_up_space(size, this_rel_path=rel_path)
self.add_record(rel_path, size)
stream.close()
if not os.path.exists(path):
raise Exception("Failed to copy upstream data to {} ".format(path))
global_logger.debug(
"LC {} got return from upstream {} -> {} ".format(self.repo_id, rel_path, path))
return path | ['def', 'get', '(', 'self', ',', 'rel_path', ',', 'cb', '=', 'None', ')', ':', 'import', 'shutil', 'global_logger', '.', 'debug', '(', '"LC {} get looking for {}"', '.', 'format', '(', 'self', '.', 'repo_id', ',', 'rel_path', ')', ')', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'cache_dir', ',', 'rel_path', ')', '# If is already exists in the repo, just return it.', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'raise', 'ValueError', '(', '"Path does not point to a file"', ')', 'global_logger', '.', 'debug', '(', '"LC {} get {} found "', '.', 'format', '(', 'self', '.', 'repo_id', ',', 'path', ')', ')', 'return', 'path', 'if', 'not', 'self', '.', 'upstream', ':', "# If we don't have an upstream, then we are done.", 'return', 'None', 'stream', '=', 'self', '.', 'upstream', '.', 'get_stream', '(', 'rel_path', ',', 'cb', '=', 'cb', ')', 'if', 'not', 'stream', ':', 'global_logger', '.', 'debug', '(', '"LC {} get not found in upstream ()"', '.', 'format', '(', 'self', '.', 'repo_id', ',', 'rel_path', ')', ')', 'return', 'None', '# Got a stream from upstream, so put the file in this cache.', 'dirname', '=', 'os', '.', 'path', '.', 'dirname', '(', 'path', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'dirname', ')', ':', 'os', '.', 'makedirs', '(', 'dirname', ')', '# Copy the file from the lower cache into this cache.', 'with', 'open', '(', 'path', ',', "'w'", ')', 'as', 'f', ':', 'shutil', '.', 'copyfileobj', '(', 'stream', ',', 'f', ')', "# Since we've added a file, must keep track of the sizes.", 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'path', ')', 'self', '.', '_free_up_space', '(', 'size', ',', 'this_rel_path', '=', 'rel_path', ')', 'self', '.', 'add_record', '(', 'rel_path', ',', 'size', ')', 'stream', '.', 'close', '(', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'raise', 'Exception', '(', '"Failed to copy upstream data to {} "', '.', 'format', '(', 'path', ')', ')', 'global_logger', '.', 'debug', '(', '"LC {} got return from upstream {} -> {} "', '.', 'format', '(', 'self', '.', 'repo_id', ',', 'rel_path', ',', 'path', ')', ')', 'return', 'path'] | Return the file path referenced but rel_path, or None if
it can't be found. If an upstream is declared, it will try to get the file
from the upstream before declaring failure. | ['Return', 'the', 'file', 'path', 'referenced', 'but', 'rel_path', 'or', 'None', 'if', 'it', 'can', 't', 'be', 'found', '.', 'If', 'an', 'upstream', 'is', 'declared', 'it', 'will', 'try', 'to', 'get', 'the', 'file', 'from', 'the', 'upstream', 'before', 'declaring', 'failure', '.'] | train | https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/filesystem.py#L597-L657 |
546 | openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py | FabricBase.get_next_create_state | def get_next_create_state(self, state, ret):
"""Return the next create state from previous state. """
if ret:
if state == fw_const.FABRIC_PREPARE_DONE_STATE:
return state
else:
return state + 1
else:
return state | python | def get_next_create_state(self, state, ret):
"""Return the next create state from previous state. """
if ret:
if state == fw_const.FABRIC_PREPARE_DONE_STATE:
return state
else:
return state + 1
else:
return state | ['def', 'get_next_create_state', '(', 'self', ',', 'state', ',', 'ret', ')', ':', 'if', 'ret', ':', 'if', 'state', '==', 'fw_const', '.', 'FABRIC_PREPARE_DONE_STATE', ':', 'return', 'state', 'else', ':', 'return', 'state', '+', '1', 'else', ':', 'return', 'state'] | Return the next create state from previous state. | ['Return', 'the', 'next', 'create', 'state', 'from', 'previous', 'state', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1463-L1471 |
547 | metavee/batchproc | batchproc/core.py | BatchProcessor.load_tasks | def load_tasks(self, cmd, params, args):
"""implements loader interface, return (tasks, config)"""
return generate_tasks('taskname that shows up in log', self._gen_tasks()), self.DOIT_CONFIG | python | def load_tasks(self, cmd, params, args):
"""implements loader interface, return (tasks, config)"""
return generate_tasks('taskname that shows up in log', self._gen_tasks()), self.DOIT_CONFIG | ['def', 'load_tasks', '(', 'self', ',', 'cmd', ',', 'params', ',', 'args', ')', ':', 'return', 'generate_tasks', '(', "'taskname that shows up in log'", ',', 'self', '.', '_gen_tasks', '(', ')', ')', ',', 'self', '.', 'DOIT_CONFIG'] | implements loader interface, return (tasks, config) | ['implements', 'loader', 'interface', 'return', '(', 'tasks', 'config', ')'] | train | https://github.com/metavee/batchproc/blob/aa084a2ac8ab7950f7a7d3adb54b0cf010c6a935/batchproc/core.py#L100-L102 |
548 | crytic/slither | slither/core/declarations/function.py | Function.functions_shadowed | def functions_shadowed(self):
'''
Return the list of functions shadowed
Returns:
list(core.Function)
'''
candidates = [c.functions_not_inherited for c in self.contract.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == self.full_name] | python | def functions_shadowed(self):
'''
Return the list of functions shadowed
Returns:
list(core.Function)
'''
candidates = [c.functions_not_inherited for c in self.contract.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == self.full_name] | ['def', 'functions_shadowed', '(', 'self', ')', ':', 'candidates', '=', '[', 'c', '.', 'functions_not_inherited', 'for', 'c', 'in', 'self', '.', 'contract', '.', 'inheritance', ']', 'candidates', '=', '[', 'candidate', 'for', 'sublist', 'in', 'candidates', 'for', 'candidate', 'in', 'sublist', ']', 'return', '[', 'f', 'for', 'f', 'in', 'candidates', 'if', 'f', '.', 'full_name', '==', 'self', '.', 'full_name', ']'] | Return the list of functions shadowed
Returns:
list(core.Function) | ['Return', 'the', 'list', 'of', 'functions', 'shadowed', 'Returns', ':', 'list', '(', 'core', '.', 'Function', ')'] | train | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/function.py#L553-L562 |
549 | Kozea/pygal | pygal/graph/radar.py | Radar._set_view | def _set_view(self):
"""Assign a view to current graph"""
if self.logarithmic:
view_class = PolarLogView
else:
view_class = PolarView
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
) | python | def _set_view(self):
"""Assign a view to current graph"""
if self.logarithmic:
view_class = PolarLogView
else:
view_class = PolarView
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
) | ['def', '_set_view', '(', 'self', ')', ':', 'if', 'self', '.', 'logarithmic', ':', 'view_class', '=', 'PolarLogView', 'else', ':', 'view_class', '=', 'PolarView', 'self', '.', 'view', '=', 'view_class', '(', 'self', '.', 'width', '-', 'self', '.', 'margin_box', '.', 'x', ',', 'self', '.', 'height', '-', 'self', '.', 'margin_box', '.', 'y', ',', 'self', '.', '_box', ')'] | Assign a view to current graph | ['Assign', 'a', 'view', 'to', 'current', 'graph'] | train | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/radar.py#L59-L69 |
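_set_view() above is internal to pygal's rendering pipeline; the short sketch below exercises it through the public Radar API (labels and series values are illustrative).

import pygal

chart = pygal.Radar()                    # logarithmic=True would select PolarLogView instead of PolarView
chart.x_labels = ['requests', 'errors', 'latency', 'saturation']
chart.add('service A', [5, 2, 3, 1])
chart.add('service B', [3, 4, 1, 2])
svg = chart.render()                     # _set_view() is called while rendering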
550 | MartinThoma/hwrt | hwrt/segmentation/segmentation.py | get_bb_intersections | def get_bb_intersections(recording):
"""
Get all intersections of the bounding boxes of strokes.
Parameters
----------
recording : list of lists of integers
Returns
-------
A symmetrical matrix which indicates if two bounding boxes intersect.
"""
intersections = numpy.zeros((len(recording), len(recording)),
dtype=bool)
for i in range(len(recording)-1):
a = geometry.get_bounding_box(recording[i]).grow(0.2)
for j in range(i+1, len(recording)):
b = geometry.get_bounding_box(recording[j]).grow(0.2)
intersections[i][j] = geometry.do_bb_intersect(a, b)
intersections[j][i] = intersections[i][j]
return intersections | python | def get_bb_intersections(recording):
"""
Get all intersections of the bounding boxes of strokes.
Parameters
----------
recording : list of lists of integers
Returns
-------
A symmetrical matrix which indicates if two bounding boxes intersect.
"""
intersections = numpy.zeros((len(recording), len(recording)),
dtype=bool)
for i in range(len(recording)-1):
a = geometry.get_bounding_box(recording[i]).grow(0.2)
for j in range(i+1, len(recording)):
b = geometry.get_bounding_box(recording[j]).grow(0.2)
intersections[i][j] = geometry.do_bb_intersect(a, b)
intersections[j][i] = intersections[i][j]
return intersections | ['def', 'get_bb_intersections', '(', 'recording', ')', ':', 'intersections', '=', 'numpy', '.', 'zeros', '(', '(', 'len', '(', 'recording', ')', ',', 'len', '(', 'recording', ')', ')', ',', 'dtype', '=', 'bool', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'recording', ')', '-', '1', ')', ':', 'a', '=', 'geometry', '.', 'get_bounding_box', '(', 'recording', '[', 'i', ']', ')', '.', 'grow', '(', '0.2', ')', 'for', 'j', 'in', 'range', '(', 'i', '+', '1', ',', 'len', '(', 'recording', ')', ')', ':', 'b', '=', 'geometry', '.', 'get_bounding_box', '(', 'recording', '[', 'j', ']', ')', '.', 'grow', '(', '0.2', ')', 'intersections', '[', 'i', ']', '[', 'j', ']', '=', 'geometry', '.', 'do_bb_intersect', '(', 'a', ',', 'b', ')', 'intersections', '[', 'j', ']', '[', 'i', ']', '=', 'intersections', '[', 'i', ']', '[', 'j', ']', 'return', 'intersections'] | Get all intersections of the bounding boxes of strokes.
Parameters
----------
recording : list of lists of integers
Returns
-------
A symmetrical matrix which indicates if two bounding boxes intersect. | ['Get', 'all', 'intersections', 'of', 'the', 'bounding', 'boxes', 'of', 'strokes', '.'] | train | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/segmentation/segmentation.py#L1006-L1026 |
551 | iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py | Entry.get_text_contents | def get_text_contents(self):
"""Fetch the decoded text contents of a Unicode encoded Entry.
Since this should return the text contents from the file
system, we check to see into what sort of subclass we should
morph this Entry."""
try:
self = self.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_text_contents() in emitters and
# the like (e.g. in qt.py) don't have to disambiguate by
# hand or catch the exception.
return ''
else:
return self.get_text_contents() | python | def get_text_contents(self):
"""Fetch the decoded text contents of a Unicode encoded Entry.
Since this should return the text contents from the file
system, we check to see into what sort of subclass we should
morph this Entry."""
try:
self = self.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_text_contents() in emitters and
# the like (e.g. in qt.py) don't have to disambiguate by
# hand or catch the exception.
return ''
else:
return self.get_text_contents() | ['def', 'get_text_contents', '(', 'self', ')', ':', 'try', ':', 'self', '=', 'self', '.', 'disambiguate', '(', 'must_exist', '=', '1', ')', 'except', 'SCons', '.', 'Errors', '.', 'UserError', ':', '# There was nothing on disk with which to disambiguate', '# this entry. Leave it as an Entry, but return a null', '# string so calls to get_text_contents() in emitters and', "# the like (e.g. in qt.py) don't have to disambiguate by", '# hand or catch the exception.', 'return', "''", 'else', ':', 'return', 'self', '.', 'get_text_contents', '(', ')'] | Fetch the decoded text contents of a Unicode encoded Entry.
Since this should return the text contents from the file
system, we check to see into what sort of subclass we should
morph this Entry. | ['Fetch', 'the', 'decoded', 'text', 'contents', 'of', 'a', 'Unicode', 'encoded', 'Entry', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L989-L1005 |
552 | mitsei/dlkit | dlkit/json_/repository/sessions.py | RepositoryQuerySession.get_repositories_by_query | def get_repositories_by_query(self, repository_query):
"""Gets a list of ``Repositories`` matching the given repository query.
arg: repository_query (osid.repository.RepositoryQuery): the
repository query
return: (osid.repository.RepositoryList) - the returned
``RepositoryList``
raise: NullArgument - ``repository_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``repository_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(repository_query)
query_terms = dict(repository_query._query_terms)
collection = JSONClientValidated('repository',
collection='Repository',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.RepositoryList(result, runtime=self._runtime) | python | def get_repositories_by_query(self, repository_query):
"""Gets a list of ``Repositories`` matching the given repository query.
arg: repository_query (osid.repository.RepositoryQuery): the
repository query
return: (osid.repository.RepositoryList) - the returned
``RepositoryList``
raise: NullArgument - ``repository_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``repository_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(repository_query)
query_terms = dict(repository_query._query_terms)
collection = JSONClientValidated('repository',
collection='Repository',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.RepositoryList(result, runtime=self._runtime) | ['def', 'get_repositories_by_query', '(', 'self', ',', 'repository_query', ')', ':', '# Implemented from template for', '# osid.resource.BinQuerySession.get_bins_by_query_template', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'get_catalogs_by_query', '(', 'repository_query', ')', 'query_terms', '=', 'dict', '(', 'repository_query', '.', '_query_terms', ')', 'collection', '=', 'JSONClientValidated', '(', "'repository'", ',', 'collection', '=', "'Repository'", ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'result', '=', 'collection', '.', 'find', '(', 'query_terms', ')', '.', 'sort', '(', "'_id'", ',', 'DESCENDING', ')', 'return', 'objects', '.', 'RepositoryList', '(', 'result', ',', 'runtime', '=', 'self', '.', '_runtime', ')'] | Gets a list of ``Repositories`` matching the given repository query.
arg: repository_query (osid.repository.RepositoryQuery): the
repository query
return: (osid.repository.RepositoryList) - the returned
``RepositoryList``
raise: NullArgument - ``repository_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``repository_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'a', 'list', 'of', 'Repositories', 'matching', 'the', 'given', 'repository', 'query', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L4903-L4928 |
553 | RudolfCardinal/pythonlib | cardinal_pythonlib/modules.py | contains_c_extension | def contains_c_extension(module: ModuleType,
import_all_submodules: bool = True,
include_external_imports: bool = False,
seen: List[ModuleType] = None) -> bool:
"""
Extends :func:`is_c_extension` by asking: is this module, or any of its
submodules, a C extension?
Args:
module: Previously imported module object to be tested.
import_all_submodules: explicitly import all submodules of this module?
include_external_imports: check modules in other packages that this
module imports?
seen: used internally for recursion (to deal with recursive modules);
should be ``None`` when called by users
Returns:
bool: ``True`` only if this module or one of its submodules is a C
extension.
Examples:
.. code-block:: python
import logging
from cardinal_pythonlib.modules import contains_c_extension
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
import _elementtree as et
import os
import arrow
import alembic
import django
import numpy
import numpy.core.multiarray as numpy_multiarray
log = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG) # be verbose
main_only_quicksetup_rootlogger(level=logging.DEBUG)
contains_c_extension(os) # False
contains_c_extension(et) # False
contains_c_extension(numpy) # True -- different from is_c_extension()
contains_c_extension(numpy_multiarray) # True
contains_c_extension(arrow) # False
contains_c_extension(alembic) # False
contains_c_extension(alembic, include_external_imports=True) # True
# ... this example shows that Alembic imports hashlib, which can import
# _hashlib, which is a C extension; however, that doesn't stop us (for
# example) installing Alembic on a machine with no C compiler
contains_c_extension(django)
""" # noqa
assert inspect.ismodule(module), '"{}" not a module.'.format(module)
if seen is None: # only true for the top-level call
seen = [] # type: List[ModuleType]
if module in seen: # modules can "contain" themselves
# already inspected; avoid infinite loops
return False
seen.append(module)
# Check the thing we were asked about
is_c_ext = is_c_extension(module)
log.info("Is module {!r} a C extension? {}", module, is_c_ext)
if is_c_ext:
return True
if is_builtin_module(module):
# built-in, therefore we stop searching it
return False
# Now check any children, in a couple of ways
top_level_module = seen[0]
top_path = os.path.dirname(top_level_module.__file__)
# Recurse using dir(). This picks up modules that are automatically
# imported by our top-level model. But it won't pick up all submodules;
# try e.g. for django.
for candidate_name in dir(module):
candidate = getattr(module, candidate_name)
# noinspection PyBroadException
try:
if not inspect.ismodule(candidate):
# not a module
continue
except Exception:
# e.g. a Django module that won't import until we configure its
# settings
log.error("Failed to test ismodule() status of {!r}", candidate)
continue
if is_builtin_module(candidate):
# built-in, therefore we stop searching it
continue
candidate_fname = getattr(candidate, "__file__")
if not include_external_imports:
if os.path.commonpath([top_path, candidate_fname]) != top_path:
log.debug("Skipping, not within the top-level module's "
"directory: {!r}", candidate)
continue
# Recurse:
if contains_c_extension(
module=candidate,
import_all_submodules=False, # only done at the top level, below # noqa
include_external_imports=include_external_imports,
seen=seen):
return True
if import_all_submodules:
if not is_module_a_package(module):
log.debug("Top-level module is not a package: {!r}", module)
return False
# Otherwise, for things like Django, we need to recurse in a different
# way to scan everything.
# See https://stackoverflow.com/questions/3365740/how-to-import-all-submodules. # noqa
log.debug("Walking path: {!r}", top_path)
# noinspection PyBroadException
try:
for loader, module_name, is_pkg in pkgutil.walk_packages([top_path]): # noqa
if not is_pkg:
log.debug("Skipping, not a package: {!r}", module_name)
continue
log.debug("Manually importing: {!r}", module_name)
# noinspection PyBroadException
try:
candidate = loader.find_module(module_name)\
.load_module(module_name) # noqa
except Exception:
# e.g. Alembic "autogenerate" gives: "ValueError: attempted
# relative import beyond top-level package"; or Django
# "django.core.exceptions.ImproperlyConfigured"
log.error("Package failed to import: {!r}", module_name)
continue
if contains_c_extension(
module=candidate,
import_all_submodules=False, # only done at the top level # noqa
include_external_imports=include_external_imports,
seen=seen):
return True
except Exception:
log.error("Unable to walk packages further; no C extensions "
"detected so far!")
raise
return False | python | def contains_c_extension(module: ModuleType,
import_all_submodules: bool = True,
include_external_imports: bool = False,
seen: List[ModuleType] = None) -> bool:
"""
Extends :func:`is_c_extension` by asking: is this module, or any of its
submodules, a C extension?
Args:
module: Previously imported module object to be tested.
import_all_submodules: explicitly import all submodules of this module?
include_external_imports: check modules in other packages that this
module imports?
seen: used internally for recursion (to deal with recursive modules);
should be ``None`` when called by users
Returns:
bool: ``True`` only if this module or one of its submodules is a C
extension.
Examples:
.. code-block:: python
import logging
from cardinal_pythonlib.modules import contains_c_extension
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
import _elementtree as et
import os
import arrow
import alembic
import django
import numpy
import numpy.core.multiarray as numpy_multiarray
log = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG) # be verbose
main_only_quicksetup_rootlogger(level=logging.DEBUG)
contains_c_extension(os) # False
contains_c_extension(et) # False
contains_c_extension(numpy) # True -- different from is_c_extension()
contains_c_extension(numpy_multiarray) # True
contains_c_extension(arrow) # False
contains_c_extension(alembic) # False
contains_c_extension(alembic, include_external_imports=True) # True
# ... this example shows that Alembic imports hashlib, which can import
# _hashlib, which is a C extension; however, that doesn't stop us (for
# example) installing Alembic on a machine with no C compiler
contains_c_extension(django)
""" # noqa
assert inspect.ismodule(module), '"{}" not a module.'.format(module)
if seen is None: # only true for the top-level call
seen = [] # type: List[ModuleType]
if module in seen: # modules can "contain" themselves
# already inspected; avoid infinite loops
return False
seen.append(module)
# Check the thing we were asked about
is_c_ext = is_c_extension(module)
log.info("Is module {!r} a C extension? {}", module, is_c_ext)
if is_c_ext:
return True
if is_builtin_module(module):
# built-in, therefore we stop searching it
return False
# Now check any children, in a couple of ways
top_level_module = seen[0]
top_path = os.path.dirname(top_level_module.__file__)
# Recurse using dir(). This picks up modules that are automatically
# imported by our top-level module. But it won't pick up all submodules;
# try e.g. for django.
for candidate_name in dir(module):
candidate = getattr(module, candidate_name)
# noinspection PyBroadException
try:
if not inspect.ismodule(candidate):
# not a module
continue
except Exception:
# e.g. a Django module that won't import until we configure its
# settings
log.error("Failed to test ismodule() status of {!r}", candidate)
continue
if is_builtin_module(candidate):
# built-in, therefore we stop searching it
continue
candidate_fname = getattr(candidate, "__file__")
if not include_external_imports:
if os.path.commonpath([top_path, candidate_fname]) != top_path:
log.debug("Skipping, not within the top-level module's "
"directory: {!r}", candidate)
continue
# Recurse:
if contains_c_extension(
module=candidate,
import_all_submodules=False, # only done at the top level, below # noqa
include_external_imports=include_external_imports,
seen=seen):
return True
if import_all_submodules:
if not is_module_a_package(module):
log.debug("Top-level module is not a package: {!r}", module)
return False
# Otherwise, for things like Django, we need to recurse in a different
# way to scan everything.
# See https://stackoverflow.com/questions/3365740/how-to-import-all-submodules. # noqa
log.debug("Walking path: {!r}", top_path)
# noinspection PyBroadException
try:
for loader, module_name, is_pkg in pkgutil.walk_packages([top_path]): # noqa
if not is_pkg:
log.debug("Skipping, not a package: {!r}", module_name)
continue
log.debug("Manually importing: {!r}", module_name)
# noinspection PyBroadException
try:
candidate = loader.find_module(module_name)\
.load_module(module_name) # noqa
except Exception:
# e.g. Alembic "autogenerate" gives: "ValueError: attempted
# relative import beyond top-level package"; or Django
# "django.core.exceptions.ImproperlyConfigured"
log.error("Package failed to import: {!r}", module_name)
continue
if contains_c_extension(
module=candidate,
import_all_submodules=False, # only done at the top level # noqa
include_external_imports=include_external_imports,
seen=seen):
return True
except Exception:
log.error("Unable to walk packages further; no C extensions "
"detected so far!")
raise
return False | ['def', 'contains_c_extension', '(', 'module', ':', 'ModuleType', ',', 'import_all_submodules', ':', 'bool', '=', 'True', ',', 'include_external_imports', ':', 'bool', '=', 'False', ',', 'seen', ':', 'List', '[', 'ModuleType', ']', '=', 'None', ')', '->', 'bool', ':', '# noqa', 'assert', 'inspect', '.', 'ismodule', '(', 'module', ')', ',', '\'"{}" not a module.\'', '.', 'format', '(', 'module', ')', 'if', 'seen', 'is', 'None', ':', '# only true for the top-level call', 'seen', '=', '[', ']', '# type: List[ModuleType]', 'if', 'module', 'in', 'seen', ':', '# modules can "contain" themselves', '# already inspected; avoid infinite loops', 'return', 'False', 'seen', '.', 'append', '(', 'module', ')', '# Check the thing we were asked about', 'is_c_ext', '=', 'is_c_extension', '(', 'module', ')', 'log', '.', 'info', '(', '"Is module {!r} a C extension? {}"', ',', 'module', ',', 'is_c_ext', ')', 'if', 'is_c_ext', ':', 'return', 'True', 'if', 'is_builtin_module', '(', 'module', ')', ':', '# built-in, therefore we stop searching it', 'return', 'False', '# Now check any children, in a couple of ways', 'top_level_module', '=', 'seen', '[', '0', ']', 'top_path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'top_level_module', '.', '__file__', ')', '# Recurse using dir(). This picks up modules that are automatically', "# imported by our top-level model. But it won't pick up all submodules;", '# try e.g. for django.', 'for', 'candidate_name', 'in', 'dir', '(', 'module', ')', ':', 'candidate', '=', 'getattr', '(', 'module', ',', 'candidate_name', ')', '# noinspection PyBroadException', 'try', ':', 'if', 'not', 'inspect', '.', 'ismodule', '(', 'candidate', ')', ':', '# not a module', 'continue', 'except', 'Exception', ':', "# e.g. a Django module that won't import until we configure its", '# settings', 'log', '.', 'error', '(', '"Failed to test ismodule() status of {!r}"', ',', 'candidate', ')', 'continue', 'if', 'is_builtin_module', '(', 'candidate', ')', ':', '# built-in, therefore we stop searching it', 'continue', 'candidate_fname', '=', 'getattr', '(', 'candidate', ',', '"__file__"', ')', 'if', 'not', 'include_external_imports', ':', 'if', 'os', '.', 'path', '.', 'commonpath', '(', '[', 'top_path', ',', 'candidate_fname', ']', ')', '!=', 'top_path', ':', 'log', '.', 'debug', '(', '"Skipping, not within the top-level module\'s "', '"directory: {!r}"', ',', 'candidate', ')', 'continue', '# Recurse:', 'if', 'contains_c_extension', '(', 'module', '=', 'candidate', ',', 'import_all_submodules', '=', 'False', ',', '# only done at the top level, below # noqa', 'include_external_imports', '=', 'include_external_imports', ',', 'seen', '=', 'seen', ')', ':', 'return', 'True', 'if', 'import_all_submodules', ':', 'if', 'not', 'is_module_a_package', '(', 'module', ')', ':', 'log', '.', 'debug', '(', '"Top-level module is not a package: {!r}"', ',', 'module', ')', 'return', 'False', '# Otherwise, for things like Django, we need to recurse in a different', '# way to scan everything.', '# See https://stackoverflow.com/questions/3365740/how-to-import-all-submodules. 
# noqa', 'log', '.', 'debug', '(', '"Walking path: {!r}"', ',', 'top_path', ')', '# noinspection PyBroadException', 'try', ':', 'for', 'loader', ',', 'module_name', ',', 'is_pkg', 'in', 'pkgutil', '.', 'walk_packages', '(', '[', 'top_path', ']', ')', ':', '# noqa', 'if', 'not', 'is_pkg', ':', 'log', '.', 'debug', '(', '"Skipping, not a package: {!r}"', ',', 'module_name', ')', 'continue', 'log', '.', 'debug', '(', '"Manually importing: {!r}"', ',', 'module_name', ')', '# noinspection PyBroadException', 'try', ':', 'candidate', '=', 'loader', '.', 'find_module', '(', 'module_name', ')', '.', 'load_module', '(', 'module_name', ')', '# noqa', 'except', 'Exception', ':', '# e.g. Alembic "autogenerate" gives: "ValueError: attempted', '# relative import beyond top-level package"; or Django', '# "django.core.exceptions.ImproperlyConfigured"', 'log', '.', 'error', '(', '"Package failed to import: {!r}"', ',', 'module_name', ')', 'continue', 'if', 'contains_c_extension', '(', 'module', '=', 'candidate', ',', 'import_all_submodules', '=', 'False', ',', '# only done at the top level # noqa', 'include_external_imports', '=', 'include_external_imports', ',', 'seen', '=', 'seen', ')', ':', 'return', 'True', 'except', 'Exception', ':', 'log', '.', 'error', '(', '"Unable to walk packages further; no C extensions "', '"detected so far!"', ')', 'raise', 'return', 'False'] | Extends :func:`is_c_extension` by asking: is this module, or any of its
submodules, a C extension?
Args:
module: Previously imported module object to be tested.
import_all_submodules: explicitly import all submodules of this module?
include_external_imports: check modules in other packages that this
module imports?
seen: used internally for recursion (to deal with recursive modules);
should be ``None`` when called by users
Returns:
bool: ``True`` only if this module or one of its submodules is a C
extension.
Examples:
.. code-block:: python
import logging
from cardinal_pythonlib.modules import contains_c_extension
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
import _elementtree as et
import os
import arrow
import alembic
import django
import numpy
import numpy.core.multiarray as numpy_multiarray
log = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG) # be verbose
main_only_quicksetup_rootlogger(level=logging.DEBUG)
contains_c_extension(os) # False
contains_c_extension(et) # False
contains_c_extension(numpy) # True -- different from is_c_extension()
contains_c_extension(numpy_multiarray) # True
contains_c_extension(arrow) # False
contains_c_extension(alembic) # False
contains_c_extension(alembic, include_external_imports=True) # True
# ... this example shows that Alembic imports hashlib, which can import
# _hashlib, which is a C extension; however, that doesn't stop us (for
# example) installing Alembic on a machine with no C compiler
contains_c_extension(django) | ['Extends', ':', 'func', ':', 'is_c_extension', 'by', 'asking', ':', 'is', 'this', 'module', 'or', 'any', 'of', 'its', 'submodules', 'a', 'C', 'extension?'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/modules.py#L157-L308 |
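
For quick orientation, a minimal standalone sketch of the simpler per-module test that the recursive check above builds on: deciding whether one already-imported module was loaded from a compiled extension file. It uses a file-suffix heuristic via importlib.machinery and is illustrative only; it is not necessarily how cardinal_pythonlib's is_c_extension is actually implemented.

# Illustrative only -- a suffix-based heuristic, which may differ from
# cardinal_pythonlib's real is_c_extension() implementation.
import inspect
from importlib.machinery import EXTENSION_SUFFIXES
from types import ModuleType

def looks_like_c_extension(module: ModuleType) -> bool:
    """Heuristic: was this module loaded from a compiled-extension file?"""
    assert inspect.ismodule(module), '"{}" not a module.'.format(module)
    filename = getattr(module, "__file__", None)
    if filename is None:  # e.g. built-in modules have no __file__
        return False
    return any(filename.endswith(suffix) for suffix in EXTENSION_SUFFIXES)

if __name__ == "__main__":
    import os
    import math
    print(looks_like_c_extension(os))    # expect False: os.py is pure Python
    print(looks_like_c_extension(math))  # usually False too: math is typically built into the interpreter
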
554 | trombastic/PyScada | pyscada/utils/scheduler.py | Scheduler.stop | def stop(self, sig=signal.SIGTERM):
"""
stop the scheduler and stop all processes
"""
if self.pid is None:
self.pid = self.read_pid()
if self.pid is None:
sp = BackgroundProcess.objects.filter(pk=1).first()
if sp:
self.pid = sp.pid
if self.pid is None or self.pid == 0:
logger.error("can't determine process id exiting.")
return False
if self.pid != getpid():
# calling from outside the daemon instance
logger.debug('send sigterm to daemon')
try:
kill(self.pid, sig)
return True
except OSError as e:
if e.errno == errno.ESRCH:
return False
else:
return False
logger.debug('start termination of the daemon')
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='stopping..')
timeout = time() + 60 # wait max 60 seconds
self.kill_processes(signal.SIGTERM)
while self.PROCESSES and time() < timeout:
self.kill_processes(signal.SIGTERM)
sleep(1)
self.kill_processes(signal.SIGKILL)
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='stopped')
logger.debug('termination of the daemon done')
return True | python | def stop(self, sig=signal.SIGTERM):
"""
stop the scheduler and stop all processes
"""
if self.pid is None:
self.pid = self.read_pid()
if self.pid is None:
sp = BackgroundProcess.objects.filter(pk=1).first()
if sp:
self.pid = sp.pid
if self.pid is None or self.pid == 0:
logger.error("can't determine process id exiting.")
return False
if self.pid != getpid():
# calling from outside the daemon instance
logger.debug('send sigterm to daemon')
try:
kill(self.pid, sig)
return True
except OSError as e:
if e.errno == errno.ESRCH:
return False
else:
return False
logger.debug('start termination of the daemon')
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='stopping..')
timeout = time() + 60 # wait max 60 seconds
self.kill_processes(signal.SIGTERM)
while self.PROCESSES and time() < timeout:
self.kill_processes(signal.SIGTERM)
sleep(1)
self.kill_processes(signal.SIGKILL)
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='stopped')
logger.debug('termination of the daemon done')
return True | ['def', 'stop', '(', 'self', ',', 'sig', '=', 'signal', '.', 'SIGTERM', ')', ':', 'if', 'self', '.', 'pid', 'is', 'None', ':', 'self', '.', 'pid', '=', 'self', '.', 'read_pid', '(', ')', 'if', 'self', '.', 'pid', 'is', 'None', ':', 'sp', '=', 'BackgroundProcess', '.', 'objects', '.', 'filter', '(', 'pk', '=', '1', ')', '.', 'first', '(', ')', 'if', 'sp', ':', 'self', '.', 'pid', '=', 'sp', '.', 'pid', 'if', 'self', '.', 'pid', 'is', 'None', 'or', 'self', '.', 'pid', '==', '0', ':', 'logger', '.', 'error', '(', '"can\'t determine process id exiting."', ')', 'return', 'False', 'if', 'self', '.', 'pid', '!=', 'getpid', '(', ')', ':', '# calling from outside the daemon instance', 'logger', '.', 'debug', '(', "'send sigterm to daemon'", ')', 'try', ':', 'kill', '(', 'self', '.', 'pid', ',', 'sig', ')', 'return', 'True', 'except', 'OSError', 'as', 'e', ':', 'if', 'e', '.', 'errno', '==', 'errno', '.', 'ESRCH', ':', 'return', 'False', 'else', ':', 'return', 'False', 'logger', '.', 'debug', '(', "'start termination of the daemon'", ')', 'BackgroundProcess', '.', 'objects', '.', 'filter', '(', 'pk', '=', 'self', '.', 'process_id', ')', '.', 'update', '(', 'last_update', '=', 'now', '(', ')', ',', 'message', '=', "'stopping..'", ')', 'timeout', '=', 'time', '(', ')', '+', '60', '# wait max 60 seconds', 'self', '.', 'kill_processes', '(', 'signal', '.', 'SIGTERM', ')', 'while', 'self', '.', 'PROCESSES', 'and', 'time', '(', ')', '<', 'timeout', ':', 'self', '.', 'kill_processes', '(', 'signal', '.', 'SIGTERM', ')', 'sleep', '(', '1', ')', 'self', '.', 'kill_processes', '(', 'signal', '.', 'SIGKILL', ')', 'BackgroundProcess', '.', 'objects', '.', 'filter', '(', 'pk', '=', 'self', '.', 'process_id', ')', '.', 'update', '(', 'last_update', '=', 'now', '(', ')', ',', 'message', '=', "'stopped'", ')', 'logger', '.', 'debug', '(', "'termination of the daemon done'", ')', 'return', 'True'] | stop the scheduler and stop all processes | ['stop', 'the', 'scheduler', 'and', 'stop', 'all', 'processes'] | train | https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/utils/scheduler.py#L443-L484 |
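
The stop() method above leans on a standard POSIX idiom: signal the recorded PID and treat errno.ESRCH as "the process is already gone". A minimal sketch of that idiom in isolation (not PyScada-specific; the demo uses signal 0, which merely checks that a process exists):

# Minimal sketch of the kill()/ESRCH pattern used by Scheduler.stop();
# not PyScada-specific, intended for POSIX systems.
import errno
import os
import signal

def send_signal(pid, sig=signal.SIGTERM):
    """Send `sig` to `pid`; return True if delivered, False if no such process."""
    try:
        os.kill(pid, sig)
        return True
    except OSError as e:
        if e.errno == errno.ESRCH:   # process already exited
            return False
        raise

if __name__ == "__main__" and os.name == "posix":
    print(send_signal(os.getpid(), 0))   # signal 0 only tests existence -> True
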
555 | BerkeleyAutomation/perception | perception/image.py | ColorImage.foreground_mask | def foreground_mask(
self,
tolerance,
ignore_black=True,
use_hsv=False,
scale=8,
bgmodel=None):
"""Creates a binary image mask for the foreground of an image against
a uniformly colored background. The background is assumed to be the mode value of the histogram
for each of the color channels.
Parameters
----------
tolerance : int
A +/- level from the detected mean background color. Pixels within
this range will be classified as background pixels and masked out.
ignore_black : bool
If True, the zero pixels will be ignored
when computing the background model.
use_hsv : bool
If True, image will be converted to HSV for background model
generation.
scale : int
Size of background histogram bins -- there will be BINARY_IM_MAX_VAL/size bins
in the color histogram for each channel.
bgmodel : :obj:`list` of int
A list containing the red, green, and blue channel modes of the
background. If this is None, a background model will be generated
using the other parameters.
Returns
-------
:obj:`BinaryImage`
A binary image that masks out the background from the current
ColorImage.
"""
# get a background model
if bgmodel is None:
bgmodel = self.background_model(ignore_black=ignore_black,
use_hsv=use_hsv,
scale=scale)
# get the bounds
lower_bound = np.array(
[bgmodel[i] - tolerance for i in range(self.channels)])
upper_bound = np.array(
[bgmodel[i] + tolerance for i in range(self.channels)])
orig_zero_indices = np.where(np.sum(self._data, axis=2) == 0)
# threshold
binary_data = cv2.inRange(self.data, lower_bound, upper_bound)
binary_data[:, :, ] = (BINARY_IM_MAX_VAL - binary_data[:, :, ])
binary_data[orig_zero_indices[0], orig_zero_indices[1], ] = 0.0
binary_im = BinaryImage(binary_data.astype(np.uint8), frame=self.frame)
return binary_im | python | def foreground_mask(
self,
tolerance,
ignore_black=True,
use_hsv=False,
scale=8,
bgmodel=None):
"""Creates a binary image mask for the foreground of an image against
a uniformly colored background. The background is assumed to be the mode value of the histogram
for each of the color channels.
Parameters
----------
tolerance : int
A +/- level from the detected mean background color. Pixels within
this range will be classified as background pixels and masked out.
ignore_black : bool
If True, the zero pixels will be ignored
when computing the background model.
use_hsv : bool
If True, image will be converted to HSV for background model
generation.
scale : int
Size of background histogram bins -- there will be BINARY_IM_MAX_VAL/size bins
in the color histogram for each channel.
bgmodel : :obj:`list` of int
A list containing the red, green, and blue channel modes of the
background. If this is None, a background model will be generated
using the other parameters.
Returns
-------
:obj:`BinaryImage`
A binary image that masks out the background from the current
ColorImage.
"""
# get a background model
if bgmodel is None:
bgmodel = self.background_model(ignore_black=ignore_black,
use_hsv=use_hsv,
scale=scale)
# get the bounds
lower_bound = np.array(
[bgmodel[i] - tolerance for i in range(self.channels)])
upper_bound = np.array(
[bgmodel[i] + tolerance for i in range(self.channels)])
orig_zero_indices = np.where(np.sum(self._data, axis=2) == 0)
# threshold
binary_data = cv2.inRange(self.data, lower_bound, upper_bound)
binary_data[:, :, ] = (BINARY_IM_MAX_VAL - binary_data[:, :, ])
binary_data[orig_zero_indices[0], orig_zero_indices[1], ] = 0.0
binary_im = BinaryImage(binary_data.astype(np.uint8), frame=self.frame)
return binary_im | ['def', 'foreground_mask', '(', 'self', ',', 'tolerance', ',', 'ignore_black', '=', 'True', ',', 'use_hsv', '=', 'False', ',', 'scale', '=', '8', ',', 'bgmodel', '=', 'None', ')', ':', '# get a background model', 'if', 'bgmodel', 'is', 'None', ':', 'bgmodel', '=', 'self', '.', 'background_model', '(', 'ignore_black', '=', 'ignore_black', ',', 'use_hsv', '=', 'use_hsv', ',', 'scale', '=', 'scale', ')', '# get the bounds', 'lower_bound', '=', 'np', '.', 'array', '(', '[', 'bgmodel', '[', 'i', ']', '-', 'tolerance', 'for', 'i', 'in', 'range', '(', 'self', '.', 'channels', ')', ']', ')', 'upper_bound', '=', 'np', '.', 'array', '(', '[', 'bgmodel', '[', 'i', ']', '+', 'tolerance', 'for', 'i', 'in', 'range', '(', 'self', '.', 'channels', ')', ']', ')', 'orig_zero_indices', '=', 'np', '.', 'where', '(', 'np', '.', 'sum', '(', 'self', '.', '_data', ',', 'axis', '=', '2', ')', '==', '0', ')', '# threshold', 'binary_data', '=', 'cv2', '.', 'inRange', '(', 'self', '.', 'data', ',', 'lower_bound', ',', 'upper_bound', ')', 'binary_data', '[', ':', ',', ':', ',', ']', '=', '(', 'BINARY_IM_MAX_VAL', '-', 'binary_data', '[', ':', ',', ':', ',', ']', ')', 'binary_data', '[', 'orig_zero_indices', '[', '0', ']', ',', 'orig_zero_indices', '[', '1', ']', ',', ']', '=', '0.0', 'binary_im', '=', 'BinaryImage', '(', 'binary_data', '.', 'astype', '(', 'np', '.', 'uint8', ')', ',', 'frame', '=', 'self', '.', 'frame', ')', 'return', 'binary_im'] | Creates a binary image mask for the foreground of an image against
a uniformly colored background. The background is assumed to be the mode value of the histogram
for each of the color channels.
Parameters
----------
tolerance : int
A +/- level from the detected mean background color. Pixels within
this range will be classified as background pixels and masked out.
ignore_black : bool
If True, the zero pixels will be ignored
when computing the background model.
use_hsv : bool
If True, image will be converted to HSV for background model
generation.
scale : int
Size of background histogram bins -- there will be BINARY_IM_MAX_VAL/size bins
in the color histogram for each channel.
bgmodel : :obj:`list` of int
A list containing the red, green, and blue channel modes of the
background. If this is None, a background model will be generated
using the other parameters.
Returns
-------
:obj:`BinaryImage`
A binary image that masks out the background from the current
ColorImage. | ['Creates', 'a', 'binary', 'image', 'mask', 'for', 'the', 'foreground', 'of', 'an', 'image', 'against', 'a', 'uniformly', 'colored', 'background', '.', 'The', 'background', 'is', 'assumed', 'to', 'be', 'the', 'mode', 'value', 'of', 'the', 'histogram', 'for', 'each', 'of', 'the', 'color', 'channels', '.'] | train | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L1147-L1205 |
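
The core of foreground_mask() is a cv2.inRange() threshold around a background-colour estimate, followed by an inversion. A small self-contained sketch of that idea on a synthetic array (plain NumPy/OpenCV, not the perception package's ColorImage/BinaryImage wrappers; the tolerance of 15 is arbitrary):

# Self-contained sketch of the inRange-based masking idea; requires numpy and
# opencv-python, and does not use the perception package's image classes.
import cv2
import numpy as np

BINARY_IM_MAX_VAL = 255  # assuming 8-bit binary images, as in the code above

# Synthetic scene: uniform grey background with one coloured square "object".
image = np.full((100, 100, 3), 120, dtype=np.uint8)
image[30:60, 40:70] = (200, 50, 50)

bgmodel = [120, 120, 120]  # per-channel background estimate (e.g. histogram mode)
tolerance = 15             # arbitrary +/- band around the background colour

lower = np.array([c - tolerance for c in bgmodel])
upper = np.array([c + tolerance for c in bgmodel])

background = cv2.inRange(image, lower, upper)        # 255 where pixel matches background
foreground = (BINARY_IM_MAX_VAL - background).astype(np.uint8)

print(np.count_nonzero(foreground))                  # 900 = the 30x30 object region
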
556 | GoogleCloudPlatform/appengine-pipelines | python/src/pipeline/pipeline.py | Pipeline._callback_internal | def _callback_internal(self, kwargs):
"""Used to execute callbacks on asynchronous pipelines."""
logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
return self.callback(**kwargs) | python | def _callback_internal(self, kwargs):
"""Used to execute callbacks on asynchronous pipelines."""
logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
return self.callback(**kwargs) | ['def', '_callback_internal', '(', 'self', ',', 'kwargs', ')', ':', 'logging', '.', 'debug', '(', "'Callback %s(*%s, **%s)#%s with params: %r'", ',', 'self', '.', '_class_path', ',', '_short_repr', '(', 'self', '.', 'args', ')', ',', '_short_repr', '(', 'self', '.', 'kwargs', ')', ',', 'self', '.', '_pipeline_key', '.', 'name', '(', ')', ',', 'kwargs', ')', 'return', 'self', '.', 'callback', '(', '*', '*', 'kwargs', ')'] | Used to execute callbacks on asynchronous pipelines. | ['Used', 'to', 'execute', 'callbacks', 'on', 'asynchronous', 'pipelines', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1091-L1096 |
557 | totalgood/pugnlp | src/pugnlp/util.py | normalize_scientific_notation | def normalize_scientific_notation(s, ignore_commas=True, verbosity=1):
"""Produce a string convertable with float(s), if possible, fixing some common scientific notations
Deletes commas and allows addition.
>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42'
"""
s = s.lstrip(charlist.not_digits_nor_sign)
s = s.rstrip(charlist.not_digits)
# print s
# TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation
num_strings = rex.scientific_notation_exponent.split(s, maxsplit=2)
# print num_strings
# get rid of commas
s = rex.re.sub(r"[^.0-9-+" + "," * int(not ignore_commas) + r"]+", '', num_strings[0])
# print s
# if this value gets so large that it requires an exponential notation, this will break the conversion
if not s:
return None
try:
s = str(eval(s.strip().lstrip('0')))
except (IndexError, ValueError, AttributeError, TypeError):
if verbosity > 1:
print('Unable to evaluate %s' % repr(s))
try:
s = str(float(s))
except (IndexError, ValueError, AttributeError, TypeError):
print('Unable to float %s' % repr(s))
s = ''
# print s
if len(num_strings) > 1:
if not s:
s = '1'
s += 'e' + rex.re.sub(r'[^.0-9-+]+', '', num_strings[1])
if s:
return s
return None | python | def normalize_scientific_notation(s, ignore_commas=True, verbosity=1):
"""Produce a string convertable with float(s), if possible, fixing some common scientific notations
Deletes commas and allows addition.
>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42'
"""
s = s.lstrip(charlist.not_digits_nor_sign)
s = s.rstrip(charlist.not_digits)
# print s
# TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation
num_strings = rex.scientific_notation_exponent.split(s, maxsplit=2)
# print num_strings
# get rid of commas
s = rex.re.sub(r"[^.0-9-+" + "," * int(not ignore_commas) + r"]+", '', num_strings[0])
# print s
# if this value gets so large that it requires an exponential notation, this will break the conversion
if not s:
return None
try:
s = str(eval(s.strip().lstrip('0')))
except (IndexError, ValueError, AttributeError, TypeError):
if verbosity > 1:
print('Unable to evaluate %s' % repr(s))
try:
s = str(float(s))
except (IndexError, ValueError, AttributeError, TypeError):
print('Unable to float %s' % repr(s))
s = ''
# print s
if len(num_strings) > 1:
if not s:
s = '1'
s += 'e' + rex.re.sub(r'[^.0-9-+]+', '', num_strings[1])
if s:
return s
return None | ['def', 'normalize_scientific_notation', '(', 's', ',', 'ignore_commas', '=', 'True', ',', 'verbosity', '=', '1', ')', ':', 's', '=', 's', '.', 'lstrip', '(', 'charlist', '.', 'not_digits_nor_sign', ')', 's', '=', 's', '.', 'rstrip', '(', 'charlist', '.', 'not_digits', ')', '# print s', '# TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation', 'num_strings', '=', 'rex', '.', 'scientific_notation_exponent', '.', 'split', '(', 's', ',', 'maxsplit', '=', '2', ')', '# print num_strings', '# get rid of commas', 's', '=', 'rex', '.', 're', '.', 'sub', '(', 'r"[^.0-9-+"', '+', '","', '*', 'int', '(', 'not', 'ignore_commas', ')', '+', 'r"]+"', ',', "''", ',', 'num_strings', '[', '0', ']', ')', '# print s', '# if this value gets so large that it requires an exponential notation, this will break the conversion', 'if', 'not', 's', ':', 'return', 'None', 'try', ':', 's', '=', 'str', '(', 'eval', '(', 's', '.', 'strip', '(', ')', '.', 'lstrip', '(', "'0'", ')', ')', ')', 'except', '(', 'IndexError', ',', 'ValueError', ',', 'AttributeError', ',', 'TypeError', ')', ':', 'if', 'verbosity', '>', '1', ':', 'print', '(', "'Unable to evaluate %s'", '%', 'repr', '(', 's', ')', ')', 'try', ':', 's', '=', 'str', '(', 'float', '(', 's', ')', ')', 'except', '(', 'IndexError', ',', 'ValueError', ',', 'AttributeError', ',', 'TypeError', ')', ':', 'print', '(', "'Unable to float %s'", '%', 'repr', '(', 's', ')', ')', 's', '=', "''", '# print s', 'if', 'len', '(', 'num_strings', ')', '>', '1', ':', 'if', 'not', 's', ':', 's', '=', "'1'", 's', '+=', "'e'", '+', 'rex', '.', 're', '.', 'sub', '(', "r'[^.0-9-+]+'", ',', "''", ',', 'num_strings', '[', '1', ']', ')', 'if', 's', ':', 'return', 's', 'return', 'None'] | Produce a string convertable with float(s), if possible, fixing some common scientific notations
Deletes commas and allows addition.
>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42' | ['Produce', 'a', 'string', 'convertable', 'with', 'float', '(', 's', ')', 'if', 'possible', 'fixing', 'some', 'common', 'scientific', 'notations'] | train | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1980-L2020 |
558 | tallforasmurf/byteplay | byteplay3.py | Code._findlinestarts | def _findlinestarts(code_object):
"""
Find the offsets in a byte code which are the start of source lines.
Generate pairs (offset, lineno) as described in Python/compile.c.
This is a modified version of dis.findlinestarts. This version allows
multiple "line starts" with the same line number. (The dis version
conditions its yield on a test "if lineno != lastlineno".)
FYI: code.co_lnotab is a byte array with one pair of bytes for each
effective source line number in the bytecode. An effective line is
one that generates code: not blank or comment lines. The first actual
line number, typically the number of the "def" statement, is in
code.co_firstlineno.
An even byte of co_lnotab is the offset to the bytecode generated
from the next effective line number. The following odd byte is an
increment on the previous line's number to the next line's number.
Thus co_firstlineno+co_lnotab[1] is the first effective line's
number, and co_lnotab[0] is the number of bytes it generated.
Note that an effective line number generates code by definition,
hence the even byte cannot be zero; and as line numbers are
monotonically increasing, the odd byte cannot be zero either.
But what, the curious reader might ask, does Python do if a source
line generates more than 255 bytes of code? In that *highly* unlikely
case compile.c generates multiple pairs of (255,0) until it has
accounted for all the generated code, then a final pair of
(offset%256, lineincr).
Oh, but what, the curious reader asks, do they do if there is a gap
of more than 255 between effective line numbers? It is not unheard of
to find blocks of comments larger than 255 lines (like this one?).
Then compile.c generates pairs of (0, 255) until it has accounted for
the line number difference and a final pair of (offset,lineincr%256).
Uh, but...? Yes, what now, annoying reader? Well, does the following
code handle these special cases of (255,0) and (0,255) properly?
It handles the (0,255) case correctly, because of the "if byte_incr"
test which skips the yield() but increments lineno. It does not handle
the case of (255,0) correctly; it will yield false pairs (255,0).
Fortunately that will only arise e.g. when disassembling some
"obfuscated" code where most newlines are replaced with semicolons.
Oh, and yes, the to_code() method does properly handle generation
of the (255,0) and (0,255) entries correctly.
"""
# grab the even bytes as integer byte_increments:
byte_increments = [c for c in code_object.co_lnotab[0::2]]
# grab the odd bytes as integer line_increments:
line_increments = [c for c in code_object.co_lnotab[1::2]]
lineno = code_object.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
yield (addr, lineno)
addr += byte_incr
lineno += line_incr
yield (addr, lineno) | python | def _findlinestarts(code_object):
"""
Find the offsets in a byte code which are the start of source lines.
Generate pairs (offset, lineno) as described in Python/compile.c.
This is a modified version of dis.findlinestarts. This version allows
multiple "line starts" with the same line number. (The dis version
conditions its yield on a test "if lineno != lastlineno".)
FYI: code.co_lnotab is a byte array with one pair of bytes for each
effective source line number in the bytecode. An effective line is
one that generates code: not blank or comment lines. The first actual
line number, typically the number of the "def" statement, is in
code.co_firstlineno.
An even byte of co_lnotab is the offset to the bytecode generated
from the next effective line number. The following odd byte is an
increment on the previous line's number to the next line's number.
Thus co_firstlineno+co_lnotab[1] is the first effective line's
number, and co_lnotab[0] is the number of bytes it generated.
Note that an effective line number generates code by definition,
hence the even byte cannot be zero; and as line numbers are
monotonically increasing, the odd byte cannot be zero either.
But what, the curious reader might ask, does Python do if a source
line generates more than 255 bytes of code? In that *highly* unlikely
case compile.c generates multiple pairs of (255,0) until it has
accounted for all the generated code, then a final pair of
(offset%256, lineincr).
Oh, but what, the curious reader asks, do they do if there is a gap
of more than 255 between effective line numbers? It is not unheard of
to find blocks of comments larger than 255 lines (like this one?).
Then compile.c generates pairs of (0, 255) until it has accounted for
the line number difference and a final pair of (offset,lineincr%256).
Uh, but...? Yes, what now, annoying reader? Well, does the following
code handle these special cases of (255,0) and (0,255) properly?
It handles the (0,255) case correctly, because of the "if byte_incr"
test which skips the yield() but increments lineno. It does not handle
the case of (255,0) correctly; it will yield false pairs (255,0).
Fortunately that will only arise e.g. when disassembling some
"obfuscated" code where most newlines are replaced with semicolons.
Oh, and yes, the to_code() method does properly handle generation
of the (255,0) and (0,255) entries correctly.
"""
# grab the even bytes as integer byte_increments:
byte_increments = [c for c in code_object.co_lnotab[0::2]]
# grab the odd bytes as integer line_increments:
line_increments = [c for c in code_object.co_lnotab[1::2]]
lineno = code_object.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
yield (addr, lineno)
addr += byte_incr
lineno += line_incr
yield (addr, lineno) | ['def', '_findlinestarts', '(', 'code_object', ')', ':', '# grab the even bytes as integer byte_increments:', 'byte_increments', '=', '[', 'c', 'for', 'c', 'in', 'code_object', '.', 'co_lnotab', '[', '0', ':', ':', '2', ']', ']', '# grab the odd bytes as integer line_increments:', 'line_increments', '=', '[', 'c', 'for', 'c', 'in', 'code_object', '.', 'co_lnotab', '[', '1', ':', ':', '2', ']', ']', 'lineno', '=', 'code_object', '.', 'co_firstlineno', 'addr', '=', '0', 'for', 'byte_incr', ',', 'line_incr', 'in', 'zip', '(', 'byte_increments', ',', 'line_increments', ')', ':', 'if', 'byte_incr', ':', 'yield', '(', 'addr', ',', 'lineno', ')', 'addr', '+=', 'byte_incr', 'lineno', '+=', 'line_incr', 'yield', '(', 'addr', ',', 'lineno', ')'] | Find the offsets in a byte code which are the start of source lines.
Generate pairs (offset, lineno) as described in Python/compile.c.
This is a modified version of dis.findlinestarts. This version allows
multiple "line starts" with the same line number. (The dis version
conditions its yield on a test "if lineno != lastlineno".)
FYI: code.co_lnotab is a byte array with one pair of bytes for each
effective source line number in the bytecode. An effective line is
one that generates code: not blank or comment lines. The first actual
line number, typically the number of the "def" statement, is in
code.co_firstlineno.
An even byte of co_lnotab is the offset to the bytecode generated
from the next effective line number. The following odd byte is an
increment on the previous line's number to the next line's number.
Thus co_firstlineno+co_lnotab[1] is the first effective line's
number, and co_lnotab[0] is the number of bytes it generated.
Note that an effective line number generates code by definition,
hence the even byte cannot be zero; and as line numbers are
monotonically increasing, the odd byte cannot be zero either.
But what, the curious reader might ask, does Python do if a source
line generates more than 255 bytes of code? In that *highly* unlikely
case compile.c generates multiple pairs of (255,0) until it has
accounted for all the generated code, then a final pair of
(offset%256, lineincr).
Oh, but what, the curious reader asks, do they do if there is a gap
of more than 255 between effective line numbers? It is not unheard of
to find blocks of comments larger than 255 lines (like this one?).
Then compile.c generates pairs of (0, 255) until it has accounted for
the line number difference and a final pair of (offset,lineincr%256).
Uh, but...? Yes, what now, annoying reader? Well, does the following
code handle these special cases of (255,0) and (0,255) properly?
It handles the (0,255) case correctly, because of the "if byte_incr"
test which skips the yield() but increments lineno. It does not handle
the case of (255,0) correctly; it will yield false pairs (255,0).
Fortunately that will only arise e.g. when disassembling some
"obfuscated" code where most newlines are replaced with semicolons.
Oh, and yes, the to_code() method does properly handle generation
of the (255,0) and (0,255) entries correctly. | ['Find', 'the', 'offsets', 'in', 'a', 'byte', 'code', 'which', 'are', 'the', 'start', 'of', 'source', 'lines', '.'] | train | https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L808-L870 |
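
The co_lnotab encoding described in that docstring is easy to inspect on a tiny function. A short hedged sketch that decodes the (byte_increment, line_increment) pairs with the same loop, next to the stdlib dis.findlinestarts() for comparison (exact offsets differ between CPython versions, and co_lnotab is deprecated in newer releases in favour of co_lines()):

# Hand-decode co_lnotab for a small function, mirroring the loop above.
# Exact byte values depend on the CPython version; co_lnotab is deprecated
# in recent releases (co_lines() is the modern replacement).
import dis

def sample(x):
    y = x + 1
    z = y * 2
    return z

code = sample.__code__
print(code.co_firstlineno, list(code.co_lnotab))

addr, lineno = 0, code.co_firstlineno
pairs = []
for byte_incr, line_incr in zip(code.co_lnotab[0::2], code.co_lnotab[1::2]):
    if byte_incr:
        pairs.append((addr, lineno))
    addr += byte_incr
    lineno += line_incr
pairs.append((addr, lineno))

print(pairs)                            # hand-decoded (offset, lineno) pairs
print(list(dis.findlinestarts(code)))   # stdlib version (deduplicates repeated linenos)
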
559 | chovanecm/sacredboard | sacredboard/app/data/pymongo/rundao.py | MongoRunDAO._to_mongo_query | def _to_mongo_query(query):
"""
Convert the query received by the Sacred Web API to a MongoDB query.
Takes a query in format
{"type": "and", "filters": [
{"field": "host.hostname", "operator": "==", "value": "ntbacer"},
{"type": "or", "filters": [
{"field": "result", "operator": "==", "value": 2403.52},
{"field": "host.python_version", "operator": "==", "value":"3.5.2"}
]}]}
and returns an appropriate MongoDB Query.
:param query: A query in the Sacred Web API format.
:return: Mongo Query.
"""
mongo_query = []
for clause in query["filters"]:
if clause.get("type") is None:
mongo_clause = MongoRunDAO. \
_simple_clause_to_query(clause)
else:
# It's a subclause
mongo_clause = MongoRunDAO._to_mongo_query(clause)
mongo_query.append(mongo_clause)
if len(mongo_query) == 0:
return {}
if query["type"] == "and":
return {"$and": mongo_query}
elif query["type"] == "or":
return {"$or": mongo_query}
else:
raise ValueError("Unexpected query type %s" % query.get("type")) | python | def _to_mongo_query(query):
"""
Convert the query received by the Sacred Web API to a MongoDB query.
Takes a query in format
{"type": "and", "filters": [
{"field": "host.hostname", "operator": "==", "value": "ntbacer"},
{"type": "or", "filters": [
{"field": "result", "operator": "==", "value": 2403.52},
{"field": "host.python_version", "operator": "==", "value":"3.5.2"}
]}]}
and returns an appropriate MongoDB Query.
:param query: A query in the Sacred Web API format.
:return: Mongo Query.
"""
mongo_query = []
for clause in query["filters"]:
if clause.get("type") is None:
mongo_clause = MongoRunDAO. \
_simple_clause_to_query(clause)
else:
# It's a subclause
mongo_clause = MongoRunDAO._to_mongo_query(clause)
mongo_query.append(mongo_clause)
if len(mongo_query) == 0:
return {}
if query["type"] == "and":
return {"$and": mongo_query}
elif query["type"] == "or":
return {"$or": mongo_query}
else:
raise ValueError("Unexpected query type %s" % query.get("type")) | ['def', '_to_mongo_query', '(', 'query', ')', ':', 'mongo_query', '=', '[', ']', 'for', 'clause', 'in', 'query', '[', '"filters"', ']', ':', 'if', 'clause', '.', 'get', '(', '"type"', ')', 'is', 'None', ':', 'mongo_clause', '=', 'MongoRunDAO', '.', '_simple_clause_to_query', '(', 'clause', ')', 'else', ':', "# It's a subclause", 'mongo_clause', '=', 'MongoRunDAO', '.', '_to_mongo_query', '(', 'clause', ')', 'mongo_query', '.', 'append', '(', 'mongo_clause', ')', 'if', 'len', '(', 'mongo_query', ')', '==', '0', ':', 'return', '{', '}', 'if', 'query', '[', '"type"', ']', '==', '"and"', ':', 'return', '{', '"$and"', ':', 'mongo_query', '}', 'elif', 'query', '[', '"type"', ']', '==', '"or"', ':', 'return', '{', '"$or"', ':', 'mongo_query', '}', 'else', ':', 'raise', 'ValueError', '(', '"Unexpected query type %s"', '%', 'query', '.', 'get', '(', '"type"', ')', ')'] | Convert the query received by the Sacred Web API to a MongoDB query.
Takes a query in format
{"type": "and", "filters": [
{"field": "host.hostname", "operator": "==", "value": "ntbacer"},
{"type": "or", "filters": [
{"field": "result", "operator": "==", "value": 2403.52},
{"field": "host.python_version", "operator": "==", "value":"3.5.2"}
]}]}
and returns an appropriate MongoDB Query.
:param query: A query in the Sacred Web API format.
:return: Mongo Query. | ['Convert', 'the', 'query', 'received', 'by', 'the', 'Sacred', 'Web', 'API', 'to', 'a', 'MongoDB', 'query', '.'] | train | https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/rundao.py#L114-L146 |
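
To make the translation concrete, here is the docstring's example run through a small standalone re-implementation of the same and/or recursion (written only for illustration; it is not imported from sacredboard, and it collapses every simple clause to equality instead of the operator handling done by _simple_clause_to_query):

# Standalone illustration of the web-API -> MongoDB query translation.
# Simplified: every simple clause is treated as "field == value".
def to_mongo_query(query):
    clauses = []
    for clause in query["filters"]:
        if clause.get("type") is None:
            clauses.append({clause["field"]: clause["value"]})
        else:
            clauses.append(to_mongo_query(clause))  # nested and/or subclause
    if not clauses:
        return {}
    return {"$and": clauses} if query["type"] == "and" else {"$or": clauses}

api_query = {"type": "and", "filters": [
    {"field": "host.hostname", "operator": "==", "value": "ntbacer"},
    {"type": "or", "filters": [
        {"field": "result", "operator": "==", "value": 2403.52},
        {"field": "host.python_version", "operator": "==", "value": "3.5.2"},
    ]}]}

print(to_mongo_query(api_query))
# {'$and': [{'host.hostname': 'ntbacer'},
#           {'$or': [{'result': 2403.52}, {'host.python_version': '3.5.2'}]}]}
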
560 | saltstack/salt | salt/modules/scp_mod.py | get | def get(remote_path,
local_path='',
recursive=False,
preserve_times=False,
**kwargs):
'''
Transfer files and directories from remote host to the localhost of the
Minion.
remote_path
Path to retrieve from remote host. Since this is evaluated by scp on the
remote host, shell wildcards and environment variables may be used.
recursive: ``False``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' scp.get /var/tmp/file /tmp/file hostname=10.10.10.1 auto_add_policy=True
'''
scp_client = _prepare_connection(**kwargs)
get_kwargs = {
'recursive': recursive,
'preserve_times': preserve_times
}
if local_path:
get_kwargs['local_path'] = local_path
return scp_client.get(remote_path, **get_kwargs) | python | def get(remote_path,
local_path='',
recursive=False,
preserve_times=False,
**kwargs):
'''
Transfer files and directories from remote host to the localhost of the
Minion.
remote_path
Path to retrieve from remote host. Since this is evaluated by scp on the
remote host, shell wildcards and environment variables may be used.
recursive: ``False``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' scp.get /var/tmp/file /tmp/file hostname=10.10.10.1 auto_add_policy=True
'''
scp_client = _prepare_connection(**kwargs)
get_kwargs = {
'recursive': recursive,
'preserve_times': preserve_times
}
if local_path:
get_kwargs['local_path'] = local_path
return scp_client.get(remote_path, **get_kwargs) | ['def', 'get', '(', 'remote_path', ',', 'local_path', '=', "''", ',', 'recursive', '=', 'False', ',', 'preserve_times', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'scp_client', '=', '_prepare_connection', '(', '*', '*', 'kwargs', ')', 'get_kwargs', '=', '{', "'recursive'", ':', 'recursive', ',', "'preserve_times'", ':', 'preserve_times', '}', 'if', 'local_path', ':', 'get_kwargs', '[', "'local_path'", ']', '=', 'local_path', 'return', 'scp_client', '.', 'get', '(', 'remote_path', ',', '*', '*', 'get_kwargs', ')'] | Transfer files and directories from remote host to the localhost of the
Minion.
remote_path
Path to retrieve from remote host. Since this is evaluated by scp on the
remote host, shell wildcards and environment variables may be used.
recursive: ``False``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' scp.get /var/tmp/file /tmp/file hostname=10.10.10.1 auto_add_policy=True | ['Transfer', 'files', 'and', 'directories', 'from', 'remote', 'host', 'to', 'the', 'localhost', 'of', 'the', 'Minion', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/scp_mod.py#L69-L151 |
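
Outside of Salt, the same transfer can be sketched directly with the Paramiko and scp packages that this execution module appears to wrap; the host, credentials and paths below are placeholders, and both packages must be installed:

# Rough non-Salt equivalent of the CLI example above, using paramiko + scp.
# 10.10.10.1, the username/password and the paths are placeholders only.
import paramiko
from scp import SCPClient

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # like auto_add_policy=True
ssh.connect('10.10.10.1', port=22, username='admin', password='pass', timeout=10)

scp_client = SCPClient(ssh.get_transport(), socket_timeout=10)
scp_client.get('/var/tmp/file',
               local_path='/tmp/file',
               recursive=False,
               preserve_times=False)
scp_client.close()
ssh.close()
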
561 | mwouts/jupytext | jupytext/languages.py | cell_language | def cell_language(source):
"""Return cell language and language options, if any"""
if source:
line = source[0]
if line.startswith('%%'):
magic = line[2:]
if ' ' in magic:
lang, magic_args = magic.split(' ', 1)
else:
lang = magic
magic_args = ''
if lang in _JUPYTER_LANGUAGES:
source.pop(0)
return lang, magic_args
return None, None | python | def cell_language(source):
"""Return cell language and language options, if any"""
if source:
line = source[0]
if line.startswith('%%'):
magic = line[2:]
if ' ' in magic:
lang, magic_args = magic.split(' ', 1)
else:
lang = magic
magic_args = ''
if lang in _JUPYTER_LANGUAGES:
source.pop(0)
return lang, magic_args
return None, None | ['def', 'cell_language', '(', 'source', ')', ':', 'if', 'source', ':', 'line', '=', 'source', '[', '0', ']', 'if', 'line', '.', 'startswith', '(', "'%%'", ')', ':', 'magic', '=', 'line', '[', '2', ':', ']', 'if', "' '", 'in', 'magic', ':', 'lang', ',', 'magic_args', '=', 'magic', '.', 'split', '(', "' '", ',', '1', ')', 'else', ':', 'lang', '=', 'magic', 'magic_args', '=', "''", 'if', 'lang', 'in', '_JUPYTER_LANGUAGES', ':', 'source', '.', 'pop', '(', '0', ')', 'return', 'lang', ',', 'magic_args', 'return', 'None', ',', 'None'] | Return cell language and language options, if any | ['Return', 'cell', 'language', 'and', 'language', 'options', 'if', 'any'] | train | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/languages.py#L68-L84 |
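
A small hedged usage sketch of cell_language() as defined above: given a cell's source as a list of lines, it strips a leading %%magic line and returns the language plus its arguments. It assumes jupytext is installed with this signature and that 'R' is among the recognised _JUPYTER_LANGUAGES; note the source list is modified in place.

# Hypothetical usage of the cell_language() shown above; assumes the 'R'
# cell magic is listed in jupytext's _JUPYTER_LANGUAGES.
from jupytext.languages import cell_language

source = ['%%R -i df', 'summary(df)']
lang, magic_args = cell_language(source)

print(lang, '|', magic_args)   # expected: R | -i df
print(source)                  # magic line popped in place: ['summary(df)']
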
562 | chimpler/pyhocon | pyhocon/converter.py | HOCONConverter.convert_from_file | def convert_from_file(cls, input_file=None, output_file=None, output_format='json', indent=2, compact=False):
"""Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation
"""
if input_file is None:
content = sys.stdin.read()
config = ConfigFactory.parse_string(content)
else:
config = ConfigFactory.parse_file(input_file)
res = cls.convert(config, output_format, indent, compact)
if output_file is None:
print(res)
else:
with open(output_file, "w") as fd:
fd.write(res) | python | def convert_from_file(cls, input_file=None, output_file=None, output_format='json', indent=2, compact=False):
"""Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation
"""
if input_file is None:
content = sys.stdin.read()
config = ConfigFactory.parse_string(content)
else:
config = ConfigFactory.parse_file(input_file)
res = cls.convert(config, output_format, indent, compact)
if output_file is None:
print(res)
else:
with open(output_file, "w") as fd:
fd.write(res) | ['def', 'convert_from_file', '(', 'cls', ',', 'input_file', '=', 'None', ',', 'output_file', '=', 'None', ',', 'output_format', '=', "'json'", ',', 'indent', '=', '2', ',', 'compact', '=', 'False', ')', ':', 'if', 'input_file', 'is', 'None', ':', 'content', '=', 'sys', '.', 'stdin', '.', 'read', '(', ')', 'config', '=', 'ConfigFactory', '.', 'parse_string', '(', 'content', ')', 'else', ':', 'config', '=', 'ConfigFactory', '.', 'parse_file', '(', 'input_file', ')', 'res', '=', 'cls', '.', 'convert', '(', 'config', ',', 'output_format', ',', 'indent', ',', 'compact', ')', 'if', 'output_file', 'is', 'None', ':', 'print', '(', 'res', ')', 'else', ':', 'with', 'open', '(', 'output_file', ',', '"w"', ')', 'as', 'fd', ':', 'fd', '.', 'write', '(', 'res', ')'] | Convert to json, properties or yaml
:param input_file: input file, if not specified stdin
:param output_file: output file, if not specified stdout
:param output_format: json, properties or yaml
:return: json, properties or yaml string representation | ['Convert', 'to', 'json', 'properties', 'or', 'yaml'] | train | https://github.com/chimpler/pyhocon/blob/e5b22a8e74e8f88e43cf9e9140cca5f2cd0ab4a3/pyhocon/converter.py#L237-L257 |
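
A brief hedged usage sketch of the classmethod above: converting a HOCON file to JSON on disk and printing a YAML conversion to stdout (the file names are placeholders and pyhocon must be installed):

# Illustrative use of HOCONConverter.convert_from_file(); the .conf/.json
# file names are placeholders.
from pyhocon.converter import HOCONConverter

# Write the JSON rendering of application.conf to application.json.
HOCONConverter.convert_from_file('application.conf',
                                 output_file='application.json',
                                 output_format='json',
                                 indent=2,
                                 compact=False)

# With output_file=None (the default) the result is printed to stdout.
HOCONConverter.convert_from_file('application.conf', output_format='yaml')
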
563 | thespacedoctor/polyglot | polyglot/markdown/translate.py | translate.definition | def definition(
self,
text,
definition):
"""*genarate a MMD definition*
**Key Arguments:**
- ``text`` -- the text to define
- ``definition`` -- the definition
**Return:**
- ``definition`` -- the MMD style definition
**Usage:**
To generate a MMD definition:
.. code-block:: python
text = \"\"\"Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
Also the makers of really great products.\"\"\"
definition = md.definition("Apple", text)
print definition
# OUTPUT:
# Apple
# : Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
# Also the makers of really great products.
#
"""
text = text.strip()
definition = definition.strip()
regex = re.compile(r'\n(\S)')
definition = regex.sub("\n \g<1>", definition)
return "%(text)s\n: %(definition)s" % locals() | python | def definition(
self,
text,
definition):
"""*genarate a MMD definition*
**Key Arguments:**
- ``text`` -- the text to define
- ``definition`` -- the definition
**Return:**
- ``definition`` -- the MMD style definition
**Usage:**
To generate a MMD definition:
.. code-block:: python
text = \"\"\"Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
Also the makers of really great products.\"\"\"
definition = md.definition("Apple", text)
print definition
# OUTPUT:
# Apple
# : Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
# Also the makers of really great products.
#
"""
text = text.strip()
definition = definition.strip()
regex = re.compile(r'\n(\S)')
definition = regex.sub("\n \g<1>", definition)
return "%(text)s\n: %(definition)s" % locals() | ['def', 'definition', '(', 'self', ',', 'text', ',', 'definition', ')', ':', 'text', '=', 'text', '.', 'strip', '(', ')', 'definition', '=', 'definition', '.', 'strip', '(', ')', 'regex', '=', 're', '.', 'compile', '(', "r'\\n(\\S)'", ')', 'definition', '=', 'regex', '.', 'sub', '(', '"\\n \\g<1>"', ',', 'definition', ')', 'return', '"%(text)s\\n: %(definition)s"', '%', 'locals', '(', ')'] | *genarate a MMD definition*
**Key Arguments:**
- ``text`` -- the text to define
- ``definition`` -- the definition
**Return:**
- ``definition`` -- the MMD style definition
**Usage:**
To generate a MMD definition:
.. code-block:: python
text = \"\"\"Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
Also the makers of really great products.\"\"\"
definition = md.definition("Apple", text)
print definition
# OUTPUT:
# Apple
# : Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
# Also the makers of really great products.
# | ['*', 'genarate', 'a', 'MMD', 'definition', '*'] | train | https://github.com/thespacedoctor/polyglot/blob/98038d746aa67e343b73b3ccee1e02d31dab81ec/polyglot/markdown/translate.py#L495-L531 |
564 | frejanordsiek/hdf5storage | hdf5storage/__init__.py | find_thirdparty_marshaller_plugins | def find_thirdparty_marshaller_plugins():
""" Find, but don't load, all third party marshaller plugins.
Third party marshaller plugins declare the entry point
``'hdf5storage.marshallers.plugins'`` with the name being the
Marshaller API version and the target being a function that returns
a ``tuple`` or ``list`` of all the marshallers provided by that
plugin when given the hdf5storage version (``str``) as its only
argument.
.. versionadded:: 0.2
Returns
-------
plugins : dict
The marshaller-obtaining entry points from third party
plugins. The keys are the Marshaller API versions (``str``) and
the values are ``dict`` of the entry points, with the module
names as the keys (``str``) and the values being the entry
points (``pkg_resources.EntryPoint``).
See Also
--------
supported_marshaller_api_versions
"""
all_plugins = tuple(pkg_resources.iter_entry_points(
'hdf5storage.marshallers.plugins'))
return {ver: {p.module_name: p
for p in all_plugins if p.name == ver}
for ver in supported_marshaller_api_versions()} | python | def find_thirdparty_marshaller_plugins():
""" Find, but don't load, all third party marshaller plugins.
Third party marshaller plugins declare the entry point
``'hdf5storage.marshallers.plugins'`` with the name being the
Marshaller API version and the target being a function that returns
a ``tuple`` or ``list`` of all the marshallers provided by that
plugin when given the hdf5storage version (``str``) as its only
argument.
.. versionadded:: 0.2
Returns
-------
plugins : dict
The marshaller-obtaining entry points from third party
plugins. The keys are the Marshaller API versions (``str``) and
the values are ``dict`` of the entry points, with the module
names as the keys (``str``) and the values being the entry
points (``pkg_resources.EntryPoint``).
See Also
--------
supported_marshaller_api_versions
"""
all_plugins = tuple(pkg_resources.iter_entry_points(
'hdf5storage.marshallers.plugins'))
return {ver: {p.module_name: p
for p in all_plugins if p.name == ver}
for ver in supported_marshaller_api_versions()} | ['def', 'find_thirdparty_marshaller_plugins', '(', ')', ':', 'all_plugins', '=', 'tuple', '(', 'pkg_resources', '.', 'iter_entry_points', '(', "'hdf5storage.marshallers.plugins'", ')', ')', 'return', '{', 'ver', ':', '{', 'p', '.', 'module_name', ':', 'p', 'for', 'p', 'in', 'all_plugins', 'if', 'p', '.', 'name', '==', 'ver', '}', 'for', 'ver', 'in', 'supported_marshaller_api_versions', '(', ')', '}'] | Find, but don't load, all third party marshaller plugins.
Third party marshaller plugins declare the entry point
``'hdf5storage.marshallers.plugins'`` with the name being the
Marshaller API version and the target being a function that returns
a ``tuple`` or ``list`` of all the marshallers provided by that
plugin when given the hdf5storage version (``str``) as its only
argument.
.. versionadded:: 0.2
Returns
-------
plugins : dict
The marshaller obtaining entry points from third party
plugins. The keys are the Marshaller API versions (``str``) and
the values are ``dict`` of the entry points, with the module
names as the keys (``str``) and the values being the entry
points (``pkg_resources.EntryPoint``).
See Also
--------
supported_marshaller_api_versions | ['Find', 'but', 'don', 't', 'load', 'all', 'third', 'party', 'marshaller', 'plugins', '.'] | train | https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/__init__.py#L85-L115 |
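A minimal standalone sketch of the entry-point lookup pattern used in the hdf5storage entry above; the API version string '1.0' and the demo call at the bottom are illustrative assumptions, not values taken from that project.
import pkg_resources

def plugins_by_api_version(api_versions):
    # Collect every entry point declared under the group named in the entry
    # above and bucket them by the Marshaller API version in the entry name.
    points = tuple(pkg_resources.iter_entry_points('hdf5storage.marshallers.plugins'))
    return {ver: {p.module_name: p for p in points if p.name == ver}
            for ver in api_versions}

if __name__ == '__main__':
    # '1.0' is a hypothetical API version used only for this demo.
    print(plugins_by_api_version(('1.0',)))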
565 | wummel/linkchecker | linkcheck/logger/customxml.py | CustomXMLLogger.log_url | def log_url (self, url_data):
"""
Log URL data in custom XML format.
"""
self.xml_starttag(u'urldata')
if self.has_part('url'):
self.xml_tag(u"url", unicode(url_data.base_url))
if url_data.name and self.has_part('name'):
self.xml_tag(u"name", unicode(url_data.name))
if url_data.parent_url and self.has_part('parenturl'):
attrs = {
u'line': u"%d" % url_data.line,
u'column': u"%d" % url_data.column,
}
self.xml_tag(u"parent", unicode(url_data.parent_url),
attrs=attrs)
if url_data.base_ref and self.has_part('base'):
self.xml_tag(u"baseref", unicode(url_data.base_ref))
if self.has_part("realurl"):
self.xml_tag(u"realurl", unicode(url_data.url))
if self.has_part("extern"):
self.xml_tag(u"extern", u"%d" % (1 if url_data.extern else 0))
if url_data.dltime >= 0 and self.has_part("dltime"):
self.xml_tag(u"dltime", u"%f" % url_data.dltime)
if url_data.size >= 0 and self.has_part("dlsize"):
self.xml_tag(u"dlsize", u"%d" % url_data.size)
if url_data.checktime and self.has_part("checktime"):
self.xml_tag(u"checktime", u"%f" % url_data.checktime)
if self.has_part("level"):
self.xml_tag(u"level", u"%d" % url_data.level)
if url_data.info and self.has_part('info'):
self.xml_starttag(u"infos")
for info in url_data.info:
self.xml_tag(u"info", info)
self.xml_endtag(u"infos")
if url_data.modified and self.has_part('modified'):
self.xml_tag(u"modified", self.format_modified(url_data.modified))
if url_data.warnings and self.has_part('warning'):
self.xml_starttag(u"warnings")
for tag, data in url_data.warnings:
attrs = {}
if tag:
attrs["tag"] = tag
self.xml_tag(u"warning", data, attrs)
self.xml_endtag(u"warnings")
if self.has_part("result"):
attrs = {}
if url_data.result:
attrs["result"] = url_data.result
self.xml_tag(u"valid", u"%d" % (1 if url_data.valid else 0), attrs)
self.xml_endtag(u'urldata')
self.flush() | python | def log_url (self, url_data):
"""
Log URL data in custom XML format.
"""
self.xml_starttag(u'urldata')
if self.has_part('url'):
self.xml_tag(u"url", unicode(url_data.base_url))
if url_data.name and self.has_part('name'):
self.xml_tag(u"name", unicode(url_data.name))
if url_data.parent_url and self.has_part('parenturl'):
attrs = {
u'line': u"%d" % url_data.line,
u'column': u"%d" % url_data.column,
}
self.xml_tag(u"parent", unicode(url_data.parent_url),
attrs=attrs)
if url_data.base_ref and self.has_part('base'):
self.xml_tag(u"baseref", unicode(url_data.base_ref))
if self.has_part("realurl"):
self.xml_tag(u"realurl", unicode(url_data.url))
if self.has_part("extern"):
self.xml_tag(u"extern", u"%d" % (1 if url_data.extern else 0))
if url_data.dltime >= 0 and self.has_part("dltime"):
self.xml_tag(u"dltime", u"%f" % url_data.dltime)
if url_data.size >= 0 and self.has_part("dlsize"):
self.xml_tag(u"dlsize", u"%d" % url_data.size)
if url_data.checktime and self.has_part("checktime"):
self.xml_tag(u"checktime", u"%f" % url_data.checktime)
if self.has_part("level"):
self.xml_tag(u"level", u"%d" % url_data.level)
if url_data.info and self.has_part('info'):
self.xml_starttag(u"infos")
for info in url_data.info:
self.xml_tag(u"info", info)
self.xml_endtag(u"infos")
if url_data.modified and self.has_part('modified'):
self.xml_tag(u"modified", self.format_modified(url_data.modified))
if url_data.warnings and self.has_part('warning'):
self.xml_starttag(u"warnings")
for tag, data in url_data.warnings:
attrs = {}
if tag:
attrs["tag"] = tag
self.xml_tag(u"warning", data, attrs)
self.xml_endtag(u"warnings")
if self.has_part("result"):
attrs = {}
if url_data.result:
attrs["result"] = url_data.result
self.xml_tag(u"valid", u"%d" % (1 if url_data.valid else 0), attrs)
self.xml_endtag(u'urldata')
self.flush() | ['def', 'log_url', '(', 'self', ',', 'url_data', ')', ':', 'self', '.', 'xml_starttag', '(', "u'urldata'", ')', 'if', 'self', '.', 'has_part', '(', "'url'", ')', ':', 'self', '.', 'xml_tag', '(', 'u"url"', ',', 'unicode', '(', 'url_data', '.', 'base_url', ')', ')', 'if', 'url_data', '.', 'name', 'and', 'self', '.', 'has_part', '(', "'name'", ')', ':', 'self', '.', 'xml_tag', '(', 'u"name"', ',', 'unicode', '(', 'url_data', '.', 'name', ')', ')', 'if', 'url_data', '.', 'parent_url', 'and', 'self', '.', 'has_part', '(', "'parenturl'", ')', ':', 'attrs', '=', '{', "u'line'", ':', 'u"%d"', '%', 'url_data', '.', 'line', ',', "u'column'", ':', 'u"%d"', '%', 'url_data', '.', 'column', ',', '}', 'self', '.', 'xml_tag', '(', 'u"parent"', ',', 'unicode', '(', 'url_data', '.', 'parent_url', ')', ',', 'attrs', '=', 'attrs', ')', 'if', 'url_data', '.', 'base_ref', 'and', 'self', '.', 'has_part', '(', "'base'", ')', ':', 'self', '.', 'xml_tag', '(', 'u"baseref"', ',', 'unicode', '(', 'url_data', '.', 'base_ref', ')', ')', 'if', 'self', '.', 'has_part', '(', '"realurl"', ')', ':', 'self', '.', 'xml_tag', '(', 'u"realurl"', ',', 'unicode', '(', 'url_data', '.', 'url', ')', ')', 'if', 'self', '.', 'has_part', '(', '"extern"', ')', ':', 'self', '.', 'xml_tag', '(', 'u"extern"', ',', 'u"%d"', '%', '(', '1', 'if', 'url_data', '.', 'extern', 'else', '0', ')', ')', 'if', 'url_data', '.', 'dltime', '>=', '0', 'and', 'self', '.', 'has_part', '(', '"dltime"', ')', ':', 'self', '.', 'xml_tag', '(', 'u"dltime"', ',', 'u"%f"', '%', 'url_data', '.', 'dltime', ')', 'if', 'url_data', '.', 'size', '>=', '0', 'and', 'self', '.', 'has_part', '(', '"dlsize"', ')', ':', 'self', '.', 'xml_tag', '(', 'u"dlsize"', ',', 'u"%d"', '%', 'url_data', '.', 'size', ')', 'if', 'url_data', '.', 'checktime', 'and', 'self', '.', 'has_part', '(', '"checktime"', ')', ':', 'self', '.', 'xml_tag', '(', 'u"checktime"', ',', 'u"%f"', '%', 'url_data', '.', 'checktime', ')', 'if', 'self', '.', 'has_part', '(', '"level"', ')', ':', 'self', '.', 'xml_tag', '(', 'u"level"', ',', 'u"%d"', '%', 'url_data', '.', 'level', ')', 'if', 'url_data', '.', 'info', 'and', 'self', '.', 'has_part', '(', "'info'", ')', ':', 'self', '.', 'xml_starttag', '(', 'u"infos"', ')', 'for', 'info', 'in', 'url_data', '.', 'info', ':', 'self', '.', 'xml_tag', '(', 'u"info"', ',', 'info', ')', 'self', '.', 'xml_endtag', '(', 'u"infos"', ')', 'if', 'url_data', '.', 'modified', 'and', 'self', '.', 'has_part', '(', "'modified'", ')', ':', 'self', '.', 'xml_tag', '(', 'u"modified"', ',', 'self', '.', 'format_modified', '(', 'url_data', '.', 'modified', ')', ')', 'if', 'url_data', '.', 'warnings', 'and', 'self', '.', 'has_part', '(', "'warning'", ')', ':', 'self', '.', 'xml_starttag', '(', 'u"warnings"', ')', 'for', 'tag', ',', 'data', 'in', 'url_data', '.', 'warnings', ':', 'attrs', '=', '{', '}', 'if', 'tag', ':', 'attrs', '[', '"tag"', ']', '=', 'tag', 'self', '.', 'xml_tag', '(', 'u"warning"', ',', 'data', ',', 'attrs', ')', 'self', '.', 'xml_endtag', '(', 'u"warnings"', ')', 'if', 'self', '.', 'has_part', '(', '"result"', ')', ':', 'attrs', '=', '{', '}', 'if', 'url_data', '.', 'result', ':', 'attrs', '[', '"result"', ']', '=', 'url_data', '.', 'result', 'self', '.', 'xml_tag', '(', 'u"valid"', ',', 'u"%d"', '%', '(', '1', 'if', 'url_data', '.', 'valid', 'else', '0', ')', ',', 'attrs', ')', 'self', '.', 'xml_endtag', '(', "u'urldata'", ')', 'self', '.', 'flush', '(', ')'] | Log URL data in custom XML format. 
| ['Log', 'URL', 'data', 'in', 'custom', 'XML', 'format', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/customxml.py#L45-L96 |
566 | alexa/alexa-skills-kit-sdk-for-python | ask-sdk-dynamodb-persistence-adapter/ask_sdk_dynamodb/adapter.py | DynamoDbAdapter.__create_table_if_not_exists | def __create_table_if_not_exists(self):
# type: () -> None
"""Creates table in Dynamodb resource if it doesn't exist and
create_table is set as True.
:rtype: None
:raises: PersistenceException: When `create_table` fails on
dynamodb resource.
"""
if self.create_table:
try:
self.dynamodb.create_table(
TableName=self.table_name,
KeySchema=[
{
'AttributeName': self.partition_key_name,
'KeyType': 'HASH'
}
],
AttributeDefinitions=[
{
'AttributeName': self.partition_key_name,
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
except Exception as e:
if e.__class__.__name__ != "ResourceInUseException":
raise PersistenceException(
"Create table if not exists request "
"failed: Exception of type {} "
"occurred: {}".format(
type(e).__name__, str(e))) | python | def __create_table_if_not_exists(self):
# type: () -> None
"""Creates table in Dynamodb resource if it doesn't exist and
create_table is set as True.
:rtype: None
:raises: PersistenceException: When `create_table` fails on
dynamodb resource.
"""
if self.create_table:
try:
self.dynamodb.create_table(
TableName=self.table_name,
KeySchema=[
{
'AttributeName': self.partition_key_name,
'KeyType': 'HASH'
}
],
AttributeDefinitions=[
{
'AttributeName': self.partition_key_name,
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
except Exception as e:
if e.__class__.__name__ != "ResourceInUseException":
raise PersistenceException(
"Create table if not exists request "
"failed: Exception of type {} "
"occurred: {}".format(
type(e).__name__, str(e))) | ['def', '__create_table_if_not_exists', '(', 'self', ')', ':', '# type: () -> None', 'if', 'self', '.', 'create_table', ':', 'try', ':', 'self', '.', 'dynamodb', '.', 'create_table', '(', 'TableName', '=', 'self', '.', 'table_name', ',', 'KeySchema', '=', '[', '{', "'AttributeName'", ':', 'self', '.', 'partition_key_name', ',', "'KeyType'", ':', "'HASH'", '}', ']', ',', 'AttributeDefinitions', '=', '[', '{', "'AttributeName'", ':', 'self', '.', 'partition_key_name', ',', "'AttributeType'", ':', "'S'", '}', ']', ',', 'ProvisionedThroughput', '=', '{', "'ReadCapacityUnits'", ':', '5', ',', "'WriteCapacityUnits'", ':', '5', '}', ')', 'except', 'Exception', 'as', 'e', ':', 'if', 'e', '.', '__class__', '.', '__name__', '!=', '"ResourceInUseException"', ':', 'raise', 'PersistenceException', '(', '"Create table if not exists request "', '"failed: Exception of type {} "', '"occurred: {}"', '.', 'format', '(', 'type', '(', 'e', ')', '.', '__name__', ',', 'str', '(', 'e', ')', ')', ')'] | Creates table in Dynamodb resource if it doesn't exist and
create_table is set as True.
:rtype: None
:raises: PersistenceException: When `create_table` fails on
dynamodb resource. | ['Creates', 'table', 'in', 'Dynamodb', 'resource', 'if', 'it', 'doesn', 't', 'exist', 'and', 'create_table', 'is', 'set', 'as', 'True', '.'] | train | https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-dynamodb-persistence-adapter/ask_sdk_dynamodb/adapter.py#L207-L244 |
567 | litters/shrew | shrew/utils/auth.py | FixedOSXKeychain.delete_password | def delete_password(service, username):
"""Delete the password for the username of the service.
"""
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain") | python | def delete_password(service, username):
"""Delete the password for the username of the service.
"""
try:
# set up the call for security.
call = subprocess.Popen(['security',
'delete-generic-password',
'-a',
username,
'-s',
service],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
_, _ = call.communicate()
code = call.returncode
# check return code.
if code is not 0:
raise PasswordSetError('Can\'t delete password in keychain')
except:
raise PasswordSetError("Can't delete password in keychain") | ['def', 'delete_password', '(', 'service', ',', 'username', ')', ':', 'try', ':', '# set up the call for security.', 'call', '=', 'subprocess', '.', 'Popen', '(', '[', "'security'", ',', "'delete-generic-password'", ',', "'-a'", ',', 'username', ',', "'-s'", ',', 'service', ']', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ')', '_', ',', '_', '=', 'call', '.', 'communicate', '(', ')', 'code', '=', 'call', '.', 'returncode', '# check return code.', 'if', 'code', 'is', 'not', '0', ':', 'raise', 'PasswordSetError', '(', "'Can\\'t delete password in keychain'", ')', 'except', ':', 'raise', 'PasswordSetError', '(', '"Can\'t delete password in keychain"', ')'] | Delete the password for the username of the service. | ['Delete', 'the', 'password', 'for', 'the', 'username', 'of', 'the', 'service', '.'] | train | https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L60-L79 |
568 | ThreatConnect-Inc/tcex | tcex/tcex_bin_run.py | TcExRun.data_kva_compare | def data_kva_compare(db_data, user_data):
"""Validate key/value data in KeyValueArray.
Args:
db_data (list): The data store in Redis.
user_data (dict): The user provided data.
Returns:
bool: True if the data passed validation.
"""
for kv_data in db_data:
if kv_data.get('key') == user_data.get('key'):
if kv_data.get('value') == user_data.get('value'):
return True
return False | python | def data_kva_compare(db_data, user_data):
"""Validate key/value data in KeyValueArray.
Args:
db_data (list): The data store in Redis.
user_data (dict): The user provided data.
Returns:
bool: True if the data passed validation.
"""
for kv_data in db_data:
if kv_data.get('key') == user_data.get('key'):
if kv_data.get('value') == user_data.get('value'):
return True
return False | ['def', 'data_kva_compare', '(', 'db_data', ',', 'user_data', ')', ':', 'for', 'kv_data', 'in', 'db_data', ':', 'if', 'kv_data', '.', 'get', '(', "'key'", ')', '==', 'user_data', '.', 'get', '(', "'key'", ')', ':', 'if', 'kv_data', '.', 'get', '(', "'value'", ')', '==', 'user_data', '.', 'get', '(', "'value'", ')', ':', 'return', 'True', 'return', 'False'] | Validate key/value data in KeyValueArray.
Args:
db_data (list): The data stored in Redis.
user_data (dict): The user provided data.
Returns:
bool: True if the data passed validation. | ['Validate', 'key', '/', 'value', 'data', 'in', 'KeyValueArray', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L428-L442 |
569 | miLibris/flask-rest-jsonapi | flask_rest_jsonapi/api.py | Api.oauth_manager | def oauth_manager(self, oauth_manager):
"""Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager
"""
@self.app.before_request
def before_request():
endpoint = request.endpoint
resource = self.app.view_functions[endpoint].view_class
if not getattr(resource, 'disable_oauth'):
scopes = request.args.get('scopes')
if getattr(resource, 'schema'):
scopes = [self.build_scope(resource, request.method)]
elif scopes:
scopes = scopes.split(',')
if scopes:
scopes = scopes.split(',')
valid, req = oauth_manager.verify_request(scopes)
for func in oauth_manager._after_request_funcs:
valid, req = func(valid, req)
if not valid:
if oauth_manager._invalid_response:
return oauth_manager._invalid_response(req)
return abort(401)
request.oauth = req | python | def oauth_manager(self, oauth_manager):
"""Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager
"""
@self.app.before_request
def before_request():
endpoint = request.endpoint
resource = self.app.view_functions[endpoint].view_class
if not getattr(resource, 'disable_oauth'):
scopes = request.args.get('scopes')
if getattr(resource, 'schema'):
scopes = [self.build_scope(resource, request.method)]
elif scopes:
scopes = scopes.split(',')
if scopes:
scopes = scopes.split(',')
valid, req = oauth_manager.verify_request(scopes)
for func in oauth_manager._after_request_funcs:
valid, req = func(valid, req)
if not valid:
if oauth_manager._invalid_response:
return oauth_manager._invalid_response(req)
return abort(401)
request.oauth = req | ['def', 'oauth_manager', '(', 'self', ',', 'oauth_manager', ')', ':', '@', 'self', '.', 'app', '.', 'before_request', 'def', 'before_request', '(', ')', ':', 'endpoint', '=', 'request', '.', 'endpoint', 'resource', '=', 'self', '.', 'app', '.', 'view_functions', '[', 'endpoint', ']', '.', 'view_class', 'if', 'not', 'getattr', '(', 'resource', ',', "'disable_oauth'", ')', ':', 'scopes', '=', 'request', '.', 'args', '.', 'get', '(', "'scopes'", ')', 'if', 'getattr', '(', 'resource', ',', "'schema'", ')', ':', 'scopes', '=', '[', 'self', '.', 'build_scope', '(', 'resource', ',', 'request', '.', 'method', ')', ']', 'elif', 'scopes', ':', 'scopes', '=', 'scopes', '.', 'split', '(', "','", ')', 'if', 'scopes', ':', 'scopes', '=', 'scopes', '.', 'split', '(', "','", ')', 'valid', ',', 'req', '=', 'oauth_manager', '.', 'verify_request', '(', 'scopes', ')', 'for', 'func', 'in', 'oauth_manager', '.', '_after_request_funcs', ':', 'valid', ',', 'req', '=', 'func', '(', 'valid', ',', 'req', ')', 'if', 'not', 'valid', ':', 'if', 'oauth_manager', '.', '_invalid_response', ':', 'return', 'oauth_manager', '.', '_invalid_response', '(', 'req', ')', 'return', 'abort', '(', '401', ')', 'request', '.', 'oauth', '=', 'req'] | Use the oauth manager to enable oauth for API
:param oauth_manager: the oauth manager | ['Use', 'the', 'oauth', 'manager', 'to', 'enable', 'oauth', 'for', 'API'] | train | https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/api.py#L93-L124 |
570 | sixty-north/cosmic-ray | src/cosmic_ray/interceptors/spor.py | _item_in_context | def _item_in_context(lines, item, context):
"""Determines if a WorkItem falls within an anchor.
This only returns True if a WorkItem's start-/stop-pos range is *completely*
within an anchor, not just if it overlaps.
"""
start_offset = _line_and_col_to_offset(lines, item.start_pos[0],
item.start_pos[1])
stop_offset = _line_and_col_to_offset(lines, item.end_pos[0],
item.end_pos[1])
width = stop_offset - start_offset
return start_offset >= context.offset and width <= len(context.topic) | python | def _item_in_context(lines, item, context):
"""Determines if a WorkItem falls within an anchor.
This only returns True if a WorkItem's start-/stop-pos range is *completely*
within an anchor, not just if it overlaps.
"""
start_offset = _line_and_col_to_offset(lines, item.start_pos[0],
item.start_pos[1])
stop_offset = _line_and_col_to_offset(lines, item.end_pos[0],
item.end_pos[1])
width = stop_offset - start_offset
return start_offset >= context.offset and width <= len(context.topic) | ['def', '_item_in_context', '(', 'lines', ',', 'item', ',', 'context', ')', ':', 'start_offset', '=', '_line_and_col_to_offset', '(', 'lines', ',', 'item', '.', 'start_pos', '[', '0', ']', ',', 'item', '.', 'start_pos', '[', '1', ']', ')', 'stop_offset', '=', '_line_and_col_to_offset', '(', 'lines', ',', 'item', '.', 'end_pos', '[', '0', ']', ',', 'item', '.', 'end_pos', '[', '1', ']', ')', 'width', '=', 'stop_offset', '-', 'start_offset', 'return', 'start_offset', '>=', 'context', '.', 'offset', 'and', 'width', '<=', 'len', '(', 'context', '.', 'topic', ')'] | Determines if a WorkItem falls within an anchor.
This only returns True if a WorkItem's start-/stop-pos range is *completely*
within an anchor, not just if it overlaps. | ['Determines', 'if', 'a', 'WorkItem', 'falls', 'within', 'an', 'anchor', '.'] | train | https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/interceptors/spor.py#L93-L105 |
571 | fermiPy/fermipy | fermipy/diffuse/gt_srcmap_partial.py | SrcmapsDiffuse_SG._handle_component | def _handle_component(sourcekey, comp_dict):
"""Make the source objects and write the xml for a component
"""
if comp_dict.comp_key is None:
fullkey = sourcekey
else:
fullkey = "%s_%s" % (sourcekey, comp_dict.comp_key)
srcdict = make_sources(fullkey, comp_dict)
if comp_dict.model_type == 'IsoSource':
print("Writing xml for %s to %s: %s %s" % (fullkey,
comp_dict.srcmdl_name,
comp_dict.model_type,
comp_dict.Spectral_Filename))
elif comp_dict.model_type == 'MapCubeSource':
print("Writing xml for %s to %s: %s %s" % (fullkey,
comp_dict.srcmdl_name,
comp_dict.model_type,
comp_dict.Spatial_Filename))
SrcmapsDiffuse_SG._write_xml(comp_dict.srcmdl_name, srcdict.values()) | python | def _handle_component(sourcekey, comp_dict):
"""Make the source objects and write the xml for a component
"""
if comp_dict.comp_key is None:
fullkey = sourcekey
else:
fullkey = "%s_%s" % (sourcekey, comp_dict.comp_key)
srcdict = make_sources(fullkey, comp_dict)
if comp_dict.model_type == 'IsoSource':
print("Writing xml for %s to %s: %s %s" % (fullkey,
comp_dict.srcmdl_name,
comp_dict.model_type,
comp_dict.Spectral_Filename))
elif comp_dict.model_type == 'MapCubeSource':
print("Writing xml for %s to %s: %s %s" % (fullkey,
comp_dict.srcmdl_name,
comp_dict.model_type,
comp_dict.Spatial_Filename))
SrcmapsDiffuse_SG._write_xml(comp_dict.srcmdl_name, srcdict.values()) | ['def', '_handle_component', '(', 'sourcekey', ',', 'comp_dict', ')', ':', 'if', 'comp_dict', '.', 'comp_key', 'is', 'None', ':', 'fullkey', '=', 'sourcekey', 'else', ':', 'fullkey', '=', '"%s_%s"', '%', '(', 'sourcekey', ',', 'comp_dict', '.', 'comp_key', ')', 'srcdict', '=', 'make_sources', '(', 'fullkey', ',', 'comp_dict', ')', 'if', 'comp_dict', '.', 'model_type', '==', "'IsoSource'", ':', 'print', '(', '"Writing xml for %s to %s: %s %s"', '%', '(', 'fullkey', ',', 'comp_dict', '.', 'srcmdl_name', ',', 'comp_dict', '.', 'model_type', ',', 'comp_dict', '.', 'Spectral_Filename', ')', ')', 'elif', 'comp_dict', '.', 'model_type', '==', "'MapCubeSource'", ':', 'print', '(', '"Writing xml for %s to %s: %s %s"', '%', '(', 'fullkey', ',', 'comp_dict', '.', 'srcmdl_name', ',', 'comp_dict', '.', 'model_type', ',', 'comp_dict', '.', 'Spatial_Filename', ')', ')', 'SrcmapsDiffuse_SG', '.', '_write_xml', '(', 'comp_dict', '.', 'srcmdl_name', ',', 'srcdict', '.', 'values', '(', ')', ')'] | Make the source objects and write the xml for a component | ['Make', 'the', 'source', 'objects', 'and', 'write', 'the', 'xml', 'for', 'a', 'component'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_srcmap_partial.py#L141-L159 |
572 | mlperf/training | translation/tensorflow/transformer/transformer_main.py | get_learning_rate | def get_learning_rate(learning_rate, hidden_size, learning_rate_warmup_steps):
"""Calculate learning rate with linear warmup and rsqrt decay."""
with tf.name_scope("learning_rate"):
warmup_steps = tf.to_float(learning_rate_warmup_steps)
step = tf.to_float(tf.train.get_or_create_global_step())
learning_rate *= (hidden_size ** -0.5)
# Apply linear warmup
learning_rate *= tf.minimum(1.0, step / warmup_steps)
# Apply rsqrt decay
learning_rate *= tf.rsqrt(tf.maximum(step, warmup_steps))
# Save learning rate value to TensorBoard summary.
tf.summary.scalar("learning_rate", learning_rate)
return learning_rate | python | def get_learning_rate(learning_rate, hidden_size, learning_rate_warmup_steps):
"""Calculate learning rate with linear warmup and rsqrt decay."""
with tf.name_scope("learning_rate"):
warmup_steps = tf.to_float(learning_rate_warmup_steps)
step = tf.to_float(tf.train.get_or_create_global_step())
learning_rate *= (hidden_size ** -0.5)
# Apply linear warmup
learning_rate *= tf.minimum(1.0, step / warmup_steps)
# Apply rsqrt decay
learning_rate *= tf.rsqrt(tf.maximum(step, warmup_steps))
# Save learning rate value to TensorBoard summary.
tf.summary.scalar("learning_rate", learning_rate)
return learning_rate | ['def', 'get_learning_rate', '(', 'learning_rate', ',', 'hidden_size', ',', 'learning_rate_warmup_steps', ')', ':', 'with', 'tf', '.', 'name_scope', '(', '"learning_rate"', ')', ':', 'warmup_steps', '=', 'tf', '.', 'to_float', '(', 'learning_rate_warmup_steps', ')', 'step', '=', 'tf', '.', 'to_float', '(', 'tf', '.', 'train', '.', 'get_or_create_global_step', '(', ')', ')', 'learning_rate', '*=', '(', 'hidden_size', '**', '-', '0.5', ')', '# Apply linear warmup', 'learning_rate', '*=', 'tf', '.', 'minimum', '(', '1.0', ',', 'step', '/', 'warmup_steps', ')', '# Apply rsqrt decay', 'learning_rate', '*=', 'tf', '.', 'rsqrt', '(', 'tf', '.', 'maximum', '(', 'step', ',', 'warmup_steps', ')', ')', '# Save learning rate value to TensorBoard summary.', 'tf', '.', 'summary', '.', 'scalar', '(', '"learning_rate"', ',', 'learning_rate', ')', 'return', 'learning_rate'] | Calculate learning rate with linear warmup and rsqrt decay. | ['Calculate', 'learning', 'rate', 'with', 'linear', 'warmup', 'and', 'rsqrt', 'decay', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/transformer_main.py#L82-L97 |
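A small pure-Python sketch of the warmup-plus-rsqrt-decay schedule computed by get_learning_rate above, with the TensorFlow graph ops replaced by plain math so the curve can be inspected directly; base_lr, hidden_size and warmup_steps below are illustrative values, not ones taken from the training configuration.
import math

def transformer_lr(step, base_lr=2.0, hidden_size=512, warmup_steps=16000):
    # Scale by the inverse square root of the model width.
    lr = base_lr * hidden_size ** -0.5
    # Ramp up linearly during warmup, then decay as 1/sqrt(step).
    lr *= min(1.0, step / float(warmup_steps))
    lr *= 1.0 / math.sqrt(max(step, warmup_steps))
    return lr

for step in (1, 8000, 16000, 64000):
    print(step, transformer_lr(step))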
573 | zyga/guacamole | examples/rainbow.py | ANSIDemo.invoked | def invoked(self, ctx):
"""Method called when the command is invoked."""
if not ctx.ansi.is_enabled:
print("You need color support to use this demo")
else:
print(ctx.ansi.cmd('erase_display'))
self._demo_fg_color(ctx)
self._demo_bg_color(ctx)
self._demo_bg_indexed(ctx)
self._demo_rgb(ctx)
self._demo_style(ctx) | python | def invoked(self, ctx):
"""Method called when the command is invoked."""
if not ctx.ansi.is_enabled:
print("You need color support to use this demo")
else:
print(ctx.ansi.cmd('erase_display'))
self._demo_fg_color(ctx)
self._demo_bg_color(ctx)
self._demo_bg_indexed(ctx)
self._demo_rgb(ctx)
self._demo_style(ctx) | ['def', 'invoked', '(', 'self', ',', 'ctx', ')', ':', 'if', 'not', 'ctx', '.', 'ansi', '.', 'is_enabled', ':', 'print', '(', '"You need color support to use this demo"', ')', 'else', ':', 'print', '(', 'ctx', '.', 'ansi', '.', 'cmd', '(', "'erase_display'", ')', ')', 'self', '.', '_demo_fg_color', '(', 'ctx', ')', 'self', '.', '_demo_bg_color', '(', 'ctx', ')', 'self', '.', '_demo_bg_indexed', '(', 'ctx', ')', 'self', '.', '_demo_rgb', '(', 'ctx', ')', 'self', '.', '_demo_style', '(', 'ctx', ')'] | Method called when the command is invoked. | ['Method', 'called', 'when', 'the', 'command', 'is', 'invoked', '.'] | train | https://github.com/zyga/guacamole/blob/105c10a798144e3b89659b500d7c2b84b0c76546/examples/rainbow.py#L45-L55 |
574 | junaruga/rpm-py-installer | install.py | Cmd.tar_extract | def tar_extract(cls, tar_comp_file_path):
"""Extract tar.gz or tar bz2 file.
It behaves like
- tar xzf tar_gz_file_path
- tar xjf tar_bz2_file_path
It raises tarfile.ReadError if the file is broken.
"""
try:
with contextlib.closing(tarfile.open(tar_comp_file_path)) as tar:
tar.extractall()
except tarfile.ReadError as e:
message_format = (
'Extract failed: '
'tar_comp_file_path: {0}, reason: {1}'
)
raise InstallError(message_format.format(tar_comp_file_path, e)) | python | def tar_extract(cls, tar_comp_file_path):
"""Extract tar.gz or tar bz2 file.
It behaves like
- tar xzf tar_gz_file_path
- tar xjf tar_bz2_file_path
It raises tarfile.ReadError if the file is broken.
"""
try:
with contextlib.closing(tarfile.open(tar_comp_file_path)) as tar:
tar.extractall()
except tarfile.ReadError as e:
message_format = (
'Extract failed: '
'tar_comp_file_path: {0}, reason: {1}'
)
raise InstallError(message_format.format(tar_comp_file_path, e)) | ['def', 'tar_extract', '(', 'cls', ',', 'tar_comp_file_path', ')', ':', 'try', ':', 'with', 'contextlib', '.', 'closing', '(', 'tarfile', '.', 'open', '(', 'tar_comp_file_path', ')', ')', 'as', 'tar', ':', 'tar', '.', 'extractall', '(', ')', 'except', 'tarfile', '.', 'ReadError', 'as', 'e', ':', 'message_format', '=', '(', "'Extract failed: '", "'tar_comp_file_path: {0}, reason: {1}'", ')', 'raise', 'InstallError', '(', 'message_format', '.', 'format', '(', 'tar_comp_file_path', ',', 'e', ')', ')'] | Extract tar.gz or tar bz2 file.
It behaves like
- tar xzf tar_gz_file_path
- tar xjf tar_bz2_file_path
It raises tarfile.ReadError if the file is broken. | ['Extract', 'tar', '.', 'gz', 'or', 'tar', 'bz2', 'file', '.'] | train | https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L1841-L1857 |
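A self-contained version of the stdlib pattern wrapped by tar_extract above; RuntimeError stands in for the project's InstallError, and any path passed to it is hypothetical.
import contextlib
import tarfile

def extract_archive(path, dest='.'):
    # tarfile.open autodetects gzip/bz2 compression, so this mirrors
    # "tar xzf" / "tar xjf"; ReadError means the archive is corrupt.
    try:
        with contextlib.closing(tarfile.open(path)) as tar:
            tar.extractall(dest)
    except tarfile.ReadError as exc:
        raise RuntimeError('Extract failed: {0}: {1}'.format(path, exc))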
575 | clusterpoint/python-client-api | pycps/converters.py | to_raw_xml | def to_raw_xml(source):
""" Convert various representations of an XML structure to a normal XML string.
Args:
source -- The source object to be converted - ET.Element, dict or string.
Returns:
A raw xml string matching the source object.
>>> to_raw_xml("<content/>")
'<content/>'
>>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}})
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
>>> to_raw_xml(ET.Element('root'))
'<root/>'
"""
if isinstance(source, basestring):
return source
elif hasattr(source, 'getiterator'): # Element or ElementTree.
return ET.tostring(source, encoding="utf-8")
elif hasattr(source, 'keys'): # Dict.
xml_root = dict_to_etree(source)
return ET.tostring(xml_root, encoding="utf-8")
else:
raise TypeError("Accepted representations of a document are string, dict and etree") | python | def to_raw_xml(source):
""" Convert various representations of an XML structure to a normal XML string.
Args:
source -- The source object to be converted - ET.Element, dict or string.
Returns:
A raw xml string matching the source object.
>>> to_raw_xml("<content/>")
'<content/>'
>>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}})
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
>>> to_raw_xml(ET.Element('root'))
'<root/>'
"""
if isinstance(source, basestring):
return source
elif hasattr(source, 'getiterator'): # Element or ElementTree.
return ET.tostring(source, encoding="utf-8")
elif hasattr(source, 'keys'): # Dict.
xml_root = dict_to_etree(source)
return ET.tostring(xml_root, encoding="utf-8")
else:
raise TypeError("Accepted representations of a document are string, dict and etree") | ['def', 'to_raw_xml', '(', 'source', ')', ':', 'if', 'isinstance', '(', 'source', ',', 'basestring', ')', ':', 'return', 'source', 'elif', 'hasattr', '(', 'source', ',', "'getiterator'", ')', ':', '# Element or ElementTree.', 'return', 'ET', '.', 'tostring', '(', 'source', ',', 'encoding', '=', '"utf-8"', ')', 'elif', 'hasattr', '(', 'source', ',', "'keys'", ')', ':', '# Dict.', 'xml_root', '=', 'dict_to_etree', '(', 'source', ')', 'return', 'ET', '.', 'tostring', '(', 'xml_root', ',', 'encoding', '=', '"utf-8"', ')', 'else', ':', 'raise', 'TypeError', '(', '"Accepted representations of a document are string, dict and etree"', ')'] | Convert various representations of an XML structure to a normal XML string.
Args:
source -- The source object to be converted - ET.Element, dict or string.
Returns:
A raw xml string matching the source object.
>>> to_raw_xml("<content/>")
'<content/>'
>>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}})
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
>>> to_raw_xml(ET.Element('root'))
'<root/>' | ['Convert', 'various', 'representations', 'of', 'an', 'XML', 'structure', 'to', 'a', 'normal', 'XML', 'string', '.'] | train | https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/converters.py#L178-L204 |
576 | mozillazg/baidu-pcs-python-sdk | baidupcs/api.py | PCS.list_download_tasks | def list_download_tasks(self, need_task_info=1, start=0, limit=10, asc=0,
create_time=None, status=None, source_url=None,
remote_path=None, expires=None, **kwargs):
"""查询离线下载任务ID列表及任务信息.
:param need_task_info: 是否需要返回任务信息:
* 0:不需要
* 1:需要,默认为1
:param start: 查询任务起始位置,默认为0。
:param limit: 设定返回任务数量,默认为10。
:param asc:
* 0:降序,默认值
* 1:升序
:param create_time: 任务创建时间,默认为空。
:type create_time: int
:param status: 任务状态,默认为空。
0:下载成功,1:下载进行中 2:系统错误,3:资源不存在,
4:下载超时,5:资源存在但下载失败, 6:存储空间不足,
7:目标地址数据已存在, 8:任务取消.
:type status: int
:param source_url: 源地址URL,默认为空。
:param remote_path: 文件保存路径,默认为空。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
data = {
'expires': expires,
'start': start,
'limit': limit,
'asc': asc,
'source_url': source_url,
'save_path': remote_path,
'create_time': create_time,
'status': status,
'need_task_info': need_task_info,
}
return self._request('services/cloud_dl', 'list_task',
data=data, **kwargs) | python | def list_download_tasks(self, need_task_info=1, start=0, limit=10, asc=0,
create_time=None, status=None, source_url=None,
remote_path=None, expires=None, **kwargs):
"""查询离线下载任务ID列表及任务信息.
:param need_task_info: 是否需要返回任务信息:
* 0:不需要
* 1:需要,默认为1
:param start: 查询任务起始位置,默认为0。
:param limit: 设定返回任务数量,默认为10。
:param asc:
* 0:降序,默认值
* 1:升序
:param create_time: 任务创建时间,默认为空。
:type create_time: int
:param status: 任务状态,默认为空。
0:下载成功,1:下载进行中 2:系统错误,3:资源不存在,
4:下载超时,5:资源存在但下载失败, 6:存储空间不足,
7:目标地址数据已存在, 8:任务取消.
:type status: int
:param source_url: 源地址URL,默认为空。
:param remote_path: 文件保存路径,默认为空。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
data = {
'expires': expires,
'start': start,
'limit': limit,
'asc': asc,
'source_url': source_url,
'save_path': remote_path,
'create_time': create_time,
'status': status,
'need_task_info': need_task_info,
}
return self._request('services/cloud_dl', 'list_task',
data=data, **kwargs) | ['def', 'list_download_tasks', '(', 'self', ',', 'need_task_info', '=', '1', ',', 'start', '=', '0', ',', 'limit', '=', '10', ',', 'asc', '=', '0', ',', 'create_time', '=', 'None', ',', 'status', '=', 'None', ',', 'source_url', '=', 'None', ',', 'remote_path', '=', 'None', ',', 'expires', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'data', '=', '{', "'expires'", ':', 'expires', ',', "'start'", ':', 'start', ',', "'limit'", ':', 'limit', ',', "'asc'", ':', 'asc', ',', "'source_url'", ':', 'source_url', ',', "'save_path'", ':', 'remote_path', ',', "'create_time'", ':', 'create_time', ',', "'status'", ':', 'status', ',', "'need_task_info'", ':', 'need_task_info', ',', '}', 'return', 'self', '.', '_request', '(', "'services/cloud_dl'", ',', "'list_task'", ',', 'data', '=', 'data', ',', '*', '*', 'kwargs', ')'] | 查询离线下载任务ID列表及任务信息.
:param need_task_info: 是否需要返回任务信息:
* 0:不需要
* 1:需要,默认为1
:param start: 查询任务起始位置,默认为0。
:param limit: 设定返回任务数量,默认为10。
:param asc:
* 0:降序,默认值
* 1:升序
:param create_time: 任务创建时间,默认为空。
:type create_time: int
:param status: 任务状态,默认为空。
0:下载成功,1:下载进行中 2:系统错误,3:资源不存在,
4:下载超时,5:资源存在但下载失败, 6:存储空间不足,
7:目标地址数据已存在, 8:任务取消.
:type status: int
:param source_url: 源地址URL,默认为空。
:param remote_path: 文件保存路径,默认为空。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象 | ['查询离线下载任务ID列表及任务信息', '.'] | train | https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L791-L839 |
577 | zrnsm/pyculiarity | pyculiarity/detect_vec.py | detect_vec | def detect_vec(df, max_anoms=0.10, direction='pos',
alpha=0.05, period=None, only_last=False,
threshold=None, e_value=False, longterm_period=None,
plot=False, y_log=False, xlabel='', ylabel='count',
title=None, verbose=False):
"""
Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of observations.
Args:
x: Time series as a column data frame, list, or vector, where the column consists of
the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
period: Defines the number of observations in a single period, and used during seasonal
decomposition.
only_last: Find and report anomalies only within the last period in the time series.
threshold: Only report positive going anoms above the threshold specified. Options are: ('None' | 'med_max' | 'p95' | 'p99').
e_value: Add an additional column to the anoms output containing the expected value.
longterm_period: Defines the number of observations for which the trend can be considered
flat. The value should be an integer multiple of the number of observations in a single period.
This increases anom detection efficacy for time series that are greater than a month.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm_period' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing index, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series.
"""
if (isinstance(df, DataFrame) and
len(df.columns) == 1 and
df.iloc[:,0].applymap(np.isreal).all(1)):
d = {
'timestamp': range(len(df.iloc[:,0])),
'value': df.iloc[:,0]
}
df = DataFrame(d, index=d['timestamp'])
elif isinstance(df, Series):
d = {
'timestamp': range(len(df)),
'value': df
}
df = DataFrame(d, index=d['timestamp'])
else:
raise ValueError(("data must be a single data frame, "
"list, or vector that holds numeric values."))
if max_anoms > 0.49:
length = len(df.value)
raise ValueError(
("max_anoms must be less than 50% of "
"the data points (max_anoms =%f data_points =%s).")
% (round(max_anoms * length, 0), length))
if not direction in ['pos', 'neg', 'both']:
raise ValueError("direction options are: pos | neg | both.")
if not (0.01 <= alpha or alpha <= 0.1):
if verbose:
import warnings
warnings.warn(("alpha is the statistical signifigance, "
"and is usually between 0.01 and 0.1"))
if not period:
raise ValueError(("Period must be set to the number "
"of data points in a single period"))
if not isinstance(only_last, bool):
raise ValueError("only_last must be a boolean")
if not threshold in [None,'med_max','p95','p99']:
raise ValueError("threshold options are: None | med_max | p95 | p99")
if not isinstance(e_value, bool):
raise ValueError("e_value must be a boolean")
if not isinstance(plot, bool):
raise ValueError("plot must be a boolean")
if not isinstance(y_log, bool):
raise ValueError("y_log must be a boolean")
if not isinstance(xlabel, string_types):
raise ValueError("xlabel must be a string")
if not isinstance(ylabel, string_types):
raise ValueError("ylabel must be a string")
if title and not isinstance(title, string_types):
raise ValueError("title must be a string")
if not title:
title = ''
else:
title = title + " : "
# -- Main analysis: Perform S-H-ESD
num_obs = len(df.value)
clamp = (1 / float(num_obs))
if max_anoms < clamp:
max_anoms = clamp
# -- Setup for longterm time series
# If longterm is enabled, break the data into subset
# data frames and store in all_data,
if longterm_period:
all_data = []
for j in range(0, len(df.timestamp), longterm_period):
start_index = df.timestamp.iloc[j]
end_index = min((start_index + longterm_period), num_obs)
if (end_index - start_index) == longterm_period:
sub_df = df[(df.timestamp >= start_index)
& (df.timestamp <= end_index)]
else:
sub_df = df[(df.timestamp >= (num_obs - longterm_period)) &
(df.timestamp <= num_obs)]
all_data.append(sub_df)
else:
all_data = [df]
# Create empty data frames to store all anoms and
# seasonal+trend component from decomposition
all_anoms = DataFrame(columns=['timestamp', 'value'])
seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])
# Detect anomalies on all data (either entire data in one-pass,
# or in 2 week blocks if longterm=TRUE)
for i in range(len(all_data)):
directions = {
'pos': Direction(True, True),
'neg': Direction(True, False),
'both': Direction(False, True)
}
anomaly_direction = directions[direction]
s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms,
alpha=alpha,
num_obs_per_period=period,
use_decomp=True,
one_tail=anomaly_direction.one_tail,
upper_tail=anomaly_direction.upper_tail,
verbose=verbose)
# store decomposed components in local variable and
# overwrite s_h_esd_timestamps to contain only the anom timestamps
data_decomp = s_h_esd_timestamps['stl']
s_h_esd_timestamps = s_h_esd_timestamps['anoms']
# -- Step 3: Use detected anomaly timestamps to
# extract the actual anomalies (timestamp and value) from the data
if s_h_esd_timestamps:
anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
else:
anoms = DataFrame(columns=['timestamp', 'value'])
# Filter the anomalies using one of the thresholding
# functions if applicable
if threshold:
# Calculate daily max values
if isinstance(all_data[i].index[0], Timestamp):
group = all_data[i].timestamp.map(Timestamp.date)
else:
group = all_data[i].timestamp.map(lambda t: int(t / period))
periodic_maxes = df.groupby(group).aggregate(np.max).value
# Calculate the threshold set by the user
if threshold == 'med_max':
thresh = periodic_maxes.median()
elif threshold == 'p95':
thresh = periodic_maxes.quantile(.95)
elif threshold == 'p99':
thresh = periodic_maxes.quantile(.99)
# Remove any anoms below the threshold
anoms = anoms[anoms.value >= thresh]
all_anoms = all_anoms.append(anoms)
seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)
# Cleanup potential duplicates
try:
all_anoms.drop_duplicates(subset=['timestamp'])
seasonal_plus_trend.drop_duplicates(subset=['timestamp'])
except TypeError:
all_anoms.drop_duplicates(cols=['timestamp'])
seasonal_plus_trend.drop_duplicates(cols=['timestamp'])
# -- If only_last was set by the user, create subset of
# the data that represent the most recent period
if only_last:
d = {
'timestamp': df.timestamp.iloc[-period:],
'value': df.value.iloc[-period:]
}
x_subset_single_period = DataFrame(d, index = d['timestamp'])
past_obs = period * 7
if num_obs < past_obs:
past_obs = num_obs - period
# When plotting anoms for the last period only we only show
# the previous 7 periods of data
d = {
'timestamp': df.timestamp.iloc[-past_obs:-period],
'value': df.value.iloc[-past_obs:-period]
}
x_subset_previous = DataFrame(d, index=d['timestamp'])
all_anoms = all_anoms[all_anoms.timestamp
>= x_subset_single_period.timestamp.iloc[0]]
num_obs = len(x_subset_single_period.value)
# Calculate number of anomalies as a percentage
anom_pct = (len(df.value) / float(num_obs)) * 100
if anom_pct == 0:
return {
"anoms": None,
"plot": None
}
# The original R implementation handles plotting here.
# Plotting is currently not implemented.
# if plot:
# plot_something()
all_anoms.index = all_anoms.timestamp
if e_value:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value,
'expected_value': seasonal_plus_trend[
seasonal_plus_trend.timestamp.isin(
all_anoms.timestamp)].value
}
else:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value
}
anoms = DataFrame(d, index=d['timestamp'].index)
return {
'anoms': anoms,
'plot': None
} | python | def detect_vec(df, max_anoms=0.10, direction='pos',
alpha=0.05, period=None, only_last=False,
threshold=None, e_value=False, longterm_period=None,
plot=False, y_log=False, xlabel='', ylabel='count',
title=None, verbose=False):
"""
Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of observations.
Args:
x: Time series as a column data frame, list, or vector, where the column consists of
the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
period: Defines the number of observations in a single period, and used during seasonal
decomposition.
only_last: Find and report anomalies only within the last period in the time series.
threshold: Only report positive going anoms above the threshold specified. Options are: ('None' | 'med_max' | 'p95' | 'p99').
e_value: Add an additional column to the anoms output containing the expected value.
longterm_period: Defines the number of observations for which the trend can be considered
flat. The value should be an integer multiple of the number of observations in a single period.
This increases anom detection efficacy for time series that are greater than a month.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm_period' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing index, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series.
"""
if (isinstance(df, DataFrame) and
len(df.columns) == 1 and
df.iloc[:,0].applymap(np.isreal).all(1)):
d = {
'timestamp': range(len(df.iloc[:,0])),
'value': df.iloc[:,0]
}
df = DataFrame(d, index=d['timestamp'])
elif isinstance(df, Series):
d = {
'timestamp': range(len(df)),
'value': df
}
df = DataFrame(d, index=d['timestamp'])
else:
raise ValueError(("data must be a single data frame, "
"list, or vector that holds numeric values."))
if max_anoms > 0.49:
length = len(df.value)
raise ValueError(
("max_anoms must be less than 50% of "
"the data points (max_anoms =%f data_points =%s).")
% (round(max_anoms * length, 0), length))
if not direction in ['pos', 'neg', 'both']:
raise ValueError("direction options are: pos | neg | both.")
if not (0.01 <= alpha or alpha <= 0.1):
if verbose:
import warnings
warnings.warn(("alpha is the statistical signifigance, "
"and is usually between 0.01 and 0.1"))
if not period:
raise ValueError(("Period must be set to the number "
"of data points in a single period"))
if not isinstance(only_last, bool):
raise ValueError("only_last must be a boolean")
if not threshold in [None,'med_max','p95','p99']:
raise ValueError("threshold options are: None | med_max | p95 | p99")
if not isinstance(e_value, bool):
raise ValueError("e_value must be a boolean")
if not isinstance(plot, bool):
raise ValueError("plot must be a boolean")
if not isinstance(y_log, bool):
raise ValueError("y_log must be a boolean")
if not isinstance(xlabel, string_types):
raise ValueError("xlabel must be a string")
if not isinstance(ylabel, string_types):
raise ValueError("ylabel must be a string")
if title and not isinstance(title, string_types):
raise ValueError("title must be a string")
if not title:
title = ''
else:
title = title + " : "
# -- Main analysis: Perform S-H-ESD
num_obs = len(df.value)
clamp = (1 / float(num_obs))
if max_anoms < clamp:
max_anoms = clamp
# -- Setup for longterm time series
# If longterm is enabled, break the data into subset
# data frames and store in all_data,
if longterm_period:
all_data = []
for j in range(0, len(df.timestamp), longterm_period):
start_index = df.timestamp.iloc[j]
end_index = min((start_index + longterm_period), num_obs)
if (end_index - start_index) == longterm_period:
sub_df = df[(df.timestamp >= start_index)
& (df.timestamp <= end_index)]
else:
sub_df = df[(df.timestamp >= (num_obs - longterm_period)) &
(df.timestamp <= num_obs)]
all_data.append(sub_df)
else:
all_data = [df]
# Create empty data frames to store all anoms and
# seasonal+trend component from decomposition
all_anoms = DataFrame(columns=['timestamp', 'value'])
seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])
# Detect anomalies on all data (either entire data in one-pass,
# or in 2 week blocks if longterm=TRUE)
for i in range(len(all_data)):
directions = {
'pos': Direction(True, True),
'neg': Direction(True, False),
'both': Direction(False, True)
}
anomaly_direction = directions[direction]
s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms,
alpha=alpha,
num_obs_per_period=period,
use_decomp=True,
one_tail=anomaly_direction.one_tail,
upper_tail=anomaly_direction.upper_tail,
verbose=verbose)
# store decomposed components in local variable and
# overwrite s_h_esd_timestamps to contain only the anom timestamps
data_decomp = s_h_esd_timestamps['stl']
s_h_esd_timestamps = s_h_esd_timestamps['anoms']
# -- Step 3: Use detected anomaly timestamps to
# extract the actual anomalies (timestamp and value) from the data
if s_h_esd_timestamps:
anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
else:
anoms = DataFrame(columns=['timestamp', 'value'])
# Filter the anomalies using one of the thresholding
# functions if applicable
if threshold:
# Calculate daily max values
if isinstance(all_data[i].index[0], Timestamp):
group = all_data[i].timestamp.map(Timestamp.date)
else:
group = all_data[i].timestamp.map(lambda t: int(t / period))
periodic_maxes = df.groupby(group).aggregate(np.max).value
# Calculate the threshold set by the user
if threshold == 'med_max':
thresh = periodic_maxes.median()
elif threshold == 'p95':
thresh = periodic_maxes.quantile(.95)
elif threshold == 'p99':
thresh = periodic_maxes.quantile(.99)
# Remove any anoms below the threshold
anoms = anoms[anoms.value >= thresh]
all_anoms = all_anoms.append(anoms)
seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)
# Cleanup potential duplicates
try:
all_anoms.drop_duplicates(subset=['timestamp'])
seasonal_plus_trend.drop_duplicates(subset=['timestamp'])
except TypeError:
all_anoms.drop_duplicates(cols=['timestamp'])
seasonal_plus_trend.drop_duplicates(cols=['timestamp'])
# -- If only_last was set by the user, create subset of
# the data that represent the most recent period
if only_last:
d = {
'timestamp': df.timestamp.iloc[-period:],
'value': df.value.iloc[-period:]
}
x_subset_single_period = DataFrame(d, index = d['timestamp'])
past_obs = period * 7
if num_obs < past_obs:
past_obs = num_obs - period
# When plotting anoms for the last period only we only show
# the previous 7 periods of data
d = {
'timestamp': df.timestamp.iloc[-past_obs:-period],
'value': df.value.iloc[-past_obs:-period]
}
x_subset_previous = DataFrame(d, index=d['timestamp'])
all_anoms = all_anoms[all_anoms.timestamp
>= x_subset_single_period.timestamp.iloc[0]]
num_obs = len(x_subset_single_period.value)
# Calculate number of anomalies as a percentage
anom_pct = (len(df.value) / float(num_obs)) * 100
if anom_pct == 0:
return {
"anoms": None,
"plot": None
}
# The original R implementation handles plotting here.
# Plotting is currently not implemented.
# if plot:
# plot_something()
all_anoms.index = all_anoms.timestamp
if e_value:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value,
'expected_value': seasonal_plus_trend[
seasonal_plus_trend.timestamp.isin(
all_anoms.timestamp)].value
}
else:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value
}
anoms = DataFrame(d, index=d['timestamp'].index)
return {
'anoms': anoms,
'plot': None
} | ['def', 'detect_vec', '(', 'df', ',', 'max_anoms', '=', '0.10', ',', 'direction', '=', "'pos'", ',', 'alpha', '=', '0.05', ',', 'period', '=', 'None', ',', 'only_last', '=', 'False', ',', 'threshold', '=', 'None', ',', 'e_value', '=', 'False', ',', 'longterm_period', '=', 'None', ',', 'plot', '=', 'False', ',', 'y_log', '=', 'False', ',', 'xlabel', '=', "''", ',', 'ylabel', '=', "'count'", ',', 'title', '=', 'None', ',', 'verbose', '=', 'False', ')', ':', 'if', '(', 'isinstance', '(', 'df', ',', 'DataFrame', ')', 'and', 'len', '(', 'df', '.', 'columns', ')', '==', '1', 'and', 'df', '.', 'iloc', '[', ':', ',', '0', ']', '.', 'applymap', '(', 'np', '.', 'isreal', ')', '.', 'all', '(', '1', ')', ')', ':', 'd', '=', '{', "'timestamp'", ':', 'range', '(', 'len', '(', 'df', '.', 'iloc', '[', ':', ',', '0', ']', ')', ')', ',', "'value'", ':', 'df', '.', 'iloc', '[', ':', ',', '0', ']', '}', 'df', '=', 'DataFrame', '(', 'd', ',', 'index', '=', 'd', '[', "'timestamp'", ']', ')', 'elif', 'isinstance', '(', 'df', ',', 'Series', ')', ':', 'd', '=', '{', "'timestamp'", ':', 'range', '(', 'len', '(', 'df', ')', ')', ',', "'value'", ':', 'df', '}', 'df', '=', 'DataFrame', '(', 'd', ',', 'index', '=', 'd', '[', "'timestamp'", ']', ')', 'else', ':', 'raise', 'ValueError', '(', '(', '"data must be a single data frame, "', '"list, or vector that holds numeric values."', ')', ')', 'if', 'max_anoms', '>', '0.49', ':', 'length', '=', 'len', '(', 'df', '.', 'value', ')', 'raise', 'ValueError', '(', '(', '"max_anoms must be less than 50% of "', '"the data points (max_anoms =%f data_points =%s)."', ')', '%', '(', 'round', '(', 'max_anoms', '*', 'length', ',', '0', ')', ',', 'length', ')', ')', 'if', 'not', 'direction', 'in', '[', "'pos'", ',', "'neg'", ',', "'both'", ']', ':', 'raise', 'ValueError', '(', '"direction options are: pos | neg | both."', ')', 'if', 'not', '(', '0.01', '<=', 'alpha', 'or', 'alpha', '<=', '0.1', ')', ':', 'if', 'verbose', ':', 'import', 'warnings', 'warnings', '.', 'warn', '(', '(', '"alpha is the statistical signifigance, "', '"and is usually between 0.01 and 0.1"', ')', ')', 'if', 'not', 'period', ':', 'raise', 'ValueError', '(', '(', '"Period must be set to the number "', '"of data points in a single period"', ')', ')', 'if', 'not', 'isinstance', '(', 'only_last', ',', 'bool', ')', ':', 'raise', 'ValueError', '(', '"only_last must be a boolean"', ')', 'if', 'not', 'threshold', 'in', '[', 'None', ',', "'med_max'", ',', "'p95'", ',', "'p99'", ']', ':', 'raise', 'ValueError', '(', '"threshold options are: None | med_max | p95 | p99"', ')', 'if', 'not', 'isinstance', '(', 'e_value', ',', 'bool', ')', ':', 'raise', 'ValueError', '(', '"e_value must be a boolean"', ')', 'if', 'not', 'isinstance', '(', 'plot', ',', 'bool', ')', ':', 'raise', 'ValueError', '(', '"plot must be a boolean"', ')', 'if', 'not', 'isinstance', '(', 'y_log', ',', 'bool', ')', ':', 'raise', 'ValueError', '(', '"y_log must be a boolean"', ')', 'if', 'not', 'isinstance', '(', 'xlabel', ',', 'string_types', ')', ':', 'raise', 'ValueError', '(', '"xlabel must be a string"', ')', 'if', 'not', 'isinstance', '(', 'ylabel', ',', 'string_types', ')', ':', 'raise', 'ValueError', '(', '"ylabel must be a string"', ')', 'if', 'title', 'and', 'not', 'isinstance', '(', 'title', ',', 'string_types', ')', ':', 'raise', 'ValueError', '(', '"title must be a string"', ')', 'if', 'not', 'title', ':', 'title', '=', "''", 'else', ':', 'title', '=', 'title', '+', '" : "', '# -- Main analysis: Perform S-H-ESD', 'num_obs', '=', 'len', '(', 
'df', '.', 'value', ')', 'clamp', '=', '(', '1', '/', 'float', '(', 'num_obs', ')', ')', 'if', 'max_anoms', '<', 'clamp', ':', 'max_anoms', '=', 'clamp', '# -- Setup for longterm time series', '# If longterm is enabled, break the data into subset', '# data frames and store in all_data,', 'if', 'longterm_period', ':', 'all_data', '=', '[', ']', 'for', 'j', 'in', 'range', '(', '0', ',', 'len', '(', 'df', '.', 'timestamp', ')', ',', 'longterm_period', ')', ':', 'start_index', '=', 'df', '.', 'timestamp', '.', 'iloc', '[', 'j', ']', 'end_index', '=', 'min', '(', '(', 'start_index', '+', 'longterm_period', ')', ',', 'num_obs', ')', 'if', '(', 'end_index', '-', 'start_index', ')', '==', 'longterm_period', ':', 'sub_df', '=', 'df', '[', '(', 'df', '.', 'timestamp', '>=', 'start_index', ')', '&', '(', 'df', '.', 'timestamp', '<=', 'end_index', ')', ']', 'else', ':', 'sub_df', '=', 'df', '[', '(', 'df', '.', 'timestamp', '>=', '(', 'num_obs', '-', 'longterm_period', ')', ')', '&', '(', 'df', '.', 'timestamp', '<=', 'num_obs', ')', ']', 'all_data', '.', 'append', '(', 'sub_df', ')', 'else', ':', 'all_data', '=', '[', 'df', ']', '# Create empty data frames to store all anoms and', '# seasonal+trend component from decomposition', 'all_anoms', '=', 'DataFrame', '(', 'columns', '=', '[', "'timestamp'", ',', "'value'", ']', ')', 'seasonal_plus_trend', '=', 'DataFrame', '(', 'columns', '=', '[', "'timestamp'", ',', "'value'", ']', ')', '# Detect anomalies on all data (either entire data in one-pass,', '# or in 2 week blocks if longterm=TRUE)', 'for', 'i', 'in', 'range', '(', 'len', '(', 'all_data', ')', ')', ':', 'directions', '=', '{', "'pos'", ':', 'Direction', '(', 'True', ',', 'True', ')', ',', "'neg'", ':', 'Direction', '(', 'True', ',', 'False', ')', ',', "'both'", ':', 'Direction', '(', 'False', ',', 'True', ')', '}', 'anomaly_direction', '=', 'directions', '[', 'direction', ']', 's_h_esd_timestamps', '=', 'detect_anoms', '(', 'all_data', '[', 'i', ']', ',', 'k', '=', 'max_anoms', ',', 'alpha', '=', 'alpha', ',', 'num_obs_per_period', '=', 'period', ',', 'use_decomp', '=', 'True', ',', 'one_tail', '=', 'anomaly_direction', '.', 'one_tail', ',', 'upper_tail', '=', 'anomaly_direction', '.', 'upper_tail', ',', 'verbose', '=', 'verbose', ')', '# store decomposed components in local variable and', '# overwrite s_h_esd_timestamps to contain only the anom timestamps', 'data_decomp', '=', 's_h_esd_timestamps', '[', "'stl'", ']', 's_h_esd_timestamps', '=', 's_h_esd_timestamps', '[', "'anoms'", ']', '# -- Step 3: Use detected anomaly timestamps to', '# extract the actual anomalies (timestamp and value) from the data', 'if', 's_h_esd_timestamps', ':', 'anoms', '=', 'all_data', '[', 'i', ']', '[', 'all_data', '[', 'i', ']', '.', 'timestamp', '.', 'isin', '(', 's_h_esd_timestamps', ')', ']', 'else', ':', 'anoms', '=', 'DataFrame', '(', 'columns', '=', '[', "'timestamp'", ',', "'value'", ']', ')', '# Filter the anomalies using one of the thresholding', '# functions if applicable', 'if', 'threshold', ':', '# Calculate daily max values', 'if', 'isinstance', '(', 'all_data', '[', 'i', ']', '.', 'index', '[', '0', ']', ',', 'Timestamp', ')', ':', 'group', '=', 'all_data', '[', 'i', ']', '.', 'timestamp', '.', 'map', '(', 'Timestamp', '.', 'date', ')', 'else', ':', 'group', '=', 'all_data', '[', 'i', ']', '.', 'timestamp', '.', 'map', '(', 'lambda', 't', ':', 'int', '(', 't', '/', 'period', ')', ')', 'periodic_maxes', '=', 'df', '.', 'groupby', '(', 'group', ')', '.', 'aggregate', '(', 'np', '.', 'max', ')', '.', 
'value', '# Calculate the threshold set by the user', 'if', 'threshold', '==', "'med_max'", ':', 'thresh', '=', 'periodic_maxes', '.', 'median', '(', ')', 'elif', 'threshold', '==', "'p95'", ':', 'thresh', '=', 'periodic_maxes', '.', 'quantile', '(', '.95', ')', 'elif', 'threshold', '==', "'p99'", ':', 'thresh', '=', 'periodic_maxes', '.', 'quantile', '(', '.99', ')', '# Remove any anoms below the threshold', 'anoms', '=', 'anoms', '[', 'anoms', '.', 'value', '>=', 'thresh', ']', 'all_anoms', '=', 'all_anoms', '.', 'append', '(', 'anoms', ')', 'seasonal_plus_trend', '=', 'seasonal_plus_trend', '.', 'append', '(', 'data_decomp', ')', '# Cleanup potential duplicates', 'try', ':', 'all_anoms', '.', 'drop_duplicates', '(', 'subset', '=', '[', "'timestamp'", ']', ')', 'seasonal_plus_trend', '.', 'drop_duplicates', '(', 'subset', '=', '[', "'timestamp'", ']', ')', 'except', 'TypeError', ':', 'all_anoms', '.', 'drop_duplicates', '(', 'cols', '=', '[', "'timestamp'", ']', ')', 'seasonal_plus_trend', '.', 'drop_duplicates', '(', 'cols', '=', '[', "'timestamp'", ']', ')', '# -- If only_last was set by the user, create subset of', '# the data that represent the most recent period', 'if', 'only_last', ':', 'd', '=', '{', "'timestamp'", ':', 'df', '.', 'timestamp', '.', 'iloc', '[', '-', 'period', ':', ']', ',', "'value'", ':', 'df', '.', 'value', '.', 'iloc', '[', '-', 'period', ':', ']', '}', 'x_subset_single_period', '=', 'DataFrame', '(', 'd', ',', 'index', '=', 'd', '[', "'timestamp'", ']', ')', 'past_obs', '=', 'period', '*', '7', 'if', 'num_obs', '<', 'past_obs', ':', 'past_obs', '=', 'num_obs', '-', 'period', '# When plotting anoms for the last period only we only show', '# the previous 7 periods of data', 'd', '=', '{', "'timestamp'", ':', 'df', '.', 'timestamp', '.', 'iloc', '[', '-', 'past_obs', ':', '-', 'period', ']', ',', "'value'", ':', 'df', '.', 'value', '.', 'iloc', '[', '-', 'past_obs', ':', '-', 'period', ']', '}', 'x_subset_previous', '=', 'DataFrame', '(', 'd', ',', 'index', '=', 'd', '[', "'timestamp'", ']', ')', 'all_anoms', '=', 'all_anoms', '[', 'all_anoms', '.', 'timestamp', '>=', 'x_subset_single_period', '.', 'timestamp', '.', 'iloc', '[', '0', ']', ']', 'num_obs', '=', 'len', '(', 'x_subset_single_period', '.', 'value', ')', '# Calculate number of anomalies as a percentage', 'anom_pct', '=', '(', 'len', '(', 'df', '.', 'value', ')', '/', 'float', '(', 'num_obs', ')', ')', '*', '100', 'if', 'anom_pct', '==', '0', ':', 'return', '{', '"anoms"', ':', 'None', ',', '"plot"', ':', 'None', '}', '# The original R implementation handles plotting here.', '# Plotting is currently not implemented.', '# if plot:', '# plot_something()', 'all_anoms', '.', 'index', '=', 'all_anoms', '.', 'timestamp', 'if', 'e_value', ':', 'd', '=', '{', "'timestamp'", ':', 'all_anoms', '.', 'timestamp', ',', "'anoms'", ':', 'all_anoms', '.', 'value', ',', "'expected_value'", ':', 'seasonal_plus_trend', '[', 'seasonal_plus_trend', '.', 'timestamp', '.', 'isin', '(', 'all_anoms', '.', 'timestamp', ')', ']', '.', 'value', '}', 'else', ':', 'd', '=', '{', "'timestamp'", ':', 'all_anoms', '.', 'timestamp', ',', "'anoms'", ':', 'all_anoms', '.', 'value', '}', 'anoms', '=', 'DataFrame', '(', 'd', ',', 'index', '=', 'd', '[', "'timestamp'", ']', '.', 'index', ')', 'return', '{', "'anoms'", ':', 'anoms', ',', "'plot'", ':', 'None', '}'] | Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of observations.
Args:
x: Time series as a column data frame, list, or vector, where the column consists of
the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
period: Defines the number of observations in a single period, and used during seasonal
decomposition.
only_last: Find and report anomalies only within the last period in the time series.
threshold: Only report positive going anoms above the threshold specified. Options are: ('None' | 'med_max' | 'p95' | 'p99').
e_value: Add an additional column to the anoms output containing the expected value.
longterm_period: Defines the number of observations for which the trend can be considered
flat. The value should be an integer multiple of the number of observations in a single period.
This increases anom detection efficacy for time series that are greater than a month.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm_period' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing index, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series. | ['Anomaly', 'Detection', 'Using', 'Seasonal', 'Hybrid', 'ESD', 'Test'] | train | https://github.com/zrnsm/pyculiarity/blob/a06f5977cbf60a8805fd364f834aa09be108f2f3/pyculiarity/detect_vec.py#L11-L293 |
578 | sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.download | def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info | python | def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info | ['def', 'download', '(', 'self', ',', 'id', ',', 'directory_path', '=', "'.'", ',', 'checksum', '=', 'True', ')', ':', 'product_info', '=', 'self', '.', 'get_product_odata', '(', 'id', ')', 'path', '=', 'join', '(', 'directory_path', ',', 'product_info', '[', "'title'", ']', '+', "'.zip'", ')', 'product_info', '[', "'path'", ']', '=', 'path', 'product_info', '[', "'downloaded_bytes'", ']', '=', '0', 'self', '.', 'logger', '.', 'info', '(', "'Downloading %s to %s'", ',', 'id', ',', 'path', ')', 'if', 'exists', '(', 'path', ')', ':', '# We assume that the product has been downloaded and is complete', 'return', 'product_info', '# An incomplete download triggers the retrieval from the LTA if the product is not online', 'if', 'not', 'product_info', '[', "'Online'", ']', ':', 'self', '.', 'logger', '.', 'warning', '(', "'Product %s is not online. Triggering retrieval from long term archive.'", ',', 'product_info', '[', "'id'", ']', ')', 'self', '.', '_trigger_offline_retrieval', '(', 'product_info', '[', "'url'", ']', ')', 'return', 'product_info', '# Use a temporary file for downloading', 'temp_path', '=', 'path', '+', "'.incomplete'", 'skip_download', '=', 'False', 'if', 'exists', '(', 'temp_path', ')', ':', 'if', 'getsize', '(', 'temp_path', ')', '>', 'product_info', '[', "'size'", ']', ':', 'self', '.', 'logger', '.', 'warning', '(', '"Existing incomplete file %s is larger than the expected final size"', '" (%s vs %s bytes). Deleting it."', ',', 'str', '(', 'temp_path', ')', ',', 'getsize', '(', 'temp_path', ')', ',', 'product_info', '[', "'size'", ']', ')', 'remove', '(', 'temp_path', ')', 'elif', 'getsize', '(', 'temp_path', ')', '==', 'product_info', '[', "'size'", ']', ':', 'if', 'self', '.', '_md5_compare', '(', 'temp_path', ',', 'product_info', '[', "'md5'", ']', ')', ':', 'skip_download', '=', 'True', 'else', ':', '# Log a warning since this should never happen', 'self', '.', 'logger', '.', 'warning', '(', '"Existing incomplete file %s appears to be fully downloaded but "', '"its checksum is incorrect. Deleting it."', ',', 'str', '(', 'temp_path', ')', ')', 'remove', '(', 'temp_path', ')', 'else', ':', '# continue downloading', 'self', '.', 'logger', '.', 'info', '(', '"Download will resume from existing incomplete file %s."', ',', 'temp_path', ')', 'pass', 'if', 'not', 'skip_download', ':', '# Store the number of downloaded bytes for unit tests', 'product_info', '[', "'downloaded_bytes'", ']', '=', 'self', '.', '_download', '(', 'product_info', '[', "'url'", ']', ',', 'temp_path', ',', 'self', '.', 'session', ',', 'product_info', '[', "'size'", ']', ')', '# Check integrity with MD5 checksum', 'if', 'checksum', 'is', 'True', ':', 'if', 'not', 'self', '.', '_md5_compare', '(', 'temp_path', ',', 'product_info', '[', "'md5'", ']', ')', ':', 'remove', '(', 'temp_path', ')', 'raise', 'InvalidChecksumError', '(', "'File corrupt: checksums do not match'", ')', '# Download successful, rename the temporary file to its proper name', 'shutil', '.', 'move', '(', 'temp_path', ',', 'path', ')', 'return', 'product_info'] | Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server. | ['Download', 'a', 'product', '.'] | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L463-L552 |
579 | explosion/thinc | examples/spacy_tagger.py | Shape | def Shape(docs, drop=0.0):
"""Get word shapes."""
ids = numpy.zeros((sum(len(doc) for doc in docs),), dtype="i")
i = 0
for doc in docs:
for token in doc:
ids[i] = token.shape
i += 1
return ids, None | python | def Shape(docs, drop=0.0):
"""Get word shapes."""
ids = numpy.zeros((sum(len(doc) for doc in docs),), dtype="i")
i = 0
for doc in docs:
for token in doc:
ids[i] = token.shape
i += 1
return ids, None | ['def', 'Shape', '(', 'docs', ',', 'drop', '=', '0.0', ')', ':', 'ids', '=', 'numpy', '.', 'zeros', '(', '(', 'sum', '(', 'len', '(', 'doc', ')', 'for', 'doc', 'in', 'docs', ')', ',', ')', ',', 'dtype', '=', '"i"', ')', 'i', '=', '0', 'for', 'doc', 'in', 'docs', ':', 'for', 'token', 'in', 'doc', ':', 'ids', '[', 'i', ']', '=', 'token', '.', 'shape', 'i', '+=', '1', 'return', 'ids', ',', 'None'] | Get word shapes. | ['Get', 'word', 'shapes', '.'] | train | https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/examples/spacy_tagger.py#L58-L66 |
580 | bitshares/uptick | uptick/wallet.py | addkey | def addkey(ctx, key):
""" Add a private key to the wallet
"""
if not key:
while True:
key = click.prompt(
"Private Key (wif) [Enter to quit]",
hide_input=True,
show_default=False,
default="exit",
)
if not key or key == "exit":
break
try:
ctx.bitshares.wallet.addPrivateKey(key)
except Exception as e:
click.echo(str(e))
continue
else:
for k in key:
try:
ctx.bitshares.wallet.addPrivateKey(k)
except Exception as e:
click.echo(str(e))
installedKeys = ctx.bitshares.wallet.getPublicKeys()
if len(installedKeys) == 1:
name = ctx.bitshares.wallet.getAccountFromPublicKey(installedKeys[0])
if name: # only if a name to the key was found
account = Account(name, bitshares_instance=ctx.bitshares)
click.echo("=" * 30)
click.echo("Setting new default user: %s" % account["name"])
click.echo()
click.echo("You can change these settings with:")
click.echo(" uptick set default_account <account>")
click.echo("=" * 30)
config["default_account"] = account["name"] | python | def addkey(ctx, key):
""" Add a private key to the wallet
"""
if not key:
while True:
key = click.prompt(
"Private Key (wif) [Enter to quit]",
hide_input=True,
show_default=False,
default="exit",
)
if not key or key == "exit":
break
try:
ctx.bitshares.wallet.addPrivateKey(key)
except Exception as e:
click.echo(str(e))
continue
else:
for k in key:
try:
ctx.bitshares.wallet.addPrivateKey(k)
except Exception as e:
click.echo(str(e))
installedKeys = ctx.bitshares.wallet.getPublicKeys()
if len(installedKeys) == 1:
name = ctx.bitshares.wallet.getAccountFromPublicKey(installedKeys[0])
if name: # only if a name to the key was found
account = Account(name, bitshares_instance=ctx.bitshares)
click.echo("=" * 30)
click.echo("Setting new default user: %s" % account["name"])
click.echo()
click.echo("You can change these settings with:")
click.echo(" uptick set default_account <account>")
click.echo("=" * 30)
config["default_account"] = account["name"] | ['def', 'addkey', '(', 'ctx', ',', 'key', ')', ':', 'if', 'not', 'key', ':', 'while', 'True', ':', 'key', '=', 'click', '.', 'prompt', '(', '"Private Key (wif) [Enter to quit]"', ',', 'hide_input', '=', 'True', ',', 'show_default', '=', 'False', ',', 'default', '=', '"exit"', ',', ')', 'if', 'not', 'key', 'or', 'key', '==', '"exit"', ':', 'break', 'try', ':', 'ctx', '.', 'bitshares', '.', 'wallet', '.', 'addPrivateKey', '(', 'key', ')', 'except', 'Exception', 'as', 'e', ':', 'click', '.', 'echo', '(', 'str', '(', 'e', ')', ')', 'continue', 'else', ':', 'for', 'k', 'in', 'key', ':', 'try', ':', 'ctx', '.', 'bitshares', '.', 'wallet', '.', 'addPrivateKey', '(', 'k', ')', 'except', 'Exception', 'as', 'e', ':', 'click', '.', 'echo', '(', 'str', '(', 'e', ')', ')', 'installedKeys', '=', 'ctx', '.', 'bitshares', '.', 'wallet', '.', 'getPublicKeys', '(', ')', 'if', 'len', '(', 'installedKeys', ')', '==', '1', ':', 'name', '=', 'ctx', '.', 'bitshares', '.', 'wallet', '.', 'getAccountFromPublicKey', '(', 'installedKeys', '[', '0', ']', ')', 'if', 'name', ':', '# only if a name to the key was found', 'account', '=', 'Account', '(', 'name', ',', 'bitshares_instance', '=', 'ctx', '.', 'bitshares', ')', 'click', '.', 'echo', '(', '"="', '*', '30', ')', 'click', '.', 'echo', '(', '"Setting new default user: %s"', '%', 'account', '[', '"name"', ']', ')', 'click', '.', 'echo', '(', ')', 'click', '.', 'echo', '(', '"You can change these settings with:"', ')', 'click', '.', 'echo', '(', '" uptick set default_account <account>"', ')', 'click', '.', 'echo', '(', '"="', '*', '30', ')', 'config', '[', '"default_account"', ']', '=', 'account', '[', '"name"', ']'] | Add a private key to the wallet | ['Add', 'a', 'private', 'key', 'to', 'the', 'wallet'] | train | https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/wallet.py#L47-L83 |
581 | nhoffman/fastalite | fastalite/fastalite.py | fastalite | def fastalite(handle):
"""Return a sequence of namedtuple objects from a fasta file with
attributes (id, description, seq) given open file-like object
``handle``
"""
Seq = namedtuple('Seq', ['id', 'description', 'seq'])
header, seq = '', []
for line in handle:
if line.startswith('>'):
if header:
yield Seq(header.split()[0], header, ''.join(seq))
header, seq = line[1:].strip(), []
else:
seq.append(line.strip())
if header and seq:
yield Seq(header.split()[0], header, ''.join(seq)) | python | def fastalite(handle):
"""Return a sequence of namedtuple objects from a fasta file with
attributes (id, description, seq) given open file-like object
``handle``
"""
Seq = namedtuple('Seq', ['id', 'description', 'seq'])
header, seq = '', []
for line in handle:
if line.startswith('>'):
if header:
yield Seq(header.split()[0], header, ''.join(seq))
header, seq = line[1:].strip(), []
else:
seq.append(line.strip())
if header and seq:
yield Seq(header.split()[0], header, ''.join(seq)) | ['def', 'fastalite', '(', 'handle', ')', ':', 'Seq', '=', 'namedtuple', '(', "'Seq'", ',', '[', "'id'", ',', "'description'", ',', "'seq'", ']', ')', 'header', ',', 'seq', '=', "''", ',', '[', ']', 'for', 'line', 'in', 'handle', ':', 'if', 'line', '.', 'startswith', '(', "'>'", ')', ':', 'if', 'header', ':', 'yield', 'Seq', '(', 'header', '.', 'split', '(', ')', '[', '0', ']', ',', 'header', ',', "''", '.', 'join', '(', 'seq', ')', ')', 'header', ',', 'seq', '=', 'line', '[', '1', ':', ']', '.', 'strip', '(', ')', ',', '[', ']', 'else', ':', 'seq', '.', 'append', '(', 'line', '.', 'strip', '(', ')', ')', 'if', 'header', 'and', 'seq', ':', 'yield', 'Seq', '(', 'header', '.', 'split', '(', ')', '[', '0', ']', ',', 'header', ',', "''", '.', 'join', '(', 'seq', ')', ')'] | Return a sequence of namedtuple objects from a fasta file with
attributes (id, description, seq) given open file-like object
``handle`` | ['Return', 'a', 'sequence', 'of', 'namedtuple', 'objects', 'from', 'a', 'fasta', 'file', 'with', 'attributes', '(', 'id', 'description', 'seq', ')', 'given', 'open', 'file', '-', 'like', 'object', 'handle'] | train | https://github.com/nhoffman/fastalite/blob/d544a9e2b5150cf59f0f9651f6f3d659caf13848/fastalite/fastalite.py#L52-L71 |
582 | santoshphilip/eppy | eppy/modeleditor.py | IDF.newidfobject | def newidfobject(self, key, aname='', defaultvalues=True, **kwargs):
"""
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
"""
obj = newrawobject(self.model, self.idd_info,
key, block=self.block, defaultvalues=defaultvalues)
abunch = obj2bunch(self.model, self.idd_info, obj)
if aname:
warnings.warn("The aname parameter should no longer be used.", UserWarning)
namebunch(abunch, aname)
self.idfobjects[key].append(abunch)
for k, v in list(kwargs.items()):
abunch[k] = v
return abunch | python | def newidfobject(self, key, aname='', defaultvalues=True, **kwargs):
"""
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
"""
obj = newrawobject(self.model, self.idd_info,
key, block=self.block, defaultvalues=defaultvalues)
abunch = obj2bunch(self.model, self.idd_info, obj)
if aname:
warnings.warn("The aname parameter should no longer be used.", UserWarning)
namebunch(abunch, aname)
self.idfobjects[key].append(abunch)
for k, v in list(kwargs.items()):
abunch[k] = v
return abunch | ['def', 'newidfobject', '(', 'self', ',', 'key', ',', 'aname', '=', "''", ',', 'defaultvalues', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'obj', '=', 'newrawobject', '(', 'self', '.', 'model', ',', 'self', '.', 'idd_info', ',', 'key', ',', 'block', '=', 'self', '.', 'block', ',', 'defaultvalues', '=', 'defaultvalues', ')', 'abunch', '=', 'obj2bunch', '(', 'self', '.', 'model', ',', 'self', '.', 'idd_info', ',', 'obj', ')', 'if', 'aname', ':', 'warnings', '.', 'warn', '(', '"The aname parameter should no longer be used."', ',', 'UserWarning', ')', 'namebunch', '(', 'abunch', ',', 'aname', ')', 'self', '.', 'idfobjects', '[', 'key', ']', '.', 'append', '(', 'abunch', ')', 'for', 'k', ',', 'v', 'in', 'list', '(', 'kwargs', '.', 'items', '(', ')', ')', ':', 'abunch', '[', 'k', ']', '=', 'v', 'return', 'abunch'] | Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object | ['Add', 'a', 'new', 'idfobject', 'to', 'the', 'model', '.', 'If', 'you', 'don', 't', 'specify', 'a', 'value', 'for', 'a', 'field', 'the', 'default', 'value', 'will', 'be', 'set', '.'] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L713-L755 |
583 | KelSolaar/Umbra | umbra/components/factory/script_editor/models.py | ProjectsModel.register_file | def register_file(self, file, parent, ensure_uniqueness=False):
"""
Registers given file in the Model.
:param file: File to register.
:type file: unicode
:param parent: FileNode parent.
:type parent: GraphModelNode
:param ensure_uniqueness: Ensure registrar uniqueness.
:type ensure_uniqueness: bool
:return: FileNode.
:rtype: FileNode
"""
if ensure_uniqueness:
if self.get_file_nodes(file):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' file is already registered!".format(
self.__class__.__name__, file))
LOGGER.debug("> Registering '{0}' file.".format(file))
row = parent.children_count()
self.beginInsertRows(self.get_node_index(parent), row, row)
file_node = FileNode(name=os.path.basename(file),
path=file,
parent=parent)
self.endInsertRows()
self.file_registered.emit(file_node)
return file_node | python | def register_file(self, file, parent, ensure_uniqueness=False):
"""
Registers given file in the Model.
:param file: File to register.
:type file: unicode
:param parent: FileNode parent.
:type parent: GraphModelNode
:param ensure_uniqueness: Ensure registrar uniqueness.
:type ensure_uniqueness: bool
:return: FileNode.
:rtype: FileNode
"""
if ensure_uniqueness:
if self.get_file_nodes(file):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' file is already registered!".format(
self.__class__.__name__, file))
LOGGER.debug("> Registering '{0}' file.".format(file))
row = parent.children_count()
self.beginInsertRows(self.get_node_index(parent), row, row)
file_node = FileNode(name=os.path.basename(file),
path=file,
parent=parent)
self.endInsertRows()
self.file_registered.emit(file_node)
return file_node | ['def', 'register_file', '(', 'self', ',', 'file', ',', 'parent', ',', 'ensure_uniqueness', '=', 'False', ')', ':', 'if', 'ensure_uniqueness', ':', 'if', 'self', '.', 'get_file_nodes', '(', 'file', ')', ':', 'raise', 'foundations', '.', 'exceptions', '.', 'ProgrammingError', '(', '"{0} | \'{1}\' file is already registered!"', '.', 'format', '(', 'self', '.', '__class__', '.', '__name__', ',', 'file', ')', ')', 'LOGGER', '.', 'debug', '(', '"> Registering \'{0}\' file."', '.', 'format', '(', 'file', ')', ')', 'row', '=', 'parent', '.', 'children_count', '(', ')', 'self', '.', 'beginInsertRows', '(', 'self', '.', 'get_node_index', '(', 'parent', ')', ',', 'row', ',', 'row', ')', 'file_node', '=', 'FileNode', '(', 'name', '=', 'os', '.', 'path', '.', 'basename', '(', 'file', ')', ',', 'path', '=', 'file', ',', 'parent', '=', 'parent', ')', 'self', '.', 'endInsertRows', '(', ')', 'self', '.', 'file_registered', '.', 'emit', '(', 'file_node', ')', 'return', 'file_node'] | Registers given file in the Model.
:param file: File to register.
:type file: unicode
:param parent: FileNode parent.
:type parent: GraphModelNode
:param ensure_uniqueness: Ensure registrar uniqueness.
:type ensure_uniqueness: bool
:return: FileNode.
:rtype: FileNode | ['Registers', 'given', 'file', 'in', 'the', 'Model', '.'] | train | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/models.py#L442-L472 |
584 | twilio/twilio-python | twilio/rest/wireless/v1/sim/__init__.py | SimContext.usage_records | def usage_records(self):
"""
Access the usage_records
:returns: twilio.rest.wireless.v1.sim.usage_record.UsageRecordList
:rtype: twilio.rest.wireless.v1.sim.usage_record.UsageRecordList
"""
if self._usage_records is None:
self._usage_records = UsageRecordList(self._version, sim_sid=self._solution['sid'], )
return self._usage_records | python | def usage_records(self):
"""
Access the usage_records
:returns: twilio.rest.wireless.v1.sim.usage_record.UsageRecordList
:rtype: twilio.rest.wireless.v1.sim.usage_record.UsageRecordList
"""
if self._usage_records is None:
self._usage_records = UsageRecordList(self._version, sim_sid=self._solution['sid'], )
return self._usage_records | ['def', 'usage_records', '(', 'self', ')', ':', 'if', 'self', '.', '_usage_records', 'is', 'None', ':', 'self', '.', '_usage_records', '=', 'UsageRecordList', '(', 'self', '.', '_version', ',', 'sim_sid', '=', 'self', '.', '_solution', '[', "'sid'", ']', ',', ')', 'return', 'self', '.', '_usage_records'] | Access the usage_records
:returns: twilio.rest.wireless.v1.sim.usage_record.UsageRecordList
:rtype: twilio.rest.wireless.v1.sim.usage_record.UsageRecordList | ['Access', 'the', 'usage_records'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/wireless/v1/sim/__init__.py#L345-L354 |
585 | CI-WATER/gsshapy | gsshapy/orm/evt.py | ProjectFileEventManager._write | def _write(self, session, openFile, replaceParamFile=None):
"""
ProjectFileEvent Write to File Method
"""
openFile.write(
text(
yaml.dump([evt.as_yml() for evt in
self.events.order_by(ProjectFileEvent.name,
ProjectFileEvent.subfolder)]
)
)
) | python | def _write(self, session, openFile, replaceParamFile=None):
"""
ProjectFileEvent Write to File Method
"""
openFile.write(
text(
yaml.dump([evt.as_yml() for evt in
self.events.order_by(ProjectFileEvent.name,
ProjectFileEvent.subfolder)]
)
)
) | ['def', '_write', '(', 'self', ',', 'session', ',', 'openFile', ',', 'replaceParamFile', '=', 'None', ')', ':', 'openFile', '.', 'write', '(', 'text', '(', 'yaml', '.', 'dump', '(', '[', 'evt', '.', 'as_yml', '(', ')', 'for', 'evt', 'in', 'self', '.', 'events', '.', 'order_by', '(', 'ProjectFileEvent', '.', 'name', ',', 'ProjectFileEvent', '.', 'subfolder', ')', ']', ')', ')', ')'] | ProjectFileEvent Write to File Method | ['ProjectFileEvent', 'Write', 'to', 'File', 'Method'] | train | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/evt.py#L48-L59 |
586 | crate/crate-python | src/crate/client/blob.py | BlobContainer.exists | def exists(self, digest):
"""
Check if a blob exists
:param digest: Hex digest of the blob
:return: Boolean indicating existence of the blob
"""
return self.conn.client.blob_exists(self.container_name, digest) | python | def exists(self, digest):
"""
Check if a blob exists
:param digest: Hex digest of the blob
:return: Boolean indicating existence of the blob
"""
return self.conn.client.blob_exists(self.container_name, digest) | ['def', 'exists', '(', 'self', ',', 'digest', ')', ':', 'return', 'self', '.', 'conn', '.', 'client', '.', 'blob_exists', '(', 'self', '.', 'container_name', ',', 'digest', ')'] | Check if a blob exists
:param digest: Hex digest of the blob
:return: Boolean indicating existence of the blob | ['Check', 'if', 'a', 'blob', 'exists'] | train | https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/blob.py#L93-L100 |
587 | SFDO-Tooling/CumulusCI | cumulusci/cli/cci.py | timestamp_file | def timestamp_file():
"""Opens a file for tracking the time of the last version check"""
config_dir = os.path.join(
os.path.expanduser("~"), BaseGlobalConfig.config_local_dir
)
if not os.path.exists(config_dir):
os.mkdir(config_dir)
timestamp_file = os.path.join(config_dir, "cumulus_timestamp")
try:
with open(timestamp_file, "r+") as f:
yield f
except IOError: # file does not exist
with open(timestamp_file, "w+") as f:
yield f | python | def timestamp_file():
"""Opens a file for tracking the time of the last version check"""
config_dir = os.path.join(
os.path.expanduser("~"), BaseGlobalConfig.config_local_dir
)
if not os.path.exists(config_dir):
os.mkdir(config_dir)
timestamp_file = os.path.join(config_dir, "cumulus_timestamp")
try:
with open(timestamp_file, "r+") as f:
yield f
except IOError: # file does not exist
with open(timestamp_file, "w+") as f:
yield f | ['def', 'timestamp_file', '(', ')', ':', 'config_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'expanduser', '(', '"~"', ')', ',', 'BaseGlobalConfig', '.', 'config_local_dir', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'config_dir', ')', ':', 'os', '.', 'mkdir', '(', 'config_dir', ')', 'timestamp_file', '=', 'os', '.', 'path', '.', 'join', '(', 'config_dir', ',', '"cumulus_timestamp"', ')', 'try', ':', 'with', 'open', '(', 'timestamp_file', ',', '"r+"', ')', 'as', 'f', ':', 'yield', 'f', 'except', 'IOError', ':', '# file does not exist', 'with', 'open', '(', 'timestamp_file', ',', '"w+"', ')', 'as', 'f', ':', 'yield', 'f'] | Opens a file for tracking the time of the last version check | ['Opens', 'a', 'file', 'for', 'tracking', 'the', 'time', 'of', 'the', 'last', 'version', 'check'] | train | https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/cli/cci.py#L53-L69 |
588 | AustralianSynchrotron/lightflow | lightflow/models/signal.py | Client.send | def send(self, request):
""" Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
"""
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
while True:
if self._connection.polling_time > 0.0:
sleep(self._connection.polling_time)
response_data = self._connection.connection.get(resp_key)
if response_data is not None:
self._connection.connection.delete(resp_key)
break
return pickle.loads(response_data) | python | def send(self, request):
""" Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
"""
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
while True:
if self._connection.polling_time > 0.0:
sleep(self._connection.polling_time)
response_data = self._connection.connection.get(resp_key)
if response_data is not None:
self._connection.connection.delete(resp_key)
break
return pickle.loads(response_data) | ['def', 'send', '(', 'self', ',', 'request', ')', ':', 'self', '.', '_connection', '.', 'connection', '.', 'rpush', '(', 'self', '.', '_request_key', ',', 'pickle', '.', 'dumps', '(', 'request', ')', ')', 'resp_key', '=', "'{}:{}'", '.', 'format', '(', 'SIGNAL_REDIS_PREFIX', ',', 'request', '.', 'uid', ')', 'while', 'True', ':', 'if', 'self', '.', '_connection', '.', 'polling_time', '>', '0.0', ':', 'sleep', '(', 'self', '.', '_connection', '.', 'polling_time', ')', 'response_data', '=', 'self', '.', '_connection', '.', 'connection', '.', 'get', '(', 'resp_key', ')', 'if', 'response_data', 'is', 'not', 'None', ':', 'self', '.', '_connection', '.', 'connection', '.', 'delete', '(', 'resp_key', ')', 'break', 'return', 'pickle', '.', 'loads', '(', 'response_data', ')'] | Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request. | ['Send', 'a', 'request', 'to', 'the', 'server', 'and', 'wait', 'for', 'its', 'response', '.'] | train | https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/signal.py#L171-L192 |
589 | oceanprotocol/squid-py | squid_py/agreements/service_agreement_condition.py | Parameter.as_dictionary | def as_dictionary(self):
"""
Return the parameter as a dictionary.
:return: dict
"""
return {
"name": self.name,
"type": self.type,
"value": remove_0x_prefix(self.value) if self.type == 'bytes32' else self.value
} | python | def as_dictionary(self):
"""
Return the parameter as a dictionary.
:return: dict
"""
return {
"name": self.name,
"type": self.type,
"value": remove_0x_prefix(self.value) if self.type == 'bytes32' else self.value
} | ['def', 'as_dictionary', '(', 'self', ')', ':', 'return', '{', '"name"', ':', 'self', '.', 'name', ',', '"type"', ':', 'self', '.', 'type', ',', '"value"', ':', 'remove_0x_prefix', '(', 'self', '.', 'value', ')', 'if', 'self', '.', 'type', '==', "'bytes32'", 'else', 'self', '.', 'value', '}'] | Return the parameter as a dictionary.
:return: dict | ['Return', 'the', 'parameter', 'as', 'a', 'dictionary', '.'] | train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/service_agreement_condition.py#L21-L31 |
590 | graphql-python/graphql-core | graphql/language/parser.py | loc | def loc(parser, start):
# type: (Parser, int) -> Optional[Loc]
"""Returns a location object, used to identify the place in
the source that created a given parsed object."""
if parser.options["no_location"]:
return None
if parser.options["no_source"]:
return Loc(start, parser.prev_end)
return Loc(start, parser.prev_end, parser.source) | python | def loc(parser, start):
# type: (Parser, int) -> Optional[Loc]
"""Returns a location object, used to identify the place in
the source that created a given parsed object."""
if parser.options["no_location"]:
return None
if parser.options["no_source"]:
return Loc(start, parser.prev_end)
return Loc(start, parser.prev_end, parser.source) | ['def', 'loc', '(', 'parser', ',', 'start', ')', ':', '# type: (Parser, int) -> Optional[Loc]', 'if', 'parser', '.', 'options', '[', '"no_location"', ']', ':', 'return', 'None', 'if', 'parser', '.', 'options', '[', '"no_source"', ']', ':', 'return', 'Loc', '(', 'start', ',', 'parser', '.', 'prev_end', ')', 'return', 'Loc', '(', 'start', ',', 'parser', '.', 'prev_end', ',', 'parser', '.', 'source', ')'] | Returns a location object, used to identify the place in
the source that created a given parsed object. | ['Returns', 'a', 'location', 'object', 'used', 'to', 'identify', 'the', 'place', 'in', 'the', 'source', 'that', 'created', 'a', 'given', 'parsed', 'object', '.'] | train | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L114-L124 |
591 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/debug.py | Debug.execl | def execl(self, lpCmdLine, **kwargs):
"""
Starts a new process for debugging.
This method uses a command line string. To use a list of arguments
instead, use L{execv}.
@see: L{attach}, L{detach}
@type lpCmdLine: str
@param lpCmdLine: Command line string to execute.
The first token must be the debugee executable filename.
Tokens with spaces must be enclosed in double quotes.
Tokens including double quote characters must be escaped with a
backslash.
@type bBreakOnEntryPoint: bool
@keyword bBreakOnEntryPoint: C{True} to automatically set a breakpoint
at the program entry point. Defaults to C{False}.
@type bConsole: bool
@keyword bConsole: True to inherit the console of the debugger.
Defaults to C{False}.
@type bFollow: bool
@keyword bFollow: C{True} to automatically attach to child processes.
Defaults to C{False}.
@type bInheritHandles: bool
@keyword bInheritHandles: C{True} if the new process should inherit
it's parent process' handles. Defaults to C{False}.
@type bSuspended: bool
@keyword bSuspended: C{True} to suspend the main thread before any code
is executed in the debugee. Defaults to C{False}.
@type dwParentProcessId: int or None
@keyword dwParentProcessId: C{None} or C{0} if the debugger process
should be the parent process (default), or a process ID to
forcefully set as the debugee's parent (only available for Windows
Vista and above).
In hostile mode, the default is not the debugger process but the
process ID for "explorer.exe".
@type iTrustLevel: int
@keyword iTrustLevel: Trust level.
Must be one of the following values:
- 0: B{No trust}. May not access certain resources, such as
cryptographic keys and credentials. Only available since
Windows XP and 2003, desktop editions. This is the default
in hostile mode.
- 1: B{Normal trust}. Run with the same privileges as a normal
user, that is, one that doesn't have the I{Administrator} or
I{Power User} user rights. Only available since Windows XP
and 2003, desktop editions.
- 2: B{Full trust}. Run with the exact same privileges as the
current user. This is the default in normal mode.
@type bAllowElevation: bool
@keyword bAllowElevation: C{True} to allow the child process to keep
UAC elevation, if the debugger itself is running elevated. C{False}
to ensure the child process doesn't run with elevation. Defaults to
C{True} in normal mode and C{False} in hostile mode.
This flag is only meaningful on Windows Vista and above, and if the
debugger itself is running with elevation. It can be used to make
sure the child processes don't run elevated as well.
This flag DOES NOT force an elevation prompt when the debugger is
not running with elevation.
Note that running the debugger with elevation (or the Python
interpreter at all for that matter) is not normally required.
You should only need to if the target program requires elevation
to work properly (for example if you try to debug an installer).
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
"""
if type(lpCmdLine) not in (str, compat.unicode):
warnings.warn("Debug.execl expects a string")
# Set the "debug" flag to True.
kwargs['bDebug'] = True
# Pop the "break on entry point" flag.
bBreakOnEntryPoint = kwargs.pop('bBreakOnEntryPoint', False)
# Set the default trust level if requested.
if 'iTrustLevel' not in kwargs:
if self.__bHostileCode:
kwargs['iTrustLevel'] = 0
else:
kwargs['iTrustLevel'] = 2
# Set the default UAC elevation flag if requested.
if 'bAllowElevation' not in kwargs:
kwargs['bAllowElevation'] = not self.__bHostileCode
# In hostile mode the default parent process is explorer.exe.
# Only supported for Windows Vista and above.
if self.__bHostileCode and not kwargs.get('dwParentProcessId', None):
try:
vista_and_above = self.__vista_and_above
except AttributeError:
osi = win32.OSVERSIONINFOEXW()
osi.dwMajorVersion = 6
osi.dwMinorVersion = 0
osi.dwPlatformId = win32.VER_PLATFORM_WIN32_NT
mask = 0
mask = win32.VerSetConditionMask(mask,
win32.VER_MAJORVERSION,
win32.VER_GREATER_EQUAL)
mask = win32.VerSetConditionMask(mask,
win32.VER_MAJORVERSION,
win32.VER_GREATER_EQUAL)
mask = win32.VerSetConditionMask(mask,
win32.VER_PLATFORMID,
win32.VER_EQUAL)
vista_and_above = win32.VerifyVersionInfoW(osi,
win32.VER_MAJORVERSION | \
win32.VER_MINORVERSION | \
win32.VER_PLATFORMID,
mask)
self.__vista_and_above = vista_and_above
if vista_and_above:
dwParentProcessId = self.system.get_explorer_pid()
if dwParentProcessId:
kwargs['dwParentProcessId'] = dwParentProcessId
else:
msg = ("Failed to find \"explorer.exe\"!"
" Using the debugger as parent process.")
warnings.warn(msg, RuntimeWarning)
# Start the new process.
aProcess = None
try:
aProcess = self.system.start_process(lpCmdLine, **kwargs)
dwProcessId = aProcess.get_pid()
# Match the system kill-on-exit flag to our own.
self.__setSystemKillOnExitMode()
# Warn when mixing 32 and 64 bits.
# This also allows the user to stop attaching altogether,
# depending on how the warnings are configured.
if System.bits != aProcess.get_bits():
msg = "Mixture of 32 and 64 bits is considered experimental." \
" Use at your own risk!"
warnings.warn(msg, MixedBitsWarning)
# Add the new PID to the set of debugees.
self.__startedDebugees.add(dwProcessId)
# Add the new PID to the set of "break on EP" debugees if needed.
if bBreakOnEntryPoint:
self.__breakOnEP.add(dwProcessId)
# Return the Process object.
return aProcess
# On error kill the new process and raise an exception.
except:
if aProcess is not None:
try:
try:
self.__startedDebugees.remove(aProcess.get_pid())
except KeyError:
pass
finally:
try:
try:
self.__breakOnEP.remove(aProcess.get_pid())
except KeyError:
pass
finally:
try:
aProcess.kill()
except Exception:
pass
raise | python | def execl(self, lpCmdLine, **kwargs):
"""
Starts a new process for debugging.
This method uses a command line string. To use a list of arguments
instead, use L{execv}.
@see: L{attach}, L{detach}
@type lpCmdLine: str
@param lpCmdLine: Command line string to execute.
The first token must be the debugee executable filename.
Tokens with spaces must be enclosed in double quotes.
Tokens including double quote characters must be escaped with a
backslash.
@type bBreakOnEntryPoint: bool
@keyword bBreakOnEntryPoint: C{True} to automatically set a breakpoint
at the program entry point. Defaults to C{False}.
@type bConsole: bool
@keyword bConsole: True to inherit the console of the debugger.
Defaults to C{False}.
@type bFollow: bool
@keyword bFollow: C{True} to automatically attach to child processes.
Defaults to C{False}.
@type bInheritHandles: bool
@keyword bInheritHandles: C{True} if the new process should inherit
it's parent process' handles. Defaults to C{False}.
@type bSuspended: bool
@keyword bSuspended: C{True} to suspend the main thread before any code
is executed in the debugee. Defaults to C{False}.
@type dwParentProcessId: int or None
@keyword dwParentProcessId: C{None} or C{0} if the debugger process
should be the parent process (default), or a process ID to
forcefully set as the debugee's parent (only available for Windows
Vista and above).
In hostile mode, the default is not the debugger process but the
process ID for "explorer.exe".
@type iTrustLevel: int
@keyword iTrustLevel: Trust level.
Must be one of the following values:
- 0: B{No trust}. May not access certain resources, such as
cryptographic keys and credentials. Only available since
Windows XP and 2003, desktop editions. This is the default
in hostile mode.
- 1: B{Normal trust}. Run with the same privileges as a normal
user, that is, one that doesn't have the I{Administrator} or
I{Power User} user rights. Only available since Windows XP
and 2003, desktop editions.
- 2: B{Full trust}. Run with the exact same privileges as the
current user. This is the default in normal mode.
@type bAllowElevation: bool
@keyword bAllowElevation: C{True} to allow the child process to keep
UAC elevation, if the debugger itself is running elevated. C{False}
to ensure the child process doesn't run with elevation. Defaults to
C{True} in normal mode and C{False} in hostile mode.
This flag is only meaningful on Windows Vista and above, and if the
debugger itself is running with elevation. It can be used to make
sure the child processes don't run elevated as well.
This flag DOES NOT force an elevation prompt when the debugger is
not running with elevation.
Note that running the debugger with elevation (or the Python
interpreter at all for that matter) is not normally required.
You should only need to if the target program requires elevation
to work properly (for example if you try to debug an installer).
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
"""
if type(lpCmdLine) not in (str, compat.unicode):
warnings.warn("Debug.execl expects a string")
# Set the "debug" flag to True.
kwargs['bDebug'] = True
# Pop the "break on entry point" flag.
bBreakOnEntryPoint = kwargs.pop('bBreakOnEntryPoint', False)
# Set the default trust level if requested.
if 'iTrustLevel' not in kwargs:
if self.__bHostileCode:
kwargs['iTrustLevel'] = 0
else:
kwargs['iTrustLevel'] = 2
# Set the default UAC elevation flag if requested.
if 'bAllowElevation' not in kwargs:
kwargs['bAllowElevation'] = not self.__bHostileCode
# In hostile mode the default parent process is explorer.exe.
# Only supported for Windows Vista and above.
if self.__bHostileCode and not kwargs.get('dwParentProcessId', None):
try:
vista_and_above = self.__vista_and_above
except AttributeError:
osi = win32.OSVERSIONINFOEXW()
osi.dwMajorVersion = 6
osi.dwMinorVersion = 0
osi.dwPlatformId = win32.VER_PLATFORM_WIN32_NT
mask = 0
mask = win32.VerSetConditionMask(mask,
win32.VER_MAJORVERSION,
win32.VER_GREATER_EQUAL)
mask = win32.VerSetConditionMask(mask,
win32.VER_MAJORVERSION,
win32.VER_GREATER_EQUAL)
mask = win32.VerSetConditionMask(mask,
win32.VER_PLATFORMID,
win32.VER_EQUAL)
vista_and_above = win32.VerifyVersionInfoW(osi,
win32.VER_MAJORVERSION | \
win32.VER_MINORVERSION | \
win32.VER_PLATFORMID,
mask)
self.__vista_and_above = vista_and_above
if vista_and_above:
dwParentProcessId = self.system.get_explorer_pid()
if dwParentProcessId:
kwargs['dwParentProcessId'] = dwParentProcessId
else:
msg = ("Failed to find \"explorer.exe\"!"
" Using the debugger as parent process.")
warnings.warn(msg, RuntimeWarning)
# Start the new process.
aProcess = None
try:
aProcess = self.system.start_process(lpCmdLine, **kwargs)
dwProcessId = aProcess.get_pid()
# Match the system kill-on-exit flag to our own.
self.__setSystemKillOnExitMode()
# Warn when mixing 32 and 64 bits.
# This also allows the user to stop attaching altogether,
# depending on how the warnings are configured.
if System.bits != aProcess.get_bits():
msg = "Mixture of 32 and 64 bits is considered experimental." \
" Use at your own risk!"
warnings.warn(msg, MixedBitsWarning)
# Add the new PID to the set of debugees.
self.__startedDebugees.add(dwProcessId)
# Add the new PID to the set of "break on EP" debugees if needed.
if bBreakOnEntryPoint:
self.__breakOnEP.add(dwProcessId)
# Return the Process object.
return aProcess
# On error kill the new process and raise an exception.
except:
if aProcess is not None:
try:
try:
self.__startedDebugees.remove(aProcess.get_pid())
except KeyError:
pass
finally:
try:
try:
self.__breakOnEP.remove(aProcess.get_pid())
except KeyError:
pass
finally:
try:
aProcess.kill()
except Exception:
pass
raise | ['def', 'execl', '(', 'self', ',', 'lpCmdLine', ',', '*', '*', 'kwargs', ')', ':', 'if', 'type', '(', 'lpCmdLine', ')', 'not', 'in', '(', 'str', ',', 'compat', '.', 'unicode', ')', ':', 'warnings', '.', 'warn', '(', '"Debug.execl expects a string"', ')', '# Set the "debug" flag to True.', 'kwargs', '[', "'bDebug'", ']', '=', 'True', '# Pop the "break on entry point" flag.', 'bBreakOnEntryPoint', '=', 'kwargs', '.', 'pop', '(', "'bBreakOnEntryPoint'", ',', 'False', ')', '# Set the default trust level if requested.', 'if', "'iTrustLevel'", 'not', 'in', 'kwargs', ':', 'if', 'self', '.', '__bHostileCode', ':', 'kwargs', '[', "'iTrustLevel'", ']', '=', '0', 'else', ':', 'kwargs', '[', "'iTrustLevel'", ']', '=', '2', '# Set the default UAC elevation flag if requested.', 'if', "'bAllowElevation'", 'not', 'in', 'kwargs', ':', 'kwargs', '[', "'bAllowElevation'", ']', '=', 'not', 'self', '.', '__bHostileCode', '# In hostile mode the default parent process is explorer.exe.', '# Only supported for Windows Vista and above.', 'if', 'self', '.', '__bHostileCode', 'and', 'not', 'kwargs', '.', 'get', '(', "'dwParentProcessId'", ',', 'None', ')', ':', 'try', ':', 'vista_and_above', '=', 'self', '.', '__vista_and_above', 'except', 'AttributeError', ':', 'osi', '=', 'win32', '.', 'OSVERSIONINFOEXW', '(', ')', 'osi', '.', 'dwMajorVersion', '=', '6', 'osi', '.', 'dwMinorVersion', '=', '0', 'osi', '.', 'dwPlatformId', '=', 'win32', '.', 'VER_PLATFORM_WIN32_NT', 'mask', '=', '0', 'mask', '=', 'win32', '.', 'VerSetConditionMask', '(', 'mask', ',', 'win32', '.', 'VER_MAJORVERSION', ',', 'win32', '.', 'VER_GREATER_EQUAL', ')', 'mask', '=', 'win32', '.', 'VerSetConditionMask', '(', 'mask', ',', 'win32', '.', 'VER_MAJORVERSION', ',', 'win32', '.', 'VER_GREATER_EQUAL', ')', 'mask', '=', 'win32', '.', 'VerSetConditionMask', '(', 'mask', ',', 'win32', '.', 'VER_PLATFORMID', ',', 'win32', '.', 'VER_EQUAL', ')', 'vista_and_above', '=', 'win32', '.', 'VerifyVersionInfoW', '(', 'osi', ',', 'win32', '.', 'VER_MAJORVERSION', '|', 'win32', '.', 'VER_MINORVERSION', '|', 'win32', '.', 'VER_PLATFORMID', ',', 'mask', ')', 'self', '.', '__vista_and_above', '=', 'vista_and_above', 'if', 'vista_and_above', ':', 'dwParentProcessId', '=', 'self', '.', 'system', '.', 'get_explorer_pid', '(', ')', 'if', 'dwParentProcessId', ':', 'kwargs', '[', "'dwParentProcessId'", ']', '=', 'dwParentProcessId', 'else', ':', 'msg', '=', '(', '"Failed to find \\"explorer.exe\\"!"', '" Using the debugger as parent process."', ')', 'warnings', '.', 'warn', '(', 'msg', ',', 'RuntimeWarning', ')', '# Start the new process.', 'aProcess', '=', 'None', 'try', ':', 'aProcess', '=', 'self', '.', 'system', '.', 'start_process', '(', 'lpCmdLine', ',', '*', '*', 'kwargs', ')', 'dwProcessId', '=', 'aProcess', '.', 'get_pid', '(', ')', '# Match the system kill-on-exit flag to our own.', 'self', '.', '__setSystemKillOnExitMode', '(', ')', '# Warn when mixing 32 and 64 bits.', '# This also allows the user to stop attaching altogether,', '# depending on how the warnings are configured.', 'if', 'System', '.', 'bits', '!=', 'aProcess', '.', 'get_bits', '(', ')', ':', 'msg', '=', '"Mixture of 32 and 64 bits is considered experimental."', '" Use at your own risk!"', 'warnings', '.', 'warn', '(', 'msg', ',', 'MixedBitsWarning', ')', '# Add the new PID to the set of debugees.', 'self', '.', '__startedDebugees', '.', 'add', '(', 'dwProcessId', ')', '# Add the new PID to the set of "break on EP" debugees if needed.', 'if', 'bBreakOnEntryPoint', ':', 'self', '.', 
'__breakOnEP', '.', 'add', '(', 'dwProcessId', ')', '# Return the Process object.', 'return', 'aProcess', '# On error kill the new process and raise an exception.', 'except', ':', 'if', 'aProcess', 'is', 'not', 'None', ':', 'try', ':', 'try', ':', 'self', '.', '__startedDebugees', '.', 'remove', '(', 'aProcess', '.', 'get_pid', '(', ')', ')', 'except', 'KeyError', ':', 'pass', 'finally', ':', 'try', ':', 'try', ':', 'self', '.', '__breakOnEP', '.', 'remove', '(', 'aProcess', '.', 'get_pid', '(', ')', ')', 'except', 'KeyError', ':', 'pass', 'finally', ':', 'try', ':', 'aProcess', '.', 'kill', '(', ')', 'except', 'Exception', ':', 'pass', 'raise'] | Starts a new process for debugging.
This method uses a command line string. To use a list of arguments
instead, use L{execv}.
@see: L{attach}, L{detach}
@type lpCmdLine: str
@param lpCmdLine: Command line string to execute.
The first token must be the debugee executable filename.
Tokens with spaces must be enclosed in double quotes.
Tokens including double quote characters must be escaped with a
backslash.
@type bBreakOnEntryPoint: bool
@keyword bBreakOnEntryPoint: C{True} to automatically set a breakpoint
at the program entry point. Defaults to C{False}.
@type bConsole: bool
@keyword bConsole: True to inherit the console of the debugger.
Defaults to C{False}.
@type bFollow: bool
@keyword bFollow: C{True} to automatically attach to child processes.
Defaults to C{False}.
@type bInheritHandles: bool
@keyword bInheritHandles: C{True} if the new process should inherit
its parent process' handles. Defaults to C{False}.
@type bSuspended: bool
@keyword bSuspended: C{True} to suspend the main thread before any code
is executed in the debugee. Defaults to C{False}.
@type dwParentProcessId: int or None
@keyword dwParentProcessId: C{None} or C{0} if the debugger process
should be the parent process (default), or a process ID to
forcefully set as the debugee's parent (only available for Windows
Vista and above).
In hostile mode, the default is not the debugger process but the
process ID for "explorer.exe".
@type iTrustLevel: int
@keyword iTrustLevel: Trust level.
Must be one of the following values:
- 0: B{No trust}. May not access certain resources, such as
cryptographic keys and credentials. Only available since
Windows XP and 2003, desktop editions. This is the default
in hostile mode.
- 1: B{Normal trust}. Run with the same privileges as a normal
user, that is, one that doesn't have the I{Administrator} or
I{Power User} user rights. Only available since Windows XP
and 2003, desktop editions.
- 2: B{Full trust}. Run with the exact same privileges as the
current user. This is the default in normal mode.
@type bAllowElevation: bool
@keyword bAllowElevation: C{True} to allow the child process to keep
UAC elevation, if the debugger itself is running elevated. C{False}
to ensure the child process doesn't run with elevation. Defaults to
C{True} in normal mode and C{False} in hostile mode.
This flag is only meaningful on Windows Vista and above, and if the
debugger itself is running with elevation. It can be used to make
sure the child processes don't run elevated as well.
This flag DOES NOT force an elevation prompt when the debugger is
not running with elevation.
Note that running the debugger with elevation (or the Python
interpreter at all for that matter) is not normally required.
You should only need to if the target program requires elevation
to work properly (for example if you try to debug an installer).
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error. | ['Starts', 'a', 'new', 'process', 'for', 'debugging', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/debug.py#L350-L534 |
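A rough illustration of how execl is usually driven from a debugging session; a minimal sketch, assuming winappdbg's usual Debug.loop/stop helpers, with the target command line invented for the example:

    from winappdbg import Debug

    # Hypothetical target; the first token is the executable, quoted tokens keep their spaces.
    cmdline = r'notepad.exe "C:\temp\notes.txt"'

    debug = Debug(bKillOnExit=True)
    try:
        process = debug.execl(cmdline, bBreakOnEntryPoint=True)
        print("debugging pid %d" % process.get_pid())
        debug.loop()   # pump debug events until every debugee exits
    finally:
        debug.stop()   # detach from (or kill) anything still attached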
592 | nugget/python-insteonplm | insteonplm/__init__.py | Connection.dump_conndata | def dump_conndata(self):
"""Developer tool for debugging forensics."""
attrs = vars(self)
return ', '.join("%s: %s" % item for item in attrs.items()) | python | def dump_conndata(self):
"""Developer tool for debugging forensics."""
attrs = vars(self)
return ', '.join("%s: %s" % item for item in attrs.items()) | ['def', 'dump_conndata', '(', 'self', ')', ':', 'attrs', '=', 'vars', '(', 'self', ')', 'return', "', '", '.', 'join', '(', '"%s: %s"', '%', 'item', 'for', 'item', 'in', 'attrs', '.', 'items', '(', ')', ')'] | Developer tool for debugging forensics. | ['Developer', 'tool', 'for', 'debugging', 'forensics', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/__init__.py#L220-L223 |
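The vars()-and-join idiom behind dump_conndata works on any object with instance attributes; a tiny standalone sketch with an invented class:

    class Conn(object):
        def __init__(self):
            self.device = '/dev/ttyUSB0'
            self.connected = False
            self.retries = 3

        def dump_conndata(self):
            # Render every instance attribute as "name: value", comma separated.
            attrs = vars(self)
            return ', '.join("%s: %s" % item for item in attrs.items())

    print(Conn().dump_conndata())   # device: /dev/ttyUSB0, connected: False, retries: 3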
593 | ets-labs/python-domain-models | domain_models/views.py | ContextViewMetaClass.get_properties | def get_properties(attributes):
"""Return tuple of names of defined properties.
:type attributes: dict
:rtype: list
"""
return [key for key, value in six.iteritems(attributes)
if isinstance(value, property)] | python | def get_properties(attributes):
"""Return tuple of names of defined properties.
:type attributes: dict
:rtype: list
"""
return [key for key, value in six.iteritems(attributes)
if isinstance(value, property)] | ['def', 'get_properties', '(', 'attributes', ')', ':', 'return', '[', 'key', 'for', 'key', ',', 'value', 'in', 'six', '.', 'iteritems', '(', 'attributes', ')', 'if', 'isinstance', '(', 'value', ',', 'property', ')', ']'] | Return list of names of defined properties.
:type attributes: dict
:rtype: list | ['Return', 'list', 'of', 'names', 'of', 'defined', 'properties', '.'] | train | https://github.com/ets-labs/python-domain-models/blob/7de1816ba0338f20fdb3e0f57fad0ffd5bea13f9/domain_models/views.py#L76-L83 |
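The same property scan can be exercised outside a metaclass; a small sketch, assuming six is installed, with a made-up Profile class:

    import six

    class Profile(object):
        first_name = 'John'

        @property
        def full_name(self):
            return self.first_name + ' Doe'

    def get_properties(attributes):
        return [key for key, value in six.iteritems(attributes)
                if isinstance(value, property)]

    print(get_properties(dict(vars(Profile))))   # ['full_name']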
594 | SKA-ScienceDataProcessor/integration-prototype | sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py | SchedulingBlockInstance.abort | def abort(self):
"""Abort the SBI (and associated PBs)."""
self.set_status('aborted')
DB.remove_from_list('{}:active'.format(self._type), self._id)
DB.append_to_list('{}:aborted'.format(self._type), self._id)
sbi_pb_ids = ast.literal_eval(
DB.get_hash_value(self._key, 'processing_block_ids'))
for pb_id in sbi_pb_ids:
pb = ProcessingBlock(pb_id)
pb.abort() | python | def abort(self):
"""Abort the SBI (and associated PBs)."""
self.set_status('aborted')
DB.remove_from_list('{}:active'.format(self._type), self._id)
DB.append_to_list('{}:aborted'.format(self._type), self._id)
sbi_pb_ids = ast.literal_eval(
DB.get_hash_value(self._key, 'processing_block_ids'))
for pb_id in sbi_pb_ids:
pb = ProcessingBlock(pb_id)
pb.abort() | ['def', 'abort', '(', 'self', ')', ':', 'self', '.', 'set_status', '(', "'aborted'", ')', 'DB', '.', 'remove_from_list', '(', "'{}:active'", '.', 'format', '(', 'self', '.', '_type', ')', ',', 'self', '.', '_id', ')', 'DB', '.', 'append_to_list', '(', "'{}:aborted'", '.', 'format', '(', 'self', '.', '_type', ')', ',', 'self', '.', '_id', ')', 'sbi_pb_ids', '=', 'ast', '.', 'literal_eval', '(', 'DB', '.', 'get_hash_value', '(', 'self', '.', '_key', ',', "'processing_block_ids'", ')', ')', 'for', 'pb_id', 'in', 'sbi_pb_ids', ':', 'pb', '=', 'ProcessingBlock', '(', 'pb_id', ')', 'pb', '.', 'abort', '(', ')'] | Abort the SBI (and associated PBs). | ['Abort', 'the', 'SBI', '(', 'and', 'associated', 'PBs', ')', '.'] | train | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/scheduling_block_instance.py#L114-L124 |
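The abort above is essentially a state flag plus a move of the id between Redis-backed lists, cascaded to child blocks; a hypothetical redis-py sketch of that move-between-lists step (the key names and client are placeholders, not the project's actual config-db wrapper):

    import redis

    r = redis.Redis()

    def abort_block(block_type, block_id):
        # Flag the block, then move its id from the active list to the aborted list.
        r.hset('{}:{}'.format(block_type, block_id), 'status', 'aborted')
        r.lrem('{}:active'.format(block_type), 0, block_id)   # redis-py 3+ argument order
        r.rpush('{}:aborted'.format(block_type), block_id)

    abort_block('sbi', 'SBI-20180101-sip-demo')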
595 | cuihantao/andes | andes/models/jit.py | JIT.jit_load | def jit_load(self):
"""
Import and instantiate this JIT object
Returns
-------
"""
try:
model = importlib.import_module('.' + self.model, 'andes.models')
device = getattr(model, self.device)
self.system.__dict__[self.name] = device(self.system, self.name)
g = self.system.__dict__[self.name]._group
self.system.group_add(g)
self.system.__dict__[g].register_model(self.name)
# register device after loading
self.system.devman.register_device(self.name)
self.loaded = 1
logger.debug('Imported model <{:s}.{:s}>.'.format(
self.model, self.device))
except ImportError:
logger.error(
'non-JIT model <{:s}.{:s}> import error'
.format(self.model, self.device))
except AttributeError:
logger.error(
'model <{:s}.{:s}> not exist. Check models/__init__.py'
.format(self.model, self.device)) | python | def jit_load(self):
"""
Import and instantiate this JIT object
Returns
-------
"""
try:
model = importlib.import_module('.' + self.model, 'andes.models')
device = getattr(model, self.device)
self.system.__dict__[self.name] = device(self.system, self.name)
g = self.system.__dict__[self.name]._group
self.system.group_add(g)
self.system.__dict__[g].register_model(self.name)
# register device after loading
self.system.devman.register_device(self.name)
self.loaded = 1
logger.debug('Imported model <{:s}.{:s}>.'.format(
self.model, self.device))
except ImportError:
logger.error(
'non-JIT model <{:s}.{:s}> import error'
.format(self.model, self.device))
except AttributeError:
logger.error(
'model <{:s}.{:s}> not exist. Check models/__init__.py'
.format(self.model, self.device)) | ['def', 'jit_load', '(', 'self', ')', ':', 'try', ':', 'model', '=', 'importlib', '.', 'import_module', '(', "'.'", '+', 'self', '.', 'model', ',', "'andes.models'", ')', 'device', '=', 'getattr', '(', 'model', ',', 'self', '.', 'device', ')', 'self', '.', 'system', '.', '__dict__', '[', 'self', '.', 'name', ']', '=', 'device', '(', 'self', '.', 'system', ',', 'self', '.', 'name', ')', 'g', '=', 'self', '.', 'system', '.', '__dict__', '[', 'self', '.', 'name', ']', '.', '_group', 'self', '.', 'system', '.', 'group_add', '(', 'g', ')', 'self', '.', 'system', '.', '__dict__', '[', 'g', ']', '.', 'register_model', '(', 'self', '.', 'name', ')', '# register device after loading', 'self', '.', 'system', '.', 'devman', '.', 'register_device', '(', 'self', '.', 'name', ')', 'self', '.', 'loaded', '=', '1', 'logger', '.', 'debug', '(', "'Imported model <{:s}.{:s}>.'", '.', 'format', '(', 'self', '.', 'model', ',', 'self', '.', 'device', ')', ')', 'except', 'ImportError', ':', 'logger', '.', 'error', '(', "'non-JIT model <{:s}.{:s}> import error'", '.', 'format', '(', 'self', '.', 'model', ',', 'self', '.', 'device', ')', ')', 'except', 'AttributeError', ':', 'logger', '.', 'error', '(', "'model <{:s}.{:s}> not exist. Check models/__init__.py'", '.', 'format', '(', 'self', '.', 'model', ',', 'self', '.', 'device', ')', ')'] | Import and instantiate this JIT object
Returns
------- | ['Import', 'and', 'instantiate', 'this', 'JIT', 'object'] | train | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/jit.py#L20-L49 |
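The heart of jit_load is the importlib.import_module plus getattr combination; a stripped-down, generic sketch of that lazy-import pattern (json/JSONDecoder is only a stand-in so the snippet runs anywhere):

    import importlib

    def lazy_load(module_name, attr_name, package=None):
        """Import a module by name and return one attribute from it, or None on failure."""
        try:
            module = importlib.import_module(module_name, package)
            return getattr(module, attr_name)
        except ImportError:
            print('cannot import module {}'.format(module_name))
        except AttributeError:
            print('{} has no attribute {}'.format(module_name, attr_name))

    decoder_cls = lazy_load('json', 'JSONDecoder')
    print(decoder_cls)   # <class 'json.decoder.JSONDecoder'>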
596 | saltstack/salt | salt/modules/cassandra_cql.py | _connect | def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,
protocol_version=None):
'''
Connect to a Cassandra cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
:type contact_points: str or list of str
:param cql_user: The Cassandra user if authentication is turned on.
:type cql_user: str
:param cql_pass: The Cassandra user password if authentication is turned on.
:type cql_pass: str
:param port: The Cassandra cluster port, defaults to None.
:type port: int
:param protocol_version: Cassandra protocol version to use.
:type protocol_version: int
:return: The session and cluster objects.
:rtype: cluster object, session object
'''
# Lazy load the Cassandra cluster and session for this module by creating a
# cluster and session when cql_query is called the first time. Get the
# Cassandra cluster and session from this module's __context__ after it is
# loaded the first time cql_query is called.
#
# TODO: Call cluster.shutdown() when the module is unloaded on
# master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()
# do nothing to allow loaded modules to gracefully handle resources stored
# in __context__ (i.e. connection pools). This means that the connection
# pool is orphaned and Salt relies on Cassandra to reclaim connections.
# Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
# function, or something similar for each loaded module, connection pools
# and the like can be gracefully reclaimed/shutdown.
if (__context__
and 'cassandra_cql_returner_cluster' in __context__
and 'cassandra_cql_returner_session' in __context__):
return __context__['cassandra_cql_returner_cluster'], __context__['cassandra_cql_returner_session']
else:
contact_points = _load_properties(property_name=contact_points, config_option='cluster')
contact_points = contact_points if isinstance(contact_points, list) else contact_points.split(',')
port = _load_properties(property_name=port, config_option='port', set_default=True, default=9042)
cql_user = _load_properties(property_name=cql_user, config_option='username', set_default=True, default="cassandra")
cql_pass = _load_properties(property_name=cql_pass, config_option='password', set_default=True, default="cassandra")
protocol_version = _load_properties(property_name=protocol_version,
config_option='protocol_version',
set_default=True, default=4)
try:
auth_provider = PlainTextAuthProvider(username=cql_user, password=cql_pass)
ssl_opts = _get_ssl_opts()
if ssl_opts:
cluster = Cluster(contact_points,
port=port,
auth_provider=auth_provider,
ssl_options=ssl_opts,
protocol_version=protocol_version,
compression=True)
else:
cluster = Cluster(contact_points, port=port,
auth_provider=auth_provider,
protocol_version=protocol_version,
compression=True)
for recontimes in range(1, 4):
try:
session = cluster.connect()
break
except OperationTimedOut:
log.warning('Cassandra cluster.connect timed out, try %s', recontimes)
if recontimes >= 3:
raise
# TODO: Call cluster.shutdown() when the module is unloaded on shutdown.
__context__['cassandra_cql_returner_cluster'] = cluster
__context__['cassandra_cql_returner_session'] = session
__context__['cassandra_cql_prepared'] = {}
log.debug('Successfully connected to Cassandra cluster at %s', contact_points)
return cluster, session
except TypeError:
pass
except (ConnectionException, ConnectionShutdown, NoHostAvailable):
log.error('Could not connect to Cassandra cluster at %s', contact_points)
raise CommandExecutionError('ERROR: Could not connect to Cassandra cluster.') | python | def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,
protocol_version=None):
'''
Connect to a Cassandra cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
:type contact_points: str or list of str
:param cql_user: The Cassandra user if authentication is turned on.
:type cql_user: str
:param cql_pass: The Cassandra user password if authentication is turned on.
:type cql_pass: str
:param port: The Cassandra cluster port, defaults to None.
:type port: int
:param protocol_version: Cassandra protocol version to use.
:type protocol_version: int
:return: The session and cluster objects.
:rtype: cluster object, session object
'''
# Lazy load the Cassandra cluster and session for this module by creating a
# cluster and session when cql_query is called the first time. Get the
# Cassandra cluster and session from this module's __context__ after it is
# loaded the first time cql_query is called.
#
# TODO: Call cluster.shutdown() when the module is unloaded on
# master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()
# do nothing to allow loaded modules to gracefully handle resources stored
# in __context__ (i.e. connection pools). This means that the connection
# pool is orphaned and Salt relies on Cassandra to reclaim connections.
# Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
# function, or something similar for each loaded module, connection pools
# and the like can be gracefully reclaimed/shutdown.
if (__context__
and 'cassandra_cql_returner_cluster' in __context__
and 'cassandra_cql_returner_session' in __context__):
return __context__['cassandra_cql_returner_cluster'], __context__['cassandra_cql_returner_session']
else:
contact_points = _load_properties(property_name=contact_points, config_option='cluster')
contact_points = contact_points if isinstance(contact_points, list) else contact_points.split(',')
port = _load_properties(property_name=port, config_option='port', set_default=True, default=9042)
cql_user = _load_properties(property_name=cql_user, config_option='username', set_default=True, default="cassandra")
cql_pass = _load_properties(property_name=cql_pass, config_option='password', set_default=True, default="cassandra")
protocol_version = _load_properties(property_name=protocol_version,
config_option='protocol_version',
set_default=True, default=4)
try:
auth_provider = PlainTextAuthProvider(username=cql_user, password=cql_pass)
ssl_opts = _get_ssl_opts()
if ssl_opts:
cluster = Cluster(contact_points,
port=port,
auth_provider=auth_provider,
ssl_options=ssl_opts,
protocol_version=protocol_version,
compression=True)
else:
cluster = Cluster(contact_points, port=port,
auth_provider=auth_provider,
protocol_version=protocol_version,
compression=True)
for recontimes in range(1, 4):
try:
session = cluster.connect()
break
except OperationTimedOut:
log.warning('Cassandra cluster.connect timed out, try %s', recontimes)
if recontimes >= 3:
raise
# TODO: Call cluster.shutdown() when the module is unloaded on shutdown.
__context__['cassandra_cql_returner_cluster'] = cluster
__context__['cassandra_cql_returner_session'] = session
__context__['cassandra_cql_prepared'] = {}
log.debug('Successfully connected to Cassandra cluster at %s', contact_points)
return cluster, session
except TypeError:
pass
except (ConnectionException, ConnectionShutdown, NoHostAvailable):
log.error('Could not connect to Cassandra cluster at %s', contact_points)
raise CommandExecutionError('ERROR: Could not connect to Cassandra cluster.') | ['def', '_connect', '(', 'contact_points', '=', 'None', ',', 'port', '=', 'None', ',', 'cql_user', '=', 'None', ',', 'cql_pass', '=', 'None', ',', 'protocol_version', '=', 'None', ')', ':', '# Lazy load the Cassandra cluster and session for this module by creating a', '# cluster and session when cql_query is called the first time. Get the', "# Cassandra cluster and session from this module's __context__ after it is", '# loaded the first time cql_query is called.', '#', '# TODO: Call cluster.shutdown() when the module is unloaded on', '# master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()', '# do nothing to allow loaded modules to gracefully handle resources stored', '# in __context__ (i.e. connection pools). This means that the connection', '# pool is orphaned and Salt relies on Cassandra to reclaim connections.', '# Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"', '# function, or something similar for each loaded module, connection pools', '# and the like can be gracefully reclaimed/shutdown.', 'if', '(', '__context__', 'and', "'cassandra_cql_returner_cluster'", 'in', '__context__', 'and', "'cassandra_cql_returner_session'", 'in', '__context__', ')', ':', 'return', '__context__', '[', "'cassandra_cql_returner_cluster'", ']', ',', '__context__', '[', "'cassandra_cql_returner_session'", ']', 'else', ':', 'contact_points', '=', '_load_properties', '(', 'property_name', '=', 'contact_points', ',', 'config_option', '=', "'cluster'", ')', 'contact_points', '=', 'contact_points', 'if', 'isinstance', '(', 'contact_points', ',', 'list', ')', 'else', 'contact_points', '.', 'split', '(', "','", ')', 'port', '=', '_load_properties', '(', 'property_name', '=', 'port', ',', 'config_option', '=', "'port'", ',', 'set_default', '=', 'True', ',', 'default', '=', '9042', ')', 'cql_user', '=', '_load_properties', '(', 'property_name', '=', 'cql_user', ',', 'config_option', '=', "'username'", ',', 'set_default', '=', 'True', ',', 'default', '=', '"cassandra"', ')', 'cql_pass', '=', '_load_properties', '(', 'property_name', '=', 'cql_pass', ',', 'config_option', '=', "'password'", ',', 'set_default', '=', 'True', ',', 'default', '=', '"cassandra"', ')', 'protocol_version', '=', '_load_properties', '(', 'property_name', '=', 'protocol_version', ',', 'config_option', '=', "'protocol_version'", ',', 'set_default', '=', 'True', ',', 'default', '=', '4', ')', 'try', ':', 'auth_provider', '=', 'PlainTextAuthProvider', '(', 'username', '=', 'cql_user', ',', 'password', '=', 'cql_pass', ')', 'ssl_opts', '=', '_get_ssl_opts', '(', ')', 'if', 'ssl_opts', ':', 'cluster', '=', 'Cluster', '(', 'contact_points', ',', 'port', '=', 'port', ',', 'auth_provider', '=', 'auth_provider', ',', 'ssl_options', '=', 'ssl_opts', ',', 'protocol_version', '=', 'protocol_version', ',', 'compression', '=', 'True', ')', 'else', ':', 'cluster', '=', 'Cluster', '(', 'contact_points', ',', 'port', '=', 'port', ',', 'auth_provider', '=', 'auth_provider', ',', 'protocol_version', '=', 'protocol_version', ',', 'compression', '=', 'True', ')', 'for', 'recontimes', 'in', 'range', '(', '1', ',', '4', ')', ':', 'try', ':', 'session', '=', 'cluster', '.', 'connect', '(', ')', 'break', 'except', 'OperationTimedOut', ':', 'log', '.', 'warning', '(', "'Cassandra cluster.connect timed out, try %s'", ',', 'recontimes', ')', 'if', 'recontimes', '>=', '3', ':', 'raise', '# TODO: Call cluster.shutdown() when the module is unloaded 
on shutdown.', '__context__', '[', "'cassandra_cql_returner_cluster'", ']', '=', 'cluster', '__context__', '[', "'cassandra_cql_returner_session'", ']', '=', 'session', '__context__', '[', "'cassandra_cql_prepared'", ']', '=', '{', '}', 'log', '.', 'debug', '(', "'Successfully connected to Cassandra cluster at %s'", ',', 'contact_points', ')', 'return', 'cluster', ',', 'session', 'except', 'TypeError', ':', 'pass', 'except', '(', 'ConnectionException', ',', 'ConnectionShutdown', ',', 'NoHostAvailable', ')', ':', 'log', '.', 'error', '(', "'Could not connect to Cassandra cluster at %s'", ',', 'contact_points', ')', 'raise', 'CommandExecutionError', '(', "'ERROR: Could not connect to Cassandra cluster.'", ')'] | Connect to a Cassandra cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
:type contact_points: str or list of str
:param cql_user: The Cassandra user if authentication is turned on.
:type cql_user: str
:param cql_pass: The Cassandra user password if authentication is turned on.
:type cql_pass: str
:param port: The Cassandra cluster port, defaults to None.
:type port: int
:param protocol_version: Cassandra protocol version to use.
:type protocol_version: int
:return: The session and cluster objects.
:rtype: cluster object, session object | ['Connect', 'to', 'a', 'Cassandra', 'cluster', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cassandra_cql.py#L201-L282 |
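Outside of Salt's loader machinery the same connection recipe can be reproduced with the DataStax driver directly; a hedged sketch in which the contact points, credentials and query are placeholders:

    from cassandra.cluster import Cluster
    from cassandra.auth import PlainTextAuthProvider

    auth = PlainTextAuthProvider(username='cassandra', password='cassandra')
    cluster = Cluster(['10.0.0.10', '10.0.0.11'], port=9042,
                      auth_provider=auth, protocol_version=4)
    session = cluster.connect()
    try:
        rows = session.execute('SELECT release_version FROM system.local')
        for row in rows:
            print(row.release_version)
    finally:
        cluster.shutdown()   # release the pool instead of leaving it for the server to reap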
597 | SHTOOLS/SHTOOLS | setup.py | build.run | def run(self):
"""Build the Fortran library, all python extensions and the docs."""
print('---- BUILDING ----')
_build.run(self)
# build documentation
print('---- BUILDING DOCS ----')
docdir = os.path.join(self.build_lib, 'pyshtools', 'doc')
self.mkpath(docdir)
doc_builder = os.path.join(self.build_lib, 'pyshtools', 'make_docs.py')
doc_source = '.'
check_call([sys.executable, doc_builder, doc_source, self.build_lib])
print('---- ALL DONE ----') | python | def run(self):
"""Build the Fortran library, all python extensions and the docs."""
print('---- BUILDING ----')
_build.run(self)
# build documentation
print('---- BUILDING DOCS ----')
docdir = os.path.join(self.build_lib, 'pyshtools', 'doc')
self.mkpath(docdir)
doc_builder = os.path.join(self.build_lib, 'pyshtools', 'make_docs.py')
doc_source = '.'
check_call([sys.executable, doc_builder, doc_source, self.build_lib])
print('---- ALL DONE ----') | ['def', 'run', '(', 'self', ')', ':', 'print', '(', "'---- BUILDING ----'", ')', '_build', '.', 'run', '(', 'self', ')', '# build documentation', 'print', '(', "'---- BUILDING DOCS ----'", ')', 'docdir', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'build_lib', ',', "'pyshtools'", ',', "'doc'", ')', 'self', '.', 'mkpath', '(', 'docdir', ')', 'doc_builder', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'build_lib', ',', "'pyshtools'", ',', "'make_docs.py'", ')', 'doc_source', '=', "'.'", 'check_call', '(', '[', 'sys', '.', 'executable', ',', 'doc_builder', ',', 'doc_source', ',', 'self', '.', 'build_lib', ']', ')', 'print', '(', "'---- ALL DONE ----'", ')'] | Build the Fortran library, all python extensions and the docs. | ['Build', 'the', 'Fortran', 'library', 'all', 'python', 'extensions', 'and', 'the', 'docs', '.'] | train | https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/setup.py#L98-L111 |
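A subclassed build command like this one is wired into setup() through cmdclass; a minimal generic sketch of the same hook, where the package name and the extra generation script are placeholders:

    import os
    import sys
    from subprocess import check_call

    from setuptools import setup
    from distutils.command.build import build as _build

    class build(_build):
        def run(self):
            _build.run(self)   # run the normal build steps first
            # then run an extra generation step against the freshly built package
            script = os.path.join(self.build_lib, 'mypkg', 'make_docs.py')
            check_call([sys.executable, script, '.', self.build_lib])

    setup(name='mypkg', packages=['mypkg'], cmdclass={'build': build})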
598 | aparo/pyes | pyes/models.py | ElasticSearchModel.get_id | def get_id(self):
""" Force the object saveing to get an id"""
_id = self._meta.get("id", None)
if _id is None:
_id = self.save()
return _id | python | def get_id(self):
""" Force the object saveing to get an id"""
_id = self._meta.get("id", None)
if _id is None:
_id = self.save()
return _id | ['def', 'get_id', '(', 'self', ')', ':', '_id', '=', 'self', '.', '_meta', '.', 'get', '(', '"id"', ',', 'None', ')', 'if', '_id', 'is', 'None', ':', '_id', '=', 'self', '.', 'save', '(', ')', 'return', '_id'] | Force the object saving to get an id | ['Force', 'the', 'object', 'saving', 'to', 'get', 'an', 'id'] | train | https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/models.py#L98-L103 |
599 | honzajavorek/redis-collections | redis_collections/sortedsets.py | SortedSetCounter.discard_between | def discard_between(
self,
min_rank=None,
max_rank=None,
min_score=None,
max_score=None,
):
"""
Remove members whose ranking is between *min_rank* and *max_rank*
OR whose score is between *min_score* and *max_score* (both ranges
inclusive). If no bounds are specified, no members will be removed.
"""
no_ranks = (min_rank is None) and (max_rank is None)
no_scores = (min_score is None) and (max_score is None)
# Default scope: nothing
if no_ranks and no_scores:
return
# Scope widens to given score range
if no_ranks and (not no_scores):
return self.discard_by_score(min_score, max_score)
# Scope widens to given rank range
if (not no_ranks) and no_scores:
return self.discard_by_rank(min_rank, max_rank)
# Scope widens to score range and then rank range
with self.redis.pipeline() as pipe:
self.discard_by_score(min_score, max_score, pipe)
self.discard_by_rank(min_rank, max_rank, pipe)
pipe.execute() | python | def discard_between(
self,
min_rank=None,
max_rank=None,
min_score=None,
max_score=None,
):
"""
Remove members whose ranking is between *min_rank* and *max_rank*
OR whose score is between *min_score* and *max_score* (both ranges
inclusive). If no bounds are specified, no members will be removed.
"""
no_ranks = (min_rank is None) and (max_rank is None)
no_scores = (min_score is None) and (max_score is None)
# Default scope: nothing
if no_ranks and no_scores:
return
# Scope widens to given score range
if no_ranks and (not no_scores):
return self.discard_by_score(min_score, max_score)
# Scope widens to given rank range
if (not no_ranks) and no_scores:
return self.discard_by_rank(min_rank, max_rank)
# Scope widens to score range and then rank range
with self.redis.pipeline() as pipe:
self.discard_by_score(min_score, max_score, pipe)
self.discard_by_rank(min_rank, max_rank, pipe)
pipe.execute() | ['def', 'discard_between', '(', 'self', ',', 'min_rank', '=', 'None', ',', 'max_rank', '=', 'None', ',', 'min_score', '=', 'None', ',', 'max_score', '=', 'None', ',', ')', ':', 'no_ranks', '=', '(', 'min_rank', 'is', 'None', ')', 'and', '(', 'max_rank', 'is', 'None', ')', 'no_scores', '=', '(', 'min_score', 'is', 'None', ')', 'and', '(', 'max_score', 'is', 'None', ')', '# Default scope: nothing', 'if', 'no_ranks', 'and', 'no_scores', ':', 'return', '# Scope widens to given score range', 'if', 'no_ranks', 'and', '(', 'not', 'no_scores', ')', ':', 'return', 'self', '.', 'discard_by_score', '(', 'min_score', ',', 'max_score', ')', '# Scope widens to given rank range', 'if', '(', 'not', 'no_ranks', ')', 'and', 'no_scores', ':', 'return', 'self', '.', 'discard_by_rank', '(', 'min_rank', ',', 'max_rank', ')', '# Scope widens to score range and then rank range', 'with', 'self', '.', 'redis', '.', 'pipeline', '(', ')', 'as', 'pipe', ':', 'self', '.', 'discard_by_score', '(', 'min_score', ',', 'max_score', ',', 'pipe', ')', 'self', '.', 'discard_by_rank', '(', 'min_rank', ',', 'max_rank', ',', 'pipe', ')', 'pipe', '.', 'execute', '(', ')'] | Remove members whose ranking is between *min_rank* and *max_rank*
OR whose score is between *min_score* and *max_score* (both ranges
inclusive). If no bounds are specified, no members will be removed. | ['Remove', 'members', 'whose', 'ranking', 'is', 'between', '*', 'min_rank', '*', 'and', '*', 'max_rank', '*', 'OR', 'whose', 'score', 'is', 'between', '*', 'min_score', '*', 'and', '*', 'max_score', '*', '(', 'both', 'ranges', 'inclusive', ')', '.', 'If', 'no', 'bounds', 'are', 'specified', 'no', 'members', 'will', 'be', 'removed', '.'] | train | https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sortedsets.py#L204-L235 |