repo stringlengths 7-55 | path stringlengths 4-223 | url stringlengths 87-315 | code stringlengths 75-104k | code_tokens sequence | docstring stringlengths 1-46.9k | docstring_tokens sequence | language stringclasses 1 value | partition stringclasses 3 values | avg_line_len float64 7.91-980
---|---|---|---|---|---|---|---|---|---|
consbio/gis-metadata-parser | gis_metadata/utils.py | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L262-L275 | def has_property(elem_to_parse, xpath):
"""
Parse xpath for any attribute reference "path/@attr" and check for root and presence of attribute.
:return: True if xpath is present in the element along with any attribute referenced, otherwise False
"""
xroot, attr = get_xpath_tuple(xpath)
if not xroot and not attr:
return False
elif not attr:
return bool(get_elements_text(elem_to_parse, xroot))
else:
return bool(get_elements_attributes(elem_to_parse, xroot, attr)) | [
"def",
"has_property",
"(",
"elem_to_parse",
",",
"xpath",
")",
":",
"xroot",
",",
"attr",
"=",
"get_xpath_tuple",
"(",
"xpath",
")",
"if",
"not",
"xroot",
"and",
"not",
"attr",
":",
"return",
"False",
"elif",
"not",
"attr",
":",
"return",
"bool",
"(",
"get_elements_text",
"(",
"elem_to_parse",
",",
"xroot",
")",
")",
"else",
":",
"return",
"bool",
"(",
"get_elements_attributes",
"(",
"elem_to_parse",
",",
"xroot",
",",
"attr",
")",
")"
] | Parse xpath for any attribute reference "path/@attr" and check for root and presence of attribute.
:return: True if xpath is present in the element along with any attribute referenced, otherwise False | [
"Parse",
"xpath",
"for",
"any",
"attribute",
"reference",
"path",
"/"
] | python | train | 36.357143 |
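
An illustrative usage sketch for the `has_property` row above. This is a sketch only: it assumes `elem_to_parse` accepts a parsed `ElementTree` element and that the helpers `get_xpath_tuple`, `get_elements_text` and `get_elements_attributes` (not shown in this row) behave as their names suggest; the XML fragment and xpaths are made up.

```python
from xml.etree.ElementTree import fromstring
from gis_metadata.utils import has_property

# Tiny illustrative fragment; the paths below are not from any real metadata standard.
root = fromstring(
    '<metadata><idinfo>'
    '<citation href="https://example.org/dataset">Example dataset</citation>'
    '</idinfo></metadata>'
)

has_property(root, 'idinfo/citation')        # True: element exists and has text
has_property(root, 'idinfo/citation/@href')  # True: "path/@attr" form, attribute present
has_property(root, 'idinfo/abstract')        # False: path not present
```
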
google/grumpy | third_party/stdlib/fpformat.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/fpformat.py#L106-L136 | def sci(x, digs):
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = repr(x)
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = repr(abs(expo))
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e | [
"def",
"sci",
"(",
"x",
",",
"digs",
")",
":",
"if",
"type",
"(",
"x",
")",
"!=",
"type",
"(",
"''",
")",
":",
"x",
"=",
"repr",
"(",
"x",
")",
"sign",
",",
"intpart",
",",
"fraction",
",",
"expo",
"=",
"extract",
"(",
"x",
")",
"if",
"not",
"intpart",
":",
"while",
"fraction",
"and",
"fraction",
"[",
"0",
"]",
"==",
"'0'",
":",
"fraction",
"=",
"fraction",
"[",
"1",
":",
"]",
"expo",
"=",
"expo",
"-",
"1",
"if",
"fraction",
":",
"intpart",
",",
"fraction",
"=",
"fraction",
"[",
"0",
"]",
",",
"fraction",
"[",
"1",
":",
"]",
"expo",
"=",
"expo",
"-",
"1",
"else",
":",
"intpart",
"=",
"'0'",
"else",
":",
"expo",
"=",
"expo",
"+",
"len",
"(",
"intpart",
")",
"-",
"1",
"intpart",
",",
"fraction",
"=",
"intpart",
"[",
"0",
"]",
",",
"intpart",
"[",
"1",
":",
"]",
"+",
"fraction",
"digs",
"=",
"max",
"(",
"0",
",",
"digs",
")",
"intpart",
",",
"fraction",
"=",
"roundfrac",
"(",
"intpart",
",",
"fraction",
",",
"digs",
")",
"if",
"len",
"(",
"intpart",
")",
">",
"1",
":",
"intpart",
",",
"fraction",
",",
"expo",
"=",
"intpart",
"[",
"0",
"]",
",",
"intpart",
"[",
"1",
":",
"]",
"+",
"fraction",
"[",
":",
"-",
"1",
"]",
",",
"expo",
"+",
"len",
"(",
"intpart",
")",
"-",
"1",
"s",
"=",
"sign",
"+",
"intpart",
"if",
"digs",
">",
"0",
":",
"s",
"=",
"s",
"+",
"'.'",
"+",
"fraction",
"e",
"=",
"repr",
"(",
"abs",
"(",
"expo",
")",
")",
"e",
"=",
"'0'",
"*",
"(",
"3",
"-",
"len",
"(",
"e",
")",
")",
"+",
"e",
"if",
"expo",
"<",
"0",
":",
"e",
"=",
"'-'",
"+",
"e",
"else",
":",
"e",
"=",
"'+'",
"+",
"e",
"return",
"s",
"+",
"'e'",
"+",
"e"
] | Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed. | [
"Format",
"x",
"as",
"[",
"-",
"]",
"d",
".",
"dddE",
"[",
"+",
"-",
"]",
"ddd",
"with",
"digs",
"digits",
"after",
"the",
"point",
"and",
"exactly",
"one",
"digit",
"before",
".",
"If",
"digs",
"is",
"<",
"=",
"0",
"one",
"digit",
"is",
"kept",
"and",
"the",
"point",
"is",
"suppressed",
"."
] | python | valid | 34.290323 |
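
Because the `sci` row above omits its helpers (`extract`, `roundfrac`, `NotANumber`), the expected outputs below assume they behave like the CPython 2 `fpformat` originals that this grumpy copy mirrors; the values were traced by hand from the code shown.

```python
# Hand-traced expectations for the sci() shown above:
sci(3.14159, 3)   # '3.142e+000'  (rounded to 3 digits after the point, 3-digit exponent)
sci(123.456, 2)   # '1.23e+002'   (mantissa normalised to one digit before the point)
sci(42, 0)        # '4e+001'      (digs <= 0 suppresses the decimal point)
```
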
atztogo/phonopy | phonopy/api_phonopy.py | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/api_phonopy.py#L1252-L1277 | def set_iter_mesh(self,
mesh,
shift=None,
is_time_reversal=True,
is_mesh_symmetry=True,
is_eigenvectors=False,
is_gamma_center=False):
"""Create an IterMesh instancer
Attributes
----------
See set_mesh method.
"""
warnings.warn("Phonopy.set_iter_mesh is deprecated. "
"Use Phonopy.run_mesh with use_iter_mesh=True.",
DeprecationWarning)
self.run_mesh(mesh=mesh,
shift=shift,
is_time_reversal=is_time_reversal,
is_mesh_symmetry=is_mesh_symmetry,
with_eigenvectors=is_eigenvectors,
is_gamma_center=is_gamma_center,
use_iter_mesh=True) | [
"def",
"set_iter_mesh",
"(",
"self",
",",
"mesh",
",",
"shift",
"=",
"None",
",",
"is_time_reversal",
"=",
"True",
",",
"is_mesh_symmetry",
"=",
"True",
",",
"is_eigenvectors",
"=",
"False",
",",
"is_gamma_center",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Phonopy.set_iter_mesh is deprecated. \"",
"\"Use Phonopy.run_mesh with use_iter_mesh=True.\"",
",",
"DeprecationWarning",
")",
"self",
".",
"run_mesh",
"(",
"mesh",
"=",
"mesh",
",",
"shift",
"=",
"shift",
",",
"is_time_reversal",
"=",
"is_time_reversal",
",",
"is_mesh_symmetry",
"=",
"is_mesh_symmetry",
",",
"with_eigenvectors",
"=",
"is_eigenvectors",
",",
"is_gamma_center",
"=",
"is_gamma_center",
",",
"use_iter_mesh",
"=",
"True",
")"
] | Create an IterMesh instancer
Attributes
----------
See set_mesh method. | [
"Create",
"an",
"IterMesh",
"instancer"
] | python | train | 33.653846 |
bluekeyes/sphinx-javalink | javalink/__init__.py | https://github.com/bluekeyes/sphinx-javalink/blob/490e37506efa53e95ad88a665e347536e75b6254/javalink/__init__.py#L61-L95 | def find_rt_jar(javahome=None):
"""Find the path to the Java standard library jar.
The jar is expected to exist at the path 'jre/lib/rt.jar' inside a
standard Java installation directory. The directory is found using
the following procedure:
1. If the javahome argument is provided, use the value as the
directory.
2. If the JAVA_HOME environment variable is set, use the value as
the directory.
3. Find the location of the ``java`` binary in the current PATH and
compute the installation directory from this location.
Args:
javahome: A path to a Java installation directory (optional).
"""
if not javahome:
if 'JAVA_HOME' in os.environ:
javahome = os.environ['JAVA_HOME']
elif sys.platform == 'darwin':
# The default java binary on OS X is not part of a standard Oracle
# install, so building paths relative to it does not work like it
# does on other platforms.
javahome = _find_osx_javahome()
else:
javahome = _get_javahome_from_java(_find_java_binary())
rtpath = os.path.join(javahome, 'jre', 'lib', 'rt.jar')
if not os.path.isfile(rtpath):
msg = 'Could not find rt.jar: {} is not a file'.format(rtpath)
raise ExtensionError(msg)
return rtpath | [
"def",
"find_rt_jar",
"(",
"javahome",
"=",
"None",
")",
":",
"if",
"not",
"javahome",
":",
"if",
"'JAVA_HOME'",
"in",
"os",
".",
"environ",
":",
"javahome",
"=",
"os",
".",
"environ",
"[",
"'JAVA_HOME'",
"]",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# The default java binary on OS X is not part of a standard Oracle",
"# install, so building paths relative to it does not work like it",
"# does on other platforms.",
"javahome",
"=",
"_find_osx_javahome",
"(",
")",
"else",
":",
"javahome",
"=",
"_get_javahome_from_java",
"(",
"_find_java_binary",
"(",
")",
")",
"rtpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"javahome",
",",
"'jre'",
",",
"'lib'",
",",
"'rt.jar'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"rtpath",
")",
":",
"msg",
"=",
"'Could not find rt.jar: {} is not a file'",
".",
"format",
"(",
"rtpath",
")",
"raise",
"ExtensionError",
"(",
"msg",
")",
"return",
"rtpath"
] | Find the path to the Java standard library jar.
The jar is expected to exist at the path 'jre/lib/rt.jar' inside a
standard Java installation directory. The directory is found using
the following procedure:
1. If the javahome argument is provided, use the value as the
directory.
2. If the JAVA_HOME environment variable is set, use the value as
the directory.
3. Find the location of the ``java`` binary in the current PATH and
compute the installation directory from this location.
Args:
javahome: A path to a Java installation directory (optional). | [
"Find",
"the",
"path",
"to",
"the",
"Java",
"standard",
"library",
"jar",
"."
] | python | train | 37.485714 |
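
A hedged usage sketch for the `find_rt_jar` row above; the JDK path is a placeholder, and per the code an `ExtensionError` is raised if `jre/lib/rt.jar` does not exist under the resolved directory.

```python
from javalink import find_rt_jar  # the function above is defined in javalink/__init__.py

# Resolve via JAVA_HOME or the `java` binary on PATH:
rt_jar = find_rt_jar()

# Or point at an explicit installation directory (placeholder path):
rt_jar = find_rt_jar(javahome='/usr/lib/jvm/java-8-openjdk-amd64')
print(rt_jar)  # .../jre/lib/rt.jar
```
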
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L171-L184 | def show_raslog_output_show_all_raslog_raslog_entries_log_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_raslog = ET.Element("show_raslog")
config = show_raslog
output = ET.SubElement(show_raslog, "output")
show_all_raslog = ET.SubElement(output, "show-all-raslog")
raslog_entries = ET.SubElement(show_all_raslog, "raslog-entries")
log_type = ET.SubElement(raslog_entries, "log-type")
log_type.text = kwargs.pop('log_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"show_raslog_output_show_all_raslog_raslog_entries_log_type",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_raslog",
"=",
"ET",
".",
"Element",
"(",
"\"show_raslog\"",
")",
"config",
"=",
"show_raslog",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"show_raslog",
",",
"\"output\"",
")",
"show_all_raslog",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"show-all-raslog\"",
")",
"raslog_entries",
"=",
"ET",
".",
"SubElement",
"(",
"show_all_raslog",
",",
"\"raslog-entries\"",
")",
"log_type",
"=",
"ET",
".",
"SubElement",
"(",
"raslog_entries",
",",
"\"log-type\"",
")",
"log_type",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'log_type'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train | 44.142857 |
pydata/xarray | xarray/core/alignment.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/alignment.py#L172-L228 | def deep_align(objects, join='inner', copy=True, indexes=None,
exclude=frozenset(), raise_on_invalid=True):
"""Align objects for merging, recursing into dictionary values.
This function is not public API.
"""
from .dataarray import DataArray
from .dataset import Dataset
if indexes is None:
indexes = {}
def is_alignable(obj):
return isinstance(obj, (DataArray, Dataset))
positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for n, variables in enumerate(objects):
if is_alignable(variables):
positions.append(n)
keys.append(no_key)
targets.append(variables)
out.append(not_replaced)
elif is_dict_like(variables):
for k, v in variables.items():
if is_alignable(v) and k not in indexes:
# Skip variables in indexes for alignment, because these
# should to be overwritten instead:
# https://github.com/pydata/xarray/issues/725
positions.append(n)
keys.append(k)
targets.append(v)
out.append(OrderedDict(variables))
elif raise_on_invalid:
raise ValueError('object to align is neither an xarray.Dataset, '
'an xarray.DataArray nor a dictionary: %r'
% variables)
else:
out.append(variables)
aligned = align(*targets, join=join, copy=copy, indexes=indexes,
exclude=exclude)
for position, key, aligned_obj in zip(positions, keys, aligned):
if key is no_key:
out[position] = aligned_obj
else:
out[position][key] = aligned_obj
# something went wrong: we should have replaced all sentinel values
assert all(arg is not not_replaced for arg in out)
return out | [
"def",
"deep_align",
"(",
"objects",
",",
"join",
"=",
"'inner'",
",",
"copy",
"=",
"True",
",",
"indexes",
"=",
"None",
",",
"exclude",
"=",
"frozenset",
"(",
")",
",",
"raise_on_invalid",
"=",
"True",
")",
":",
"from",
".",
"dataarray",
"import",
"DataArray",
"from",
".",
"dataset",
"import",
"Dataset",
"if",
"indexes",
"is",
"None",
":",
"indexes",
"=",
"{",
"}",
"def",
"is_alignable",
"(",
"obj",
")",
":",
"return",
"isinstance",
"(",
"obj",
",",
"(",
"DataArray",
",",
"Dataset",
")",
")",
"positions",
"=",
"[",
"]",
"keys",
"=",
"[",
"]",
"out",
"=",
"[",
"]",
"targets",
"=",
"[",
"]",
"no_key",
"=",
"object",
"(",
")",
"not_replaced",
"=",
"object",
"(",
")",
"for",
"n",
",",
"variables",
"in",
"enumerate",
"(",
"objects",
")",
":",
"if",
"is_alignable",
"(",
"variables",
")",
":",
"positions",
".",
"append",
"(",
"n",
")",
"keys",
".",
"append",
"(",
"no_key",
")",
"targets",
".",
"append",
"(",
"variables",
")",
"out",
".",
"append",
"(",
"not_replaced",
")",
"elif",
"is_dict_like",
"(",
"variables",
")",
":",
"for",
"k",
",",
"v",
"in",
"variables",
".",
"items",
"(",
")",
":",
"if",
"is_alignable",
"(",
"v",
")",
"and",
"k",
"not",
"in",
"indexes",
":",
"# Skip variables in indexes for alignment, because these",
"# should to be overwritten instead:",
"# https://github.com/pydata/xarray/issues/725",
"positions",
".",
"append",
"(",
"n",
")",
"keys",
".",
"append",
"(",
"k",
")",
"targets",
".",
"append",
"(",
"v",
")",
"out",
".",
"append",
"(",
"OrderedDict",
"(",
"variables",
")",
")",
"elif",
"raise_on_invalid",
":",
"raise",
"ValueError",
"(",
"'object to align is neither an xarray.Dataset, '",
"'an xarray.DataArray nor a dictionary: %r'",
"%",
"variables",
")",
"else",
":",
"out",
".",
"append",
"(",
"variables",
")",
"aligned",
"=",
"align",
"(",
"*",
"targets",
",",
"join",
"=",
"join",
",",
"copy",
"=",
"copy",
",",
"indexes",
"=",
"indexes",
",",
"exclude",
"=",
"exclude",
")",
"for",
"position",
",",
"key",
",",
"aligned_obj",
"in",
"zip",
"(",
"positions",
",",
"keys",
",",
"aligned",
")",
":",
"if",
"key",
"is",
"no_key",
":",
"out",
"[",
"position",
"]",
"=",
"aligned_obj",
"else",
":",
"out",
"[",
"position",
"]",
"[",
"key",
"]",
"=",
"aligned_obj",
"# something went wrong: we should have replaced all sentinel values",
"assert",
"all",
"(",
"arg",
"is",
"not",
"not_replaced",
"for",
"arg",
"in",
"out",
")",
"return",
"out"
] | Align objects for merging, recursing into dictionary values.
This function is not public API. | [
"Align",
"objects",
"for",
"merging",
"recursing",
"into",
"dictionary",
"values",
"."
] | python | train | 33.754386 |
openstack/networking-cisco | networking_cisco/ml2_drivers/nexus/nexus_restapi_network_driver.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_restapi_network_driver.py#L254-L264 | def _apply_user_port_channel_config(self, nexus_host, vpc_nbr):
"""Adds STP and no lacp suspend config to port channel. """
cli_cmds = self._get_user_port_channel_config(nexus_host, vpc_nbr)
if cli_cmds:
self._send_cli_conf_string(nexus_host, cli_cmds)
else:
vpc_str = str(vpc_nbr)
path_snip = snipp.PATH_ALL
body_snip = snipp.BODY_ADD_PORT_CH_P2 % (vpc_str, vpc_str)
self.send_edit_string(nexus_host, path_snip, body_snip) | [
"def",
"_apply_user_port_channel_config",
"(",
"self",
",",
"nexus_host",
",",
"vpc_nbr",
")",
":",
"cli_cmds",
"=",
"self",
".",
"_get_user_port_channel_config",
"(",
"nexus_host",
",",
"vpc_nbr",
")",
"if",
"cli_cmds",
":",
"self",
".",
"_send_cli_conf_string",
"(",
"nexus_host",
",",
"cli_cmds",
")",
"else",
":",
"vpc_str",
"=",
"str",
"(",
"vpc_nbr",
")",
"path_snip",
"=",
"snipp",
".",
"PATH_ALL",
"body_snip",
"=",
"snipp",
".",
"BODY_ADD_PORT_CH_P2",
"%",
"(",
"vpc_str",
",",
"vpc_str",
")",
"self",
".",
"send_edit_string",
"(",
"nexus_host",
",",
"path_snip",
",",
"body_snip",
")"
] | Adds STP and no lacp suspend config to port channel. | [
"Adds",
"STP",
"and",
"no",
"lacp",
"suspend",
"config",
"to",
"port",
"channel",
"."
] | python | train | 46 |
PonteIneptique/collatinus-python | pycollatinus/parser.py | https://github.com/PonteIneptique/collatinus-python/blob/fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5/pycollatinus/parser.py#L380-L397 | def ajModeles(self):
""" Lecture des modèles, et enregistrement de leurs désinences
"""
sl = []
lines = [line for line in lignesFichier(self.path("modeles.la"))]
max = len(lines) - 1
for i, l in enumerate(lines):
if l.startswith('$'):
varname, value = tuple(l.split("="))
self.lemmatiseur._variables[varname] = value
continue
eclats = l.split(":")
if (eclats[0] == "modele" or i == max) and len(sl) > 0:
m = self.parse_modele(sl)
self.register_modele(m)
sl = []
sl.append(l) | [
"def",
"ajModeles",
"(",
"self",
")",
":",
"sl",
"=",
"[",
"]",
"lines",
"=",
"[",
"line",
"for",
"line",
"in",
"lignesFichier",
"(",
"self",
".",
"path",
"(",
"\"modeles.la\"",
")",
")",
"]",
"max",
"=",
"len",
"(",
"lines",
")",
"-",
"1",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"l",
".",
"startswith",
"(",
"'$'",
")",
":",
"varname",
",",
"value",
"=",
"tuple",
"(",
"l",
".",
"split",
"(",
"\"=\"",
")",
")",
"self",
".",
"lemmatiseur",
".",
"_variables",
"[",
"varname",
"]",
"=",
"value",
"continue",
"eclats",
"=",
"l",
".",
"split",
"(",
"\":\"",
")",
"if",
"(",
"eclats",
"[",
"0",
"]",
"==",
"\"modele\"",
"or",
"i",
"==",
"max",
")",
"and",
"len",
"(",
"sl",
")",
">",
"0",
":",
"m",
"=",
"self",
".",
"parse_modele",
"(",
"sl",
")",
"self",
".",
"register_modele",
"(",
"m",
")",
"sl",
"=",
"[",
"]",
"sl",
".",
"append",
"(",
"l",
")"
] | Lecture des modèles, et enregistrement de leurs désinences | [
"Lecture",
"des",
"modèles",
"et",
"enregistrement",
"de",
"leurs",
"désinences"
] | python | train | 36.111111 |
census-instrumentation/opencensus-python | opencensus/trace/propagation/binary_format.py | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/propagation/binary_format.py#L95-L136 | def from_header(self, binary):
"""Generate a SpanContext object using the trace context header.
The value of enabled parsed from header is int. Need to convert to
bool.
:type binary: bytes
:param binary: Trace context header which was extracted from the
request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
"""
# If no binary provided, generate a new SpanContext
if binary is None:
return span_context_module.SpanContext(from_header=False)
# If cannot parse, return a new SpanContext and ignore the context
# from binary.
try:
data = Header._make(struct.unpack(BINARY_FORMAT, binary))
except struct.error:
logging.warning(
'Cannot parse the incoming binary data {}, '
'wrong format. Total bytes length should be {}.'.format(
binary, FORMAT_LENGTH
)
)
return span_context_module.SpanContext(from_header=False)
# data.trace_id is in bytes with length 16, hexlify it to hex bytes
# with length 32, then decode it to hex string using utf-8.
trace_id = str(binascii.hexlify(data.trace_id).decode(UTF8))
span_id = str(binascii.hexlify(data.span_id).decode(UTF8))
trace_options = TraceOptions(data.trace_option)
span_context = span_context_module.SpanContext(
trace_id=trace_id,
span_id=span_id,
trace_options=trace_options,
from_header=True)
return span_context | [
"def",
"from_header",
"(",
"self",
",",
"binary",
")",
":",
"# If no binary provided, generate a new SpanContext",
"if",
"binary",
"is",
"None",
":",
"return",
"span_context_module",
".",
"SpanContext",
"(",
"from_header",
"=",
"False",
")",
"# If cannot parse, return a new SpanContext and ignore the context",
"# from binary.",
"try",
":",
"data",
"=",
"Header",
".",
"_make",
"(",
"struct",
".",
"unpack",
"(",
"BINARY_FORMAT",
",",
"binary",
")",
")",
"except",
"struct",
".",
"error",
":",
"logging",
".",
"warning",
"(",
"'Cannot parse the incoming binary data {}, '",
"'wrong format. Total bytes length should be {}.'",
".",
"format",
"(",
"binary",
",",
"FORMAT_LENGTH",
")",
")",
"return",
"span_context_module",
".",
"SpanContext",
"(",
"from_header",
"=",
"False",
")",
"# data.trace_id is in bytes with length 16, hexlify it to hex bytes",
"# with length 32, then decode it to hex string using utf-8.",
"trace_id",
"=",
"str",
"(",
"binascii",
".",
"hexlify",
"(",
"data",
".",
"trace_id",
")",
".",
"decode",
"(",
"UTF8",
")",
")",
"span_id",
"=",
"str",
"(",
"binascii",
".",
"hexlify",
"(",
"data",
".",
"span_id",
")",
".",
"decode",
"(",
"UTF8",
")",
")",
"trace_options",
"=",
"TraceOptions",
"(",
"data",
".",
"trace_option",
")",
"span_context",
"=",
"span_context_module",
".",
"SpanContext",
"(",
"trace_id",
"=",
"trace_id",
",",
"span_id",
"=",
"span_id",
",",
"trace_options",
"=",
"trace_options",
",",
"from_header",
"=",
"True",
")",
"return",
"span_context"
] | Generate a SpanContext object using the trace context header.
The value of enabled parsed from header is int. Need to convert to
bool.
:type binary: bytes
:param binary: Trace context header which was extracted from the
request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header. | [
"Generate",
"a",
"SpanContext",
"object",
"using",
"the",
"trace",
"context",
"header",
".",
"The",
"value",
"of",
"enabled",
"parsed",
"from",
"header",
"is",
"int",
".",
"Need",
"to",
"convert",
"to",
"bool",
"."
] | python | train | 40.238095 |
f3at/feat | src/feat/models/utils.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/utils.py#L26-L31 | def mk_class_name(*parts):
"""Create a valid class name from a list of strings."""
cap = lambda s: s and (s[0].capitalize() + s[1:])
return "".join(["".join([cap(i)
for i in re.split("[\ \-\_\.]", str(p))])
for p in parts]) | [
"def",
"mk_class_name",
"(",
"*",
"parts",
")",
":",
"cap",
"=",
"lambda",
"s",
":",
"s",
"and",
"(",
"s",
"[",
"0",
"]",
".",
"capitalize",
"(",
")",
"+",
"s",
"[",
"1",
":",
"]",
")",
"return",
"\"\"",
".",
"join",
"(",
"[",
"\"\"",
".",
"join",
"(",
"[",
"cap",
"(",
"i",
")",
"for",
"i",
"in",
"re",
".",
"split",
"(",
"\"[\\ \\-\\_\\.]\"",
",",
"str",
"(",
"p",
")",
")",
"]",
")",
"for",
"p",
"in",
"parts",
"]",
")"
] | Create a valid class name from a list of strings. | [
"Create",
"a",
"valid",
"class",
"name",
"from",
"a",
"list",
"of",
"strings",
"."
] | python | train | 46.5 |
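
The `mk_class_name` helper above can be traced directly: each argument is stringified, split on spaces, hyphens, underscores and dots, each fragment has only its first character capitalized, and the fragments are concatenated. Illustrative results:

```python
mk_class_name('my-agent', 'medium')        # 'MyAgentMedium'
mk_class_name('feat.models', 'base_cls')   # 'FeatModelsBaseCls'
mk_class_name('alreadyCamel')              # 'AlreadyCamel'  (only the first letter is touched)
```
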
xenadevel/PyXenaManager | xenamanager/api/xena_rest.py | https://github.com/xenadevel/PyXenaManager/blob/384ca265f73044b8a8b471f5dd7a6103fc54f4df/xenamanager/api/xena_rest.py#L69-L78 | def send_command_return(self, obj, command, *arguments):
""" Send command with single line output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: command output.
"""
return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.line_output,
*arguments).json() | [
"def",
"send_command_return",
"(",
"self",
",",
"obj",
",",
"command",
",",
"*",
"arguments",
")",
":",
"return",
"self",
".",
"_perform_command",
"(",
"'{}/{}'",
".",
"format",
"(",
"self",
".",
"session_url",
",",
"obj",
".",
"ref",
")",
",",
"command",
",",
"OperReturnType",
".",
"line_output",
",",
"*",
"arguments",
")",
".",
"json",
"(",
")"
] | Send command with single line output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: command output. | [
"Send",
"command",
"with",
"single",
"line",
"output",
"."
] | python | train | 44.8 |
dereneaton/ipyrad | ipyrad/analysis/structure.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/structure.py#L626-L690 | def _get_clumpp_table(self, kpop, max_var_multiple, quiet):
""" private function to clumpp results"""
## concat results for k=x
reps, excluded = _concat_reps(self, kpop, max_var_multiple, quiet)
if reps:
ninds = reps[0].inds
nreps = len(reps)
else:
ninds = nreps = 0
if not reps:
return "no result files found"
clumphandle = os.path.join(self.workdir, "tmp.clumppparams.txt")
self.clumppparams.kpop = kpop
self.clumppparams.c = ninds
self.clumppparams.r = nreps
with open(clumphandle, 'w') as tmp_c:
tmp_c.write(self.clumppparams._asfile())
## create CLUMPP args string
outfile = os.path.join(self.workdir,
"{}-K-{}.outfile".format(self.name, kpop))
indfile = os.path.join(self.workdir,
"{}-K-{}.indfile".format(self.name, kpop))
miscfile = os.path.join(self.workdir,
"{}-K-{}.miscfile".format(self.name, kpop))
cmd = ["CLUMPP", clumphandle,
"-i", indfile,
"-o", outfile,
"-j", miscfile,
"-r", str(nreps),
"-c", str(ninds),
"-k", str(kpop)]
## call clumpp
proc = subprocess.Popen(cmd,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
_ = proc.communicate()
## cleanup
for rfile in [indfile, miscfile]:
if os.path.exists(rfile):
os.remove(rfile)
## parse clumpp results file
ofile = os.path.join(self.workdir, "{}-K-{}.outfile".format(self.name, kpop))
if os.path.exists(ofile):
csvtable = pd.read_csv(ofile, delim_whitespace=True, header=None)
table = csvtable.loc[:, 5:]
## apply names to cols and rows
table.columns = range(table.shape[1])
table.index = self.labels
if not quiet:
sys.stderr.write(
"[K{}] {}/{} results permuted across replicates (max_var={}).\n"\
.format(kpop, nreps, nreps+excluded, max_var_multiple))
return table
else:
sys.stderr.write("No files ready for {}-K-{} in {}\n"\
.format(self.name, kpop, self.workdir))
return | [
"def",
"_get_clumpp_table",
"(",
"self",
",",
"kpop",
",",
"max_var_multiple",
",",
"quiet",
")",
":",
"## concat results for k=x",
"reps",
",",
"excluded",
"=",
"_concat_reps",
"(",
"self",
",",
"kpop",
",",
"max_var_multiple",
",",
"quiet",
")",
"if",
"reps",
":",
"ninds",
"=",
"reps",
"[",
"0",
"]",
".",
"inds",
"nreps",
"=",
"len",
"(",
"reps",
")",
"else",
":",
"ninds",
"=",
"nreps",
"=",
"0",
"if",
"not",
"reps",
":",
"return",
"\"no result files found\"",
"clumphandle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\"tmp.clumppparams.txt\"",
")",
"self",
".",
"clumppparams",
".",
"kpop",
"=",
"kpop",
"self",
".",
"clumppparams",
".",
"c",
"=",
"ninds",
"self",
".",
"clumppparams",
".",
"r",
"=",
"nreps",
"with",
"open",
"(",
"clumphandle",
",",
"'w'",
")",
"as",
"tmp_c",
":",
"tmp_c",
".",
"write",
"(",
"self",
".",
"clumppparams",
".",
"_asfile",
"(",
")",
")",
"## create CLUMPP args string",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\"{}-K-{}.outfile\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"kpop",
")",
")",
"indfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\"{}-K-{}.indfile\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"kpop",
")",
")",
"miscfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\"{}-K-{}.miscfile\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"kpop",
")",
")",
"cmd",
"=",
"[",
"\"CLUMPP\"",
",",
"clumphandle",
",",
"\"-i\"",
",",
"indfile",
",",
"\"-o\"",
",",
"outfile",
",",
"\"-j\"",
",",
"miscfile",
",",
"\"-r\"",
",",
"str",
"(",
"nreps",
")",
",",
"\"-c\"",
",",
"str",
"(",
"ninds",
")",
",",
"\"-k\"",
",",
"str",
"(",
"kpop",
")",
"]",
"## call clumpp",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"_",
"=",
"proc",
".",
"communicate",
"(",
")",
"## cleanup",
"for",
"rfile",
"in",
"[",
"indfile",
",",
"miscfile",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"rfile",
")",
":",
"os",
".",
"remove",
"(",
"rfile",
")",
"## parse clumpp results file",
"ofile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\"{}-K-{}.outfile\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"kpop",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ofile",
")",
":",
"csvtable",
"=",
"pd",
".",
"read_csv",
"(",
"ofile",
",",
"delim_whitespace",
"=",
"True",
",",
"header",
"=",
"None",
")",
"table",
"=",
"csvtable",
".",
"loc",
"[",
":",
",",
"5",
":",
"]",
"## apply names to cols and rows",
"table",
".",
"columns",
"=",
"range",
"(",
"table",
".",
"shape",
"[",
"1",
"]",
")",
"table",
".",
"index",
"=",
"self",
".",
"labels",
"if",
"not",
"quiet",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"[K{}] {}/{} results permuted across replicates (max_var={}).\\n\"",
".",
"format",
"(",
"kpop",
",",
"nreps",
",",
"nreps",
"+",
"excluded",
",",
"max_var_multiple",
")",
")",
"return",
"table",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"No files ready for {}-K-{} in {}\\n\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"kpop",
",",
"self",
".",
"workdir",
")",
")",
"return"
] | private function to clumpp results | [
"private",
"function",
"to",
"clumpp",
"results"
] | python | valid | 33.461538 |
SoCo/SoCo | soco/data_structures.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/data_structures.py#L619-L668 | def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Args:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
~xml.etree.ElementTree.Element: an Element.
"""
elt_attrib = {}
if include_namespaces:
elt_attrib.update({
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
elt_attrib.update({
'parentID': self.parent_id,
'restricted': 'true' if self.restricted else 'false',
'id': self.item_id
})
elt = XML.Element(self.tag, elt_attrib)
# Add the title, which should always come first, according to the spec
XML.SubElement(elt, 'dc:title').text = self.title
# Add in any resources
for resource in self.resources:
elt.append(resource.to_element())
# Add the rest of the metadata attributes (i.e all those listed in
# _translation) as sub-elements of the item element.
for key, value in self._translation.items():
if hasattr(self, key):
# Some attributes have a namespace of '', which means they
# are in the default namespace. We need to handle those
# carefully
tag = "%s:%s" % value if value[0] else "%s" % value[1]
XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
# Now add in the item class
XML.SubElement(elt, 'upnp:class').text = self.item_class
# And the desc element
desc_attrib = {'id': 'cdudn', 'nameSpace':
'urn:schemas-rinconnetworks-com:metadata-1-0/'}
desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
desc_elt.text = self.desc
return elt | [
"def",
"to_element",
"(",
"self",
",",
"include_namespaces",
"=",
"False",
")",
":",
"elt_attrib",
"=",
"{",
"}",
"if",
"include_namespaces",
":",
"elt_attrib",
".",
"update",
"(",
"{",
"'xmlns'",
":",
"\"urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/\"",
",",
"'xmlns:dc'",
":",
"\"http://purl.org/dc/elements/1.1/\"",
",",
"'xmlns:upnp'",
":",
"\"urn:schemas-upnp-org:metadata-1-0/upnp/\"",
",",
"}",
")",
"elt_attrib",
".",
"update",
"(",
"{",
"'parentID'",
":",
"self",
".",
"parent_id",
",",
"'restricted'",
":",
"'true'",
"if",
"self",
".",
"restricted",
"else",
"'false'",
",",
"'id'",
":",
"self",
".",
"item_id",
"}",
")",
"elt",
"=",
"XML",
".",
"Element",
"(",
"self",
".",
"tag",
",",
"elt_attrib",
")",
"# Add the title, which should always come first, according to the spec",
"XML",
".",
"SubElement",
"(",
"elt",
",",
"'dc:title'",
")",
".",
"text",
"=",
"self",
".",
"title",
"# Add in any resources",
"for",
"resource",
"in",
"self",
".",
"resources",
":",
"elt",
".",
"append",
"(",
"resource",
".",
"to_element",
"(",
")",
")",
"# Add the rest of the metadata attributes (i.e all those listed in",
"# _translation) as sub-elements of the item element.",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_translation",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"key",
")",
":",
"# Some attributes have a namespace of '', which means they",
"# are in the default namespace. We need to handle those",
"# carefully",
"tag",
"=",
"\"%s:%s\"",
"%",
"value",
"if",
"value",
"[",
"0",
"]",
"else",
"\"%s\"",
"%",
"value",
"[",
"1",
"]",
"XML",
".",
"SubElement",
"(",
"elt",
",",
"tag",
")",
".",
"text",
"=",
"(",
"\"%s\"",
"%",
"getattr",
"(",
"self",
",",
"key",
")",
")",
"# Now add in the item class",
"XML",
".",
"SubElement",
"(",
"elt",
",",
"'upnp:class'",
")",
".",
"text",
"=",
"self",
".",
"item_class",
"# And the desc element",
"desc_attrib",
"=",
"{",
"'id'",
":",
"'cdudn'",
",",
"'nameSpace'",
":",
"'urn:schemas-rinconnetworks-com:metadata-1-0/'",
"}",
"desc_elt",
"=",
"XML",
".",
"SubElement",
"(",
"elt",
",",
"'desc'",
",",
"desc_attrib",
")",
"desc_elt",
".",
"text",
"=",
"self",
".",
"desc",
"return",
"elt"
] | Return an ElementTree Element representing this instance.
Args:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
~xml.etree.ElementTree.Element: an Element. | [
"Return",
"an",
"ElementTree",
"Element",
"representing",
"this",
"instance",
"."
] | python | train | 40.04 |
tensorflow/tensor2tensor | tensor2tensor/utils/optimize.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L301-L327 | def log_variable_sizes(var_list=None, tag=None, verbose=False):
"""Log the sizes and shapes of variables, and the total size.
Args:
var_list: a list of variables; defaults to trainable_variables
tag: a string; defaults to "Trainable Variables"
verbose: bool, if True, log every weight; otherwise, log total size only.
"""
if var_list is None:
var_list = tf.trainable_variables()
if tag is None:
tag = "Trainable Variables"
if not var_list:
return
name_to_var = {v.name: v for v in var_list}
total_size = 0
for v_name in sorted(list(name_to_var)):
v = name_to_var[v_name]
v_size = int(np.prod(np.array(v.shape.as_list())))
if verbose:
tf.logging.info("Weight %s\tshape %s\tsize %d",
v.name[:-2].ljust(80),
str(v.shape).ljust(20), v_size)
total_size += v_size
tf.logging.info("%s Total size: %d", tag, total_size) | [
"def",
"log_variable_sizes",
"(",
"var_list",
"=",
"None",
",",
"tag",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"var_list",
"is",
"None",
":",
"var_list",
"=",
"tf",
".",
"trainable_variables",
"(",
")",
"if",
"tag",
"is",
"None",
":",
"tag",
"=",
"\"Trainable Variables\"",
"if",
"not",
"var_list",
":",
"return",
"name_to_var",
"=",
"{",
"v",
".",
"name",
":",
"v",
"for",
"v",
"in",
"var_list",
"}",
"total_size",
"=",
"0",
"for",
"v_name",
"in",
"sorted",
"(",
"list",
"(",
"name_to_var",
")",
")",
":",
"v",
"=",
"name_to_var",
"[",
"v_name",
"]",
"v_size",
"=",
"int",
"(",
"np",
".",
"prod",
"(",
"np",
".",
"array",
"(",
"v",
".",
"shape",
".",
"as_list",
"(",
")",
")",
")",
")",
"if",
"verbose",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Weight %s\\tshape %s\\tsize %d\"",
",",
"v",
".",
"name",
"[",
":",
"-",
"2",
"]",
".",
"ljust",
"(",
"80",
")",
",",
"str",
"(",
"v",
".",
"shape",
")",
".",
"ljust",
"(",
"20",
")",
",",
"v_size",
")",
"total_size",
"+=",
"v_size",
"tf",
".",
"logging",
".",
"info",
"(",
"\"%s Total size: %d\"",
",",
"tag",
",",
"total_size",
")"
] | Log the sizes and shapes of variables, and the total size.
Args:
var_list: a list of variables; defaults to trainable_variables
tag: a string; defaults to "Trainable Variables"
verbose: bool, if True, log every weight; otherwise, log total size only. | [
"Log",
"the",
"sizes",
"and",
"shapes",
"of",
"variables",
"and",
"the",
"total",
"size",
"."
] | python | train | 33.592593 |
SheffieldML/GPy | GPy/likelihoods/student_t.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/student_t.py#L71-L97 | def logpdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = \\ln \\Gamma\\left(\\frac{v+1}{2}\\right) - \\ln \\Gamma\\left(\\frac{v}{2}\\right) - \\ln \\sqrt{v \\pi\\sigma^{2}} - \\frac{v+1}{2}\\ln \\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)
:param inv_link_f: latent variables (link(f))
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
e = y - inv_link_f
#FIXME:
#Why does np.log(1 + (1/self.v)*((y-inv_link_f)**2)/self.sigma2) suppress the divide by zero?!
#But np.log(1 + (1/float(self.v))*((y-inv_link_f)**2)/self.sigma2) throws it correctly
#print - 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2))
objective = (+ gammaln((self.v + 1) * 0.5)
- gammaln(self.v * 0.5)
- 0.5*np.log(self.sigma2 * self.v * np.pi)
- 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2))
)
return objective | [
"def",
"logpdf_link",
"(",
"self",
",",
"inv_link_f",
",",
"y",
",",
"Y_metadata",
"=",
"None",
")",
":",
"e",
"=",
"y",
"-",
"inv_link_f",
"#FIXME:",
"#Why does np.log(1 + (1/self.v)*((y-inv_link_f)**2)/self.sigma2) suppress the divide by zero?!",
"#But np.log(1 + (1/float(self.v))*((y-inv_link_f)**2)/self.sigma2) throws it correctly",
"#print - 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2))",
"objective",
"=",
"(",
"+",
"gammaln",
"(",
"(",
"self",
".",
"v",
"+",
"1",
")",
"*",
"0.5",
")",
"-",
"gammaln",
"(",
"self",
".",
"v",
"*",
"0.5",
")",
"-",
"0.5",
"*",
"np",
".",
"log",
"(",
"self",
".",
"sigma2",
"*",
"self",
".",
"v",
"*",
"np",
".",
"pi",
")",
"-",
"0.5",
"*",
"(",
"self",
".",
"v",
"+",
"1",
")",
"*",
"np",
".",
"log",
"(",
"1",
"+",
"(",
"1",
"/",
"np",
".",
"float",
"(",
"self",
".",
"v",
")",
")",
"*",
"(",
"(",
"e",
"**",
"2",
")",
"/",
"self",
".",
"sigma2",
")",
")",
")",
"return",
"objective"
] | Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = \\ln \\Gamma\\left(\\frac{v+1}{2}\\right) - \\ln \\Gamma\\left(\\frac{v}{2}\\right) - \\ln \\sqrt{v \\pi\\sigma^{2}} - \\frac{v+1}{2}\\ln \\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)
:param inv_link_f: latent variables (link(f))
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: likelihood evaluated for this point
:rtype: float | [
"Log",
"Likelihood",
"Function",
"given",
"link",
"(",
"f",
")"
] | python | train | 48.925926 |
saltstack/salt | salt/cloud/clouds/opennebula.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L2429-L2485 | def template_delete(call=None, kwargs=None):
'''
Deletes the given template from OpenNebula. Either a name or a template_id must
be supplied.
.. versionadded:: 2016.3.0
name
The name of the template to delete. Can be used instead of ``template_id``.
template_id
The ID of the template to delete. Can be used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f template_delete opennebula name=my-template
salt-cloud --function template_delete opennebula template_id=5
'''
if call != 'function':
raise SaltCloudSystemExit(
'The template_delete function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
template_id = kwargs.get('template_id', None)
if template_id:
if name:
log.warning(
'Both the \'template_id\' and \'name\' arguments were provided. '
'\'template_id\' will take precedence.'
)
elif name:
template_id = get_template_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The template_delete function requires either a \'name\' or a \'template_id\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.template.delete(auth, int(template_id))
data = {
'action': 'template.delete',
'deleted': response[0],
'template_id': response[1],
'error_code': response[2],
}
return data | [
"def",
"template_delete",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The template_delete function must be called with -f or --function.'",
")",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"name",
"=",
"kwargs",
".",
"get",
"(",
"'name'",
",",
"None",
")",
"template_id",
"=",
"kwargs",
".",
"get",
"(",
"'template_id'",
",",
"None",
")",
"if",
"template_id",
":",
"if",
"name",
":",
"log",
".",
"warning",
"(",
"'Both the \\'template_id\\' and \\'name\\' arguments were provided. '",
"'\\'template_id\\' will take precedence.'",
")",
"elif",
"name",
":",
"template_id",
"=",
"get_template_id",
"(",
"kwargs",
"=",
"{",
"'name'",
":",
"name",
"}",
")",
"else",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The template_delete function requires either a \\'name\\' or a \\'template_id\\' '",
"'to be provided.'",
")",
"server",
",",
"user",
",",
"password",
"=",
"_get_xml_rpc",
"(",
")",
"auth",
"=",
"':'",
".",
"join",
"(",
"[",
"user",
",",
"password",
"]",
")",
"response",
"=",
"server",
".",
"one",
".",
"template",
".",
"delete",
"(",
"auth",
",",
"int",
"(",
"template_id",
")",
")",
"data",
"=",
"{",
"'action'",
":",
"'template.delete'",
",",
"'deleted'",
":",
"response",
"[",
"0",
"]",
",",
"'template_id'",
":",
"response",
"[",
"1",
"]",
",",
"'error_code'",
":",
"response",
"[",
"2",
"]",
",",
"}",
"return",
"data"
] | Deletes the given template from OpenNebula. Either a name or a template_id must
be supplied.
.. versionadded:: 2016.3.0
name
The name of the template to delete. Can be used instead of ``template_id``.
template_id
The ID of the template to delete. Can be used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f template_delete opennebula name=my-template
salt-cloud --function template_delete opennebula template_id=5 | [
"Deletes",
"the",
"given",
"template",
"from",
"OpenNebula",
".",
"Either",
"a",
"name",
"or",
"a",
"template_id",
"must",
"be",
"supplied",
"."
] | python | train | 27.77193 |
PyCQA/astroid | astroid/scoped_nodes.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L2240-L2252 | def instance_attr_ancestors(self, name, context=None):
"""Iterate over the parents that define the given name as an attribute.
:param name: The name to find definitions for.
:type name: str
:returns: The parents that define the given name as
an instance attribute.
:rtype: iterable(NodeNG)
"""
for astroid in self.ancestors(context=context):
if name in astroid.instance_attrs:
yield astroid | [
"def",
"instance_attr_ancestors",
"(",
"self",
",",
"name",
",",
"context",
"=",
"None",
")",
":",
"for",
"astroid",
"in",
"self",
".",
"ancestors",
"(",
"context",
"=",
"context",
")",
":",
"if",
"name",
"in",
"astroid",
".",
"instance_attrs",
":",
"yield",
"astroid"
] | Iterate over the parents that define the given name as an attribute.
:param name: The name to find definitions for.
:type name: str
:returns: The parents that define the given name as
an instance attribute.
:rtype: iterable(NodeNG) | [
"Iterate",
"over",
"the",
"parents",
"that",
"define",
"the",
"given",
"name",
"as",
"an",
"attribute",
"."
] | python | train | 36.615385 |
manns/pyspread | pyspread/src/gui/_toolbars.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_toolbars.py#L225-L253 | def OnUpdate(self, event):
"""Updates the toolbar states"""
# Gray out undo and redo id not available
undo_toolid = self.label2id["Undo"]
redo_toolid = self.label2id["Redo"]
self.EnableTool(undo_toolid, undo.stack().canundo())
self.EnableTool(redo_toolid, undo.stack().canredo())
# Set ToolTip strings to potential next undo / redo action
undotext = undo.stack().undotext()
undo_tool = self.FindTool(undo_toolid)
if undotext is None:
undo_tool.SetShortHelp(_("No undo actions available"))
else:
undo_tool.SetShortHelp(undotext)
redotext = undo.stack().redotext()
redo_tool = self.FindTool(redo_toolid)
if redotext is None:
redo_tool.SetShortHelp(_("No redo actions available"))
else:
redo_tool.SetShortHelp(redotext)
self.Refresh()
event.Skip() | [
"def",
"OnUpdate",
"(",
"self",
",",
"event",
")",
":",
"# Gray out undo and redo id not available",
"undo_toolid",
"=",
"self",
".",
"label2id",
"[",
"\"Undo\"",
"]",
"redo_toolid",
"=",
"self",
".",
"label2id",
"[",
"\"Redo\"",
"]",
"self",
".",
"EnableTool",
"(",
"undo_toolid",
",",
"undo",
".",
"stack",
"(",
")",
".",
"canundo",
"(",
")",
")",
"self",
".",
"EnableTool",
"(",
"redo_toolid",
",",
"undo",
".",
"stack",
"(",
")",
".",
"canredo",
"(",
")",
")",
"# Set ToolTip strings to potential next undo / redo action",
"undotext",
"=",
"undo",
".",
"stack",
"(",
")",
".",
"undotext",
"(",
")",
"undo_tool",
"=",
"self",
".",
"FindTool",
"(",
"undo_toolid",
")",
"if",
"undotext",
"is",
"None",
":",
"undo_tool",
".",
"SetShortHelp",
"(",
"_",
"(",
"\"No undo actions available\"",
")",
")",
"else",
":",
"undo_tool",
".",
"SetShortHelp",
"(",
"undotext",
")",
"redotext",
"=",
"undo",
".",
"stack",
"(",
")",
".",
"redotext",
"(",
")",
"redo_tool",
"=",
"self",
".",
"FindTool",
"(",
"redo_toolid",
")",
"if",
"redotext",
"is",
"None",
":",
"redo_tool",
".",
"SetShortHelp",
"(",
"_",
"(",
"\"No redo actions available\"",
")",
")",
"else",
":",
"redo_tool",
".",
"SetShortHelp",
"(",
"redotext",
")",
"self",
".",
"Refresh",
"(",
")",
"event",
".",
"Skip",
"(",
")"
] | Updates the toolbar states | [
"Updates",
"the",
"toolbar",
"states"
] | python | train | 31.275862 |
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/collection.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L468-L477 | def _item_to_document_ref(iterator, item):
"""Convert Document resource to document ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (dict): document resource
"""
document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1]
return iterator.collection.document(document_id) | [
"def",
"_item_to_document_ref",
"(",
"iterator",
",",
"item",
")",
":",
"document_id",
"=",
"item",
".",
"name",
".",
"split",
"(",
"_helpers",
".",
"DOCUMENT_PATH_DELIMITER",
")",
"[",
"-",
"1",
"]",
"return",
"iterator",
".",
"collection",
".",
"document",
"(",
"document_id",
")"
] | Convert Document resource to document ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (dict): document resource | [
"Convert",
"Document",
"resource",
"to",
"document",
"ref",
"."
] | python | train | 35.9 |
Azure/msrest-for-python | msrest/serialization.py | https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L1688-L1703 | def deserialize_duration(attr):
"""Deserialize ISO-8601 formatted string into TimeDelta object.
:param str attr: response string to be deserialized.
:rtype: TimeDelta
:raises: DeserializationError if string format invalid.
"""
if isinstance(attr, ET.Element):
attr = attr.text
try:
duration = isodate.parse_duration(attr)
except(ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize duration object."
raise_with_traceback(DeserializationError, msg, err)
else:
return duration | [
"def",
"deserialize_duration",
"(",
"attr",
")",
":",
"if",
"isinstance",
"(",
"attr",
",",
"ET",
".",
"Element",
")",
":",
"attr",
"=",
"attr",
".",
"text",
"try",
":",
"duration",
"=",
"isodate",
".",
"parse_duration",
"(",
"attr",
")",
"except",
"(",
"ValueError",
",",
"OverflowError",
",",
"AttributeError",
")",
"as",
"err",
":",
"msg",
"=",
"\"Cannot deserialize duration object.\"",
"raise_with_traceback",
"(",
"DeserializationError",
",",
"msg",
",",
"err",
")",
"else",
":",
"return",
"duration"
] | Deserialize ISO-8601 formatted string into TimeDelta object.
:param str attr: response string to be deserialized.
:rtype: TimeDelta
:raises: DeserializationError if string format invalid. | [
"Deserialize",
"ISO",
"-",
"8601",
"formatted",
"string",
"into",
"TimeDelta",
"object",
"."
] | python | train | 38.5 |
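
A short sketch of the `deserialize_duration` helper above, which defers to `isodate.parse_duration` for ISO-8601 duration strings. In msrest it is defined on the `Deserializer` class; it is written as a plain call here purely for illustration.

```python
deserialize_duration('PT1H30M')   # datetime.timedelta(seconds=5400)
deserialize_duration('P1Y2M')     # an isodate.Duration (calendar years/months)

# Anything isodate cannot parse is re-raised as a DeserializationError:
deserialize_duration('not-a-duration')  # raises DeserializationError
```
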
AlecAivazis/graphql-over-kafka | nautilus/api/util/create_model_schema.py | https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/api/util/create_model_schema.py#L9-L48 | def create_model_schema(target_model):
""" This function creates a graphql schema that provides a single model """
from nautilus.database import db
# create the schema instance
schema = graphene.Schema(auto_camelcase=False)
# grab the primary key from the model
primary_key = target_model.primary_key()
primary_key_type = convert_peewee_field(primary_key)
# create a graphene object
class ModelObjectType(PeeweeObjectType):
class Meta:
model = target_model
pk = Field(primary_key_type, description="The primary key for this object.")
@graphene.resolve_only_args
def resolve_pk(self):
return getattr(self, self.primary_key().name)
class Query(graphene.ObjectType):
""" the root level query """
all_models = List(ModelObjectType, args=args_for_model(target_model))
@graphene.resolve_only_args
def resolve_all_models(self, **args):
# filter the model query according to the arguments
# print(filter_model(target_model, args)[0].__dict__)
return filter_model(target_model, args)
# add the query to the schema
schema.query = Query
return schema | [
"def",
"create_model_schema",
"(",
"target_model",
")",
":",
"from",
"nautilus",
".",
"database",
"import",
"db",
"# create the schema instance",
"schema",
"=",
"graphene",
".",
"Schema",
"(",
"auto_camelcase",
"=",
"False",
")",
"# grab the primary key from the model",
"primary_key",
"=",
"target_model",
".",
"primary_key",
"(",
")",
"primary_key_type",
"=",
"convert_peewee_field",
"(",
"primary_key",
")",
"# create a graphene object",
"class",
"ModelObjectType",
"(",
"PeeweeObjectType",
")",
":",
"class",
"Meta",
":",
"model",
"=",
"target_model",
"pk",
"=",
"Field",
"(",
"primary_key_type",
",",
"description",
"=",
"\"The primary key for this object.\"",
")",
"@",
"graphene",
".",
"resolve_only_args",
"def",
"resolve_pk",
"(",
"self",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"self",
".",
"primary_key",
"(",
")",
".",
"name",
")",
"class",
"Query",
"(",
"graphene",
".",
"ObjectType",
")",
":",
"\"\"\" the root level query \"\"\"",
"all_models",
"=",
"List",
"(",
"ModelObjectType",
",",
"args",
"=",
"args_for_model",
"(",
"target_model",
")",
")",
"@",
"graphene",
".",
"resolve_only_args",
"def",
"resolve_all_models",
"(",
"self",
",",
"*",
"*",
"args",
")",
":",
"# filter the model query according to the arguments",
"# print(filter_model(target_model, args)[0].__dict__)",
"return",
"filter_model",
"(",
"target_model",
",",
"args",
")",
"# add the query to the schema",
"schema",
".",
"query",
"=",
"Query",
"return",
"schema"
] | This function creates a graphql schema that provides a single model | [
"This",
"function",
"creates",
"a",
"graphql",
"schema",
"that",
"provides",
"a",
"single",
"model"
] | python | train | 29.725 |
pypa/pipenv | pipenv/vendor/requests/utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/utils.py#L648-L669 | def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True | [
"def",
"is_valid_cidr",
"(",
"string_network",
")",
":",
"if",
"string_network",
".",
"count",
"(",
"'/'",
")",
"==",
"1",
":",
"try",
":",
"mask",
"=",
"int",
"(",
"string_network",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"return",
"False",
"if",
"mask",
"<",
"1",
"or",
"mask",
">",
"32",
":",
"return",
"False",
"try",
":",
"socket",
".",
"inet_aton",
"(",
"string_network",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
")",
"except",
"socket",
".",
"error",
":",
"return",
"False",
"else",
":",
"return",
"False",
"return",
"True"
] | Very simple check of the cidr format in no_proxy variable.
:rtype: bool | [
"Very",
"simple",
"check",
"of",
"the",
"cidr",
"format",
"in",
"no_proxy",
"variable",
"."
] | python | train | 22.772727 |
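
The behaviour of `is_valid_cidr` above follows directly from the code: the string must contain exactly one `/`, the mask must parse as an integer between 1 and 32, and `socket.inet_aton` must accept the network part.

```python
is_valid_cidr('192.168.0.0/24')   # True
is_valid_cidr('192.168.0.0/33')   # False: mask out of range
is_valid_cidr('192.168.0.0')      # False: no '/', plain hosts are handled elsewhere in no_proxy matching
is_valid_cidr('example.com/24')   # False: inet_aton rejects the network part
```
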
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_vlan.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_vlan.py#L155-L165 | def vlan_dot1q_tag_native(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
dot1q = ET.SubElement(vlan, "dot1q")
tag = ET.SubElement(dot1q, "tag")
native = ET.SubElement(tag, "native")
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"vlan_dot1q_tag_native",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"vlan",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"vlan\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-vlan\"",
")",
"dot1q",
"=",
"ET",
".",
"SubElement",
"(",
"vlan",
",",
"\"dot1q\"",
")",
"tag",
"=",
"ET",
".",
"SubElement",
"(",
"dot1q",
",",
"\"tag\"",
")",
"native",
"=",
"ET",
".",
"SubElement",
"(",
"tag",
",",
"\"native\"",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train | 38.636364 |
rsheftel/raccoon | raccoon/dataframe.py | https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1027-L1047 | def append(self, data_frame):
"""
Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current
DataFrame then new columns will be created. All of the indexes in the data_frame must be different from the
current indexes or will raise an error.
:param data_frame: DataFrame to append
:return: nothing
"""
if len(data_frame) == 0: # empty DataFrame, do nothing
return
data_frame_index = data_frame.index
combined_index = self._index + data_frame_index
if len(set(combined_index)) != len(combined_index):
raise ValueError('duplicate indexes in DataFrames')
for c, column in enumerate(data_frame.columns):
if PYTHON3:
self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c].copy())
else:
self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c][:]) | [
"def",
"append",
"(",
"self",
",",
"data_frame",
")",
":",
"if",
"len",
"(",
"data_frame",
")",
"==",
"0",
":",
"# empty DataFrame, do nothing",
"return",
"data_frame_index",
"=",
"data_frame",
".",
"index",
"combined_index",
"=",
"self",
".",
"_index",
"+",
"data_frame_index",
"if",
"len",
"(",
"set",
"(",
"combined_index",
")",
")",
"!=",
"len",
"(",
"combined_index",
")",
":",
"raise",
"ValueError",
"(",
"'duplicate indexes in DataFrames'",
")",
"for",
"c",
",",
"column",
"in",
"enumerate",
"(",
"data_frame",
".",
"columns",
")",
":",
"if",
"PYTHON3",
":",
"self",
".",
"set",
"(",
"indexes",
"=",
"data_frame_index",
",",
"columns",
"=",
"column",
",",
"values",
"=",
"data_frame",
".",
"data",
"[",
"c",
"]",
".",
"copy",
"(",
")",
")",
"else",
":",
"self",
".",
"set",
"(",
"indexes",
"=",
"data_frame_index",
",",
"columns",
"=",
"column",
",",
"values",
"=",
"data_frame",
".",
"data",
"[",
"c",
"]",
"[",
":",
"]",
")"
] | Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current
DataFrame then new columns will be created. All of the indexes in the data_frame must be different from the
current indexes or will raise an error.
:param data_frame: DataFrame to append
:return: nothing | [
"Append",
"another",
"DataFrame",
"to",
"this",
"DataFrame",
".",
"If",
"the",
"new",
"data_frame",
"has",
"columns",
"that",
"are",
"not",
"in",
"the",
"current",
"DataFrame",
"then",
"new",
"columns",
"will",
"be",
"created",
".",
"All",
"of",
"the",
"indexes",
"in",
"the",
"data_frame",
"must",
"be",
"different",
"from",
"the",
"current",
"indexes",
"or",
"will",
"raise",
"an",
"error",
"."
] | python | train | 47.047619 |
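A minimal usage sketch for the append() method above; the DataFrame constructor call (data/index keyword arguments) is an assumption about raccoon's API and is illustrative only:

import raccoon as rc

df = rc.DataFrame(data={'a': [1, 2]}, index=[10, 11])        # assumed constructor signature
extra = rc.DataFrame(data={'a': [3], 'b': [7]}, index=[12])  # new column 'b', non-overlapping index
df.append(extra)   # column 'b' is created; index 12 must not already exist in df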
pyviz/geoviews | geoviews/element/geo.py | https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/element/geo.py#L142-L184 | def geoms(self, scale=None, bounds=None, as_element=True):
"""
Returns the geometries held by the Feature.
Parameters
----------
scale: str
Scale of the geometry to return expressed as string.
Available scales depends on the Feature type.
NaturalEarthFeature:
'10m', '50m', '110m'
GSHHSFeature:
'auto', 'coarse', 'low', 'intermediate', 'high', 'full'
bounds: tuple
Tuple of a bounding region to query for geometries in
as_element: boolean
Whether to wrap the geometries in an element
Returns
-------
geometries: Polygons/Path
Polygons or Path object wrapping around returned geometries
"""
feature = self.data
if scale is not None:
feature = feature.with_scale(scale)
if bounds:
extent = (bounds[0], bounds[2], bounds[1], bounds[3])
else:
extent = None
geoms = [g for g in feature.intersecting_geometries(extent) if g is not None]
if not as_element:
return geoms
elif not geoms or 'Polygon' in geoms[0].geom_type:
return Polygons(geoms, crs=feature.crs)
elif 'Point' in geoms[0].geom_type:
return Points(geoms, crs=feature.crs)
else:
return Path(geoms, crs=feature.crs) | [
"def",
"geoms",
"(",
"self",
",",
"scale",
"=",
"None",
",",
"bounds",
"=",
"None",
",",
"as_element",
"=",
"True",
")",
":",
"feature",
"=",
"self",
".",
"data",
"if",
"scale",
"is",
"not",
"None",
":",
"feature",
"=",
"feature",
".",
"with_scale",
"(",
"scale",
")",
"if",
"bounds",
":",
"extent",
"=",
"(",
"bounds",
"[",
"0",
"]",
",",
"bounds",
"[",
"2",
"]",
",",
"bounds",
"[",
"1",
"]",
",",
"bounds",
"[",
"3",
"]",
")",
"else",
":",
"extent",
"=",
"None",
"geoms",
"=",
"[",
"g",
"for",
"g",
"in",
"feature",
".",
"intersecting_geometries",
"(",
"extent",
")",
"if",
"g",
"is",
"not",
"None",
"]",
"if",
"not",
"as_element",
":",
"return",
"geoms",
"elif",
"not",
"geoms",
"or",
"'Polygon'",
"in",
"geoms",
"[",
"0",
"]",
".",
"geom_type",
":",
"return",
"Polygons",
"(",
"geoms",
",",
"crs",
"=",
"feature",
".",
"crs",
")",
"elif",
"'Point'",
"in",
"geoms",
"[",
"0",
"]",
".",
"geom_type",
":",
"return",
"Points",
"(",
"geoms",
",",
"crs",
"=",
"feature",
".",
"crs",
")",
"else",
":",
"return",
"Path",
"(",
"geoms",
",",
"crs",
"=",
"feature",
".",
"crs",
")"
] | Returns the geometries held by the Feature.
Parameters
----------
scale: str
Scale of the geometry to return expressed as string.
Available scales depends on the Feature type.
NaturalEarthFeature:
'10m', '50m', '110m'
GSHHSFeature:
'auto', 'coarse', 'low', 'intermediate', 'high', 'full'
bounds: tuple
Tuple of a bounding region to query for geometries in
as_element: boolean
Whether to wrap the geometries in an element
Returns
-------
geometries: Polygons/Path
Polygons or Path object wrapping around returned geometries | [
"Returns",
"the",
"geometries",
"held",
"by",
"the",
"Feature",
"."
] | python | train | 31.837209 |
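A short usage sketch for Feature.geoms(); the coastline feature name is assumed from geoviews.feature and is not part of this record:

import geoviews.feature as gf

element = gf.coastline.geoms(scale='50m', bounds=(-10, 35, 30, 60))  # wrapped as Path/Polygons
shapes = gf.coastline.geoms(scale='50m', as_element=False)           # raw shapely geometries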
seequent/properties | properties/basic.py | https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/basic.py#L521-L535 | def setter(self, func):
"""Register a set function for the DynamicProperty
This function must take two arguments, self and the new value.
Input value to the function is validated with prop validation prior to
execution.
"""
if not callable(func):
raise TypeError('setter must be callable function')
if hasattr(func, '__code__') and func.__code__.co_argcount != 2:
raise TypeError('setter must be a function with two arguments')
if func.__name__ != self.name:
raise TypeError('setter function must have same name as getter')
self._set_func = func
return self | [
"def",
"setter",
"(",
"self",
",",
"func",
")",
":",
"if",
"not",
"callable",
"(",
"func",
")",
":",
"raise",
"TypeError",
"(",
"'setter must be callable function'",
")",
"if",
"hasattr",
"(",
"func",
",",
"'__code__'",
")",
"and",
"func",
".",
"__code__",
".",
"co_argcount",
"!=",
"2",
":",
"raise",
"TypeError",
"(",
"'setter must be a function with two arguments'",
")",
"if",
"func",
".",
"__name__",
"!=",
"self",
".",
"name",
":",
"raise",
"TypeError",
"(",
"'setter function must have same name as getter'",
")",
"self",
".",
"_set_func",
"=",
"func",
"return",
"self"
] | Register a set function for the DynamicProperty
This function must take two arguments, self and the new value.
Input value to the function is validated with prop validation prior to
execution. | [
"Register",
"a",
"set",
"function",
"for",
"the",
"DynamicProperty"
] | python | train | 44 |
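A sketch of the getter/setter pairing this method enforces; the HasProperties/Float decorator usage is an assumption about the surrounding library and is not shown in this record. Note the setter takes exactly (self, value) and must share the getter's name:

import properties

class Square(properties.HasProperties):
    side = properties.Float('side length')

    @properties.Float('area of the square')   # assumed DynamicProperty construction
    def area(self):
        return self.side ** 2

    @area.setter
    def area(self, value):                     # two arguments, same name as the getter
        self.side = value ** 0.5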
iterative/dvc | dvc/analytics.py | https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/analytics.py#L247-L261 | def send_cmd(cmd, args, ret):
"""Collect and send analytics for CLI command.
Args:
args (list): parsed args for the CLI command.
ret (int): return value of the CLI command.
"""
from dvc.daemon import daemon
if not Analytics._is_enabled(cmd):
return
analytics = Analytics()
analytics.collect_cmd(args, ret)
daemon(["analytics", analytics.dump()]) | [
"def",
"send_cmd",
"(",
"cmd",
",",
"args",
",",
"ret",
")",
":",
"from",
"dvc",
".",
"daemon",
"import",
"daemon",
"if",
"not",
"Analytics",
".",
"_is_enabled",
"(",
"cmd",
")",
":",
"return",
"analytics",
"=",
"Analytics",
"(",
")",
"analytics",
".",
"collect_cmd",
"(",
"args",
",",
"ret",
")",
"daemon",
"(",
"[",
"\"analytics\"",
",",
"analytics",
".",
"dump",
"(",
")",
"]",
")"
] | Collect and send analytics for CLI command.
Args:
args (list): parsed args for the CLI command.
ret (int): return value of the CLI command. | [
"Collect",
"and",
"send",
"analytics",
"for",
"CLI",
"command",
"."
] | python | train | 28.933333 |
Cog-Creators/Red-Lavalink | lavalink/player_manager.py | https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/player_manager.py#L265-L277 | async def stop(self):
"""
Stops playback from lavalink.
.. important::
This method will clear the queue.
"""
await self.node.stop(self.channel.guild.id)
self.queue = []
self.current = None
self.position = 0
self._paused = False | [
"async",
"def",
"stop",
"(",
"self",
")",
":",
"await",
"self",
".",
"node",
".",
"stop",
"(",
"self",
".",
"channel",
".",
"guild",
".",
"id",
")",
"self",
".",
"queue",
"=",
"[",
"]",
"self",
".",
"current",
"=",
"None",
"self",
".",
"position",
"=",
"0",
"self",
".",
"_paused",
"=",
"False"
] | Stops playback from lavalink.
.. important::
This method will clear the queue. | [
"Stops",
"playback",
"from",
"lavalink",
"."
] | python | train | 23.153846 |
apache/spark | python/pyspark/rdd.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2186-L2204 | def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func) | [
"def",
"zipWithUniqueId",
"(",
"self",
")",
":",
"n",
"=",
"self",
".",
"getNumPartitions",
"(",
")",
"def",
"func",
"(",
"k",
",",
"it",
")",
":",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"it",
")",
":",
"yield",
"v",
",",
"i",
"*",
"n",
"+",
"k",
"return",
"self",
".",
"mapPartitionsWithIndex",
"(",
"func",
")"
] | Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)] | [
"Zips",
"this",
"RDD",
"with",
"generated",
"unique",
"Long",
"ids",
"."
] | python | train | 34.473684 |
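Tracing the doctest above: with n = 3 partitions holding ['a'], ['b', 'c'] and ['d', 'e'], the i-th item of partition k gets id i*n + k, so 'c' (i=1, k=1) gets 1*3 + 1 = 4 and 'e' (i=1, k=2) gets 1*3 + 2 = 5, which matches [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)].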
fhcrc/nestly | nestly/scons.py | https://github.com/fhcrc/nestly/blob/4d7818b5950f405d2067a6b8577d5afb7527c9ff/nestly/scons.py#L37-L47 | def name_targets(func):
"""
Wrap a function such that returning ``'a', 'b', 'c', [1, 2, 3]`` transforms
the value into ``dict(a=1, b=2, c=3)``.
This is useful in the case where the last parameter is an SCons command.
"""
def wrap(*a, **kw):
ret = func(*a, **kw)
return dict(zip(ret[:-1], ret[-1]))
return wrap | [
"def",
"name_targets",
"(",
"func",
")",
":",
"def",
"wrap",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"ret",
"=",
"func",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"return",
"dict",
"(",
"zip",
"(",
"ret",
"[",
":",
"-",
"1",
"]",
",",
"ret",
"[",
"-",
"1",
"]",
")",
")",
"return",
"wrap"
] | Wrap a function such that returning ``'a', 'b', 'c', [1, 2, 3]`` transforms
the value into ``dict(a=1, b=2, c=3)``.
This is useful in the case where the last parameter is an SCons command. | [
"Wrap",
"a",
"function",
"such",
"that",
"returning",
"a",
"b",
"c",
"[",
"1",
"2",
"3",
"]",
"transforms",
"the",
"value",
"into",
"dict",
"(",
"a",
"=",
"1",
"b",
"=",
"2",
"c",
"=",
"3",
")",
"."
] | python | train | 31.272727 |
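The transformation in the docstring above, spelled out as a runnable sketch:

@name_targets
def targets():
    return 'a', 'b', 'c', [1, 2, 3]

print(targets())   # {'a': 1, 'b': 2, 'c': 3}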
jxtech/wechatpy | wechatpy/client/api/device.py | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/device.py#L96-L114 | def bind(self, ticket, device_id, user_id):
"""
        Bind device
        For details see
        https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7
        :param ticket: credential verifying the legality of the binding operation (generated by the WeChat backend, obtained by the third-party H5 page via the client jsapi)
        :param device_id: device id
        :param user_id: the openid of the user
        :return: the returned JSON data packet
"""
return self._post(
'bind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
) | [
"def",
"bind",
"(",
"self",
",",
"ticket",
",",
"device_id",
",",
"user_id",
")",
":",
"return",
"self",
".",
"_post",
"(",
"'bind'",
",",
"data",
"=",
"{",
"'ticket'",
":",
"ticket",
",",
"'device_id'",
":",
"device_id",
",",
"'openid'",
":",
"user_id",
"}",
")"
] | Bind device
        For details see
        https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7
        :param ticket: credential verifying the legality of the binding operation (generated by the WeChat backend, obtained by the third-party H5 page via the client jsapi)
        :param device_id: device id
        :param user_id: the openid of the user
        :return: the returned JSON data packet | [
"绑定设备",
"详情请参考",
"https",
":",
"//",
"iot",
".",
"weixin",
".",
"qq",
".",
"com",
"/",
"wiki",
"/",
"new",
"/",
"index",
".",
"html?page",
"=",
"3",
"-",
"4",
"-",
"7"
] | python | train | 26.210526 |
swistakm/python-gmaps | src/gmaps/client.py | https://github.com/swistakm/python-gmaps/blob/ef3bdea6f02277200f21a09f99d4e2aebad762b9/src/gmaps/client.py#L36-L53 | def _serialize_parameters(parameters):
"""Serialize some parameters to match python native types with formats
specified in google api docs like:
* True/False -> "true"/"false",
* {"a": 1, "b":2} -> "a:1|b:2"
:type parameters: dict oif query parameters
"""
for key, value in parameters.items():
if isinstance(value, bool):
parameters[key] = "true" if value else "false"
elif isinstance(value, dict):
parameters[key] = "|".join(
("%s:%s" % (k, v) for k, v in value.items()))
elif isinstance(value, (list, tuple)):
parameters[key] = "|".join(value)
return parameters | [
"def",
"_serialize_parameters",
"(",
"parameters",
")",
":",
"for",
"key",
",",
"value",
"in",
"parameters",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"parameters",
"[",
"key",
"]",
"=",
"\"true\"",
"if",
"value",
"else",
"\"false\"",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"parameters",
"[",
"key",
"]",
"=",
"\"|\"",
".",
"join",
"(",
"(",
"\"%s:%s\"",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"parameters",
"[",
"key",
"]",
"=",
"\"|\"",
".",
"join",
"(",
"value",
")",
"return",
"parameters"
] | Serialize some parameters to match python native types with formats
specified in google api docs like:
* True/False -> "true"/"false",
* {"a": 1, "b":2} -> "a:1|b:2"
    :type parameters: dict of query parameters
"Serialize",
"some",
"parameters",
"to",
"match",
"python",
"native",
"types",
"with",
"formats",
"specified",
"in",
"google",
"api",
"docs",
"like",
":",
"*",
"True",
"/",
"False",
"-",
">",
"true",
"/",
"false",
"*",
"{",
"a",
":",
"1",
"b",
":",
"2",
"}",
"-",
">",
"a",
":",
"1|b",
":",
"2"
] | python | train | 39.777778 |
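A worked example of the serialization rules above (parameter names are illustrative):

_serialize_parameters({'sensor': False, 'components': {'country': 'PL'}, 'waypoints': ['a', 'b']})
# -> {'sensor': 'false', 'components': 'country:PL', 'waypoints': 'a|b'}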
mdgoldberg/sportsref | sportsref/nba/pbp.py | https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/pbp.py#L26-L379 | def parse_play(boxscore_id, details, is_hm):
"""Parse play details from a play-by-play string describing a play.
Assuming valid input, this function returns structured data in a dictionary
describing the play. If the play detail string was invalid, this function
returns None.
:param boxscore_id: the boxscore ID of the play
:param details: detail string for the play
:param is_hm: bool indicating whether the offense is at home
    :returns: dictionary of play attributes or None if invalid
:rtype: dictionary or None
"""
# if input isn't a string, return None
if not details or not isinstance(details, basestring):
return None
bs = sportsref.nba.BoxScore(boxscore_id)
aw, hm = bs.away(), bs.home()
season = sportsref.nba.Season(bs.season())
hm_roster = set(bs.basic_stats().query('is_home == True').player_id.values)
p = {}
p['detail'] = details
p['home'] = hm
p['away'] = aw
p['is_home_play'] = is_hm
# parsing field goal attempts
shotRE = (r'(?P<shooter>{0}) (?P<is_fgm>makes|misses) '
'(?P<is_three>2|3)\-pt shot').format(PLAYER_RE)
distRE = r' (?:from (?P<shot_dist>\d+) ft|at rim)'
assistRE = r' \(assist by (?P<assister>{0})\)'.format(PLAYER_RE)
blockRE = r' \(block by (?P<blocker>{0})\)'.format(PLAYER_RE)
shotRE = r'{0}{1}(?:{2}|{3})?'.format(shotRE, distRE, assistRE, blockRE)
m = re.match(shotRE, details, re.IGNORECASE)
if m:
p['is_fga'] = True
p.update(m.groupdict())
p['shot_dist'] = p['shot_dist'] if p['shot_dist'] is not None else 0
p['shot_dist'] = int(p['shot_dist'])
p['is_fgm'] = p['is_fgm'] == 'makes'
p['is_three'] = p['is_three'] == '3'
p['is_assist'] = pd.notnull(p.get('assister'))
p['is_block'] = pd.notnull(p.get('blocker'))
shooter_home = p['shooter'] in hm_roster
p['off_team'] = hm if shooter_home else aw
p['def_team'] = aw if shooter_home else hm
return p
# parsing jump balls
jumpRE = ((r'Jump ball: (?P<away_jumper>{0}) vs\. (?P<home_jumper>{0})'
r'(?: \((?P<gains_poss>{0}) gains possession\))?')
.format(PLAYER_RE))
m = re.match(jumpRE, details, re.IGNORECASE)
if m:
p['is_jump_ball'] = True
p.update(m.groupdict())
return p
# parsing rebounds
rebRE = (r'(?P<is_oreb>Offensive|Defensive) rebound'
r' by (?P<rebounder>{0}|Team)').format(PLAYER_RE)
m = re.match(rebRE, details, re.I)
if m:
p['is_reb'] = True
p.update(m.groupdict())
p['is_oreb'] = p['is_oreb'].lower() == 'offensive'
p['is_dreb'] = not p['is_oreb']
if p['rebounder'] == 'Team':
p['reb_team'], other = (hm, aw) if is_hm else (aw, hm)
else:
reb_home = p['rebounder'] in hm_roster
p['reb_team'], other = (hm, aw) if reb_home else (aw, hm)
p['off_team'] = p['reb_team'] if p['is_oreb'] else other
p['def_team'] = p['reb_team'] if p['is_dreb'] else other
return p
# parsing free throws
ftRE = (r'(?P<ft_shooter>{}) (?P<is_ftm>makes|misses) '
r'(?P<is_tech_fta>technical )?(?P<is_flag_fta>flagrant )?'
r'(?P<is_clearpath_fta>clear path )?free throw'
r'(?: (?P<fta_num>\d+) of (?P<tot_fta>\d+))?').format(PLAYER_RE)
m = re.match(ftRE, details, re.I)
if m:
p['is_fta'] = True
p.update(m.groupdict())
p['is_ftm'] = p['is_ftm'] == 'makes'
p['is_tech_fta'] = bool(p['is_tech_fta'])
p['is_flag_fta'] = bool(p['is_flag_fta'])
p['is_clearpath_fta'] = bool(p['is_clearpath_fta'])
p['is_pf_fta'] = not p['is_tech_fta']
if p['tot_fta']:
p['tot_fta'] = int(p['tot_fta'])
if p['fta_num']:
p['fta_num'] = int(p['fta_num'])
ft_home = p['ft_shooter'] in hm_roster
p['fta_team'] = hm if ft_home else aw
if not p['is_tech_fta']:
p['off_team'] = hm if ft_home else aw
p['def_team'] = aw if ft_home else hm
return p
# parsing substitutions
subRE = (r'(?P<sub_in>{0}) enters the game for '
r'(?P<sub_out>{0})').format(PLAYER_RE)
m = re.match(subRE, details, re.I)
if m:
p['is_sub'] = True
p.update(m.groupdict())
sub_home = p['sub_in'] in hm_roster or p['sub_out'] in hm_roster
p['sub_team'] = hm if sub_home else aw
return p
# parsing turnovers
toReasons = (r'(?P<to_type>[^;]+)(?:; steal by '
r'(?P<stealer>{0}))?').format(PLAYER_RE)
toRE = (r'Turnover by (?P<to_by>{}|Team) '
r'\((?:{})\)').format(PLAYER_RE, toReasons)
m = re.match(toRE, details, re.I)
if m:
p['is_to'] = True
p.update(m.groupdict())
p['to_type'] = p['to_type'].lower()
if p['to_type'] == 'offensive foul':
return None
p['is_steal'] = pd.notnull(p['stealer'])
p['is_travel'] = p['to_type'] == 'traveling'
p['is_shot_clock_viol'] = p['to_type'] == 'shot clock'
p['is_oob'] = p['to_type'] == 'step out of bounds'
p['is_three_sec_viol'] = p['to_type'] == '3 sec'
p['is_backcourt_viol'] = p['to_type'] == 'back court'
p['is_off_goaltend'] = p['to_type'] == 'offensive goaltending'
p['is_double_dribble'] = p['to_type'] == 'dbl dribble'
p['is_discont_dribble'] = p['to_type'] == 'discontinued dribble'
p['is_carry'] = p['to_type'] == 'palming'
if p['to_by'] == 'Team':
p['off_team'] = hm if is_hm else aw
p['def_team'] = aw if is_hm else hm
else:
to_home = p['to_by'] in hm_roster
p['off_team'] = hm if to_home else aw
p['def_team'] = aw if to_home else hm
return p
# parsing shooting fouls
shotFoulRE = (r'Shooting(?P<is_block_foul> block)? foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(shotFoulRE, details, re.I)
if m:
p['is_pf'] = True
p['is_shot_foul'] = True
p.update(m.groupdict())
p['is_block_foul'] = bool(p['is_block_foul'])
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing offensive fouls
offFoulRE = (r'Offensive(?P<is_charge> charge)? foul '
r'by (?P<to_by>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(offFoulRE, details, re.I)
if m:
p['is_pf'] = True
p['is_off_foul'] = True
p['is_to'] = True
p['to_type'] = 'offensive foul'
p.update(m.groupdict())
p['is_charge'] = bool(p['is_charge'])
p['fouler'] = p['to_by']
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = hm if foul_on_home else aw
p['def_team'] = aw if foul_on_home else hm
p['foul_team'] = p['off_team']
return p
# parsing personal fouls
foulRE = (r'Personal (?P<is_take_foul>take )?(?P<is_block_foul>block )?'
r'foul by (?P<fouler>{0})(?: \(drawn by '
r'(?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(foulRE, details, re.I)
if m:
p['is_pf'] = True
p.update(m.groupdict())
p['is_take_foul'] = bool(p['is_take_foul'])
p['is_block_foul'] = bool(p['is_block_foul'])
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# TODO: parsing double personal fouls
# double_foul_re = (r'Double personal foul by (?P<fouler1>{0}) and '
# r'(?P<fouler2>{0})').format(PLAYER_RE)
# m = re.match(double_Foul_re, details, re.I)
# if m:
# p['is_pf'] = True
# p.update(m.groupdict())
# p['off_team'] =
# parsing loose ball fouls
looseBallRE = (r'Loose ball foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(looseBallRE, details, re.I)
if m:
p['is_pf'] = True
p['is_loose_ball_foul'] = True
p.update(m.groupdict())
foul_home = p['fouler'] in hm_roster
p['foul_team'] = hm if foul_home else aw
return p
# parsing punching fouls
# TODO
# parsing away from play fouls
awayFromBallRE = ((r'Away from play foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?')
.format(PLAYER_RE))
m = re.match(awayFromBallRE, details, re.I)
if m:
p['is_pf'] = True
p['is_away_from_play_foul'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
# TODO: figure out who had the ball based on previous play
p['foul_team'] = hm if foul_on_home else aw
return p
# parsing inbound fouls
inboundRE = (r'Inbound foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(inboundRE, details, re.I)
if m:
p['is_pf'] = True
p['is_inbound_foul'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing flagrant fouls
flagrantRE = (r'Flagrant foul type (?P<flag_type>1|2) by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(flagrantRE, details, re.I)
if m:
p['is_pf'] = True
p['is_flagrant'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
p['foul_team'] = hm if foul_on_home else aw
return p
# parsing clear path fouls
clearPathRE = (r'Clear path foul by (?P<fouler>{0})'
r'(?: \(drawn by (?P<drew_foul>{0})\))?').format(PLAYER_RE)
m = re.match(clearPathRE, details, re.I)
if m:
p['is_pf'] = True
p['is_clear_path_foul'] = True
p.update(m.groupdict())
foul_on_home = p['fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing timeouts
timeoutRE = r'(?P<timeout_team>.*?) (?:full )?timeout'
m = re.match(timeoutRE, details, re.I)
if m:
p['is_timeout'] = True
p.update(m.groupdict())
isOfficialTO = p['timeout_team'].lower() == 'official'
name_to_id = season.team_names_to_ids()
p['timeout_team'] = (
'Official' if isOfficialTO else
name_to_id.get(hm, name_to_id.get(aw, p['timeout_team']))
)
return p
# parsing technical fouls
techRE = (r'(?P<is_hanging>Hanging )?'
r'(?P<is_taunting>Taunting )?'
r'(?P<is_ill_def>Ill def )?'
r'(?P<is_delay>Delay )?'
r'(?P<is_unsport>Non unsport )?'
r'tech(?:nical)? foul by '
r'(?P<tech_fouler>{0}|Team)').format(PLAYER_RE)
m = re.match(techRE, details, re.I)
if m:
p['is_tech_foul'] = True
p.update(m.groupdict())
p['is_hanging'] = bool(p['is_hanging'])
p['is_taunting'] = bool(p['is_taunting'])
p['is_ill_def'] = bool(p['is_ill_def'])
p['is_delay'] = bool(p['is_delay'])
p['is_unsport'] = bool(p['is_unsport'])
foul_on_home = p['tech_fouler'] in hm_roster
p['foul_team'] = hm if foul_on_home else aw
return p
# parsing ejections
ejectRE = r'(?P<ejectee>{0}|Team) ejected from game'.format(PLAYER_RE)
m = re.match(ejectRE, details, re.I)
if m:
p['is_ejection'] = True
p.update(m.groupdict())
if p['ejectee'] == 'Team':
p['ejectee_team'] = hm if is_hm else aw
else:
eject_home = p['ejectee'] in hm_roster
p['ejectee_team'] = hm if eject_home else aw
return p
# parsing defensive 3 seconds techs
def3TechRE = (r'(?:Def 3 sec tech foul|Defensive three seconds)'
r' by (?P<tech_fouler>{})').format(PLAYER_RE)
m = re.match(def3TechRE, details, re.I)
if m:
p['is_tech_foul'] = True
p['is_def_three_secs'] = True
p.update(m.groupdict())
foul_on_home = p['tech_fouler'] in hm_roster
p['off_team'] = aw if foul_on_home else hm
p['def_team'] = hm if foul_on_home else aw
p['foul_team'] = p['def_team']
return p
# parsing violations
violRE = (r'Violation by (?P<violator>{0}|Team) '
r'\((?P<viol_type>.*)\)').format(PLAYER_RE)
m = re.match(violRE, details, re.I)
if m:
p['is_viol'] = True
p.update(m.groupdict())
if p['viol_type'] == 'kicked_ball':
p['is_to'] = True
p['to_by'] = p['violator']
if p['violator'] == 'Team':
p['viol_team'] = hm if is_hm else aw
else:
viol_home = p['violator'] in hm_roster
p['viol_team'] = hm if viol_home else aw
return p
p['is_error'] = True
return p | [
"def",
"parse_play",
"(",
"boxscore_id",
",",
"details",
",",
"is_hm",
")",
":",
"# if input isn't a string, return None",
"if",
"not",
"details",
"or",
"not",
"isinstance",
"(",
"details",
",",
"basestring",
")",
":",
"return",
"None",
"bs",
"=",
"sportsref",
".",
"nba",
".",
"BoxScore",
"(",
"boxscore_id",
")",
"aw",
",",
"hm",
"=",
"bs",
".",
"away",
"(",
")",
",",
"bs",
".",
"home",
"(",
")",
"season",
"=",
"sportsref",
".",
"nba",
".",
"Season",
"(",
"bs",
".",
"season",
"(",
")",
")",
"hm_roster",
"=",
"set",
"(",
"bs",
".",
"basic_stats",
"(",
")",
".",
"query",
"(",
"'is_home == True'",
")",
".",
"player_id",
".",
"values",
")",
"p",
"=",
"{",
"}",
"p",
"[",
"'detail'",
"]",
"=",
"details",
"p",
"[",
"'home'",
"]",
"=",
"hm",
"p",
"[",
"'away'",
"]",
"=",
"aw",
"p",
"[",
"'is_home_play'",
"]",
"=",
"is_hm",
"# parsing field goal attempts",
"shotRE",
"=",
"(",
"r'(?P<shooter>{0}) (?P<is_fgm>makes|misses) '",
"'(?P<is_three>2|3)\\-pt shot'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"distRE",
"=",
"r' (?:from (?P<shot_dist>\\d+) ft|at rim)'",
"assistRE",
"=",
"r' \\(assist by (?P<assister>{0})\\)'",
".",
"format",
"(",
"PLAYER_RE",
")",
"blockRE",
"=",
"r' \\(block by (?P<blocker>{0})\\)'",
".",
"format",
"(",
"PLAYER_RE",
")",
"shotRE",
"=",
"r'{0}{1}(?:{2}|{3})?'",
".",
"format",
"(",
"shotRE",
",",
"distRE",
",",
"assistRE",
",",
"blockRE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"shotRE",
",",
"details",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"m",
":",
"p",
"[",
"'is_fga'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'shot_dist'",
"]",
"=",
"p",
"[",
"'shot_dist'",
"]",
"if",
"p",
"[",
"'shot_dist'",
"]",
"is",
"not",
"None",
"else",
"0",
"p",
"[",
"'shot_dist'",
"]",
"=",
"int",
"(",
"p",
"[",
"'shot_dist'",
"]",
")",
"p",
"[",
"'is_fgm'",
"]",
"=",
"p",
"[",
"'is_fgm'",
"]",
"==",
"'makes'",
"p",
"[",
"'is_three'",
"]",
"=",
"p",
"[",
"'is_three'",
"]",
"==",
"'3'",
"p",
"[",
"'is_assist'",
"]",
"=",
"pd",
".",
"notnull",
"(",
"p",
".",
"get",
"(",
"'assister'",
")",
")",
"p",
"[",
"'is_block'",
"]",
"=",
"pd",
".",
"notnull",
"(",
"p",
".",
"get",
"(",
"'blocker'",
")",
")",
"shooter_home",
"=",
"p",
"[",
"'shooter'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"hm",
"if",
"shooter_home",
"else",
"aw",
"p",
"[",
"'def_team'",
"]",
"=",
"aw",
"if",
"shooter_home",
"else",
"hm",
"return",
"p",
"# parsing jump balls",
"jumpRE",
"=",
"(",
"(",
"r'Jump ball: (?P<away_jumper>{0}) vs\\. (?P<home_jumper>{0})'",
"r'(?: \\((?P<gains_poss>{0}) gains possession\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
")",
"m",
"=",
"re",
".",
"match",
"(",
"jumpRE",
",",
"details",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"m",
":",
"p",
"[",
"'is_jump_ball'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"return",
"p",
"# parsing rebounds",
"rebRE",
"=",
"(",
"r'(?P<is_oreb>Offensive|Defensive) rebound'",
"r' by (?P<rebounder>{0}|Team)'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"rebRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_reb'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'is_oreb'",
"]",
"=",
"p",
"[",
"'is_oreb'",
"]",
".",
"lower",
"(",
")",
"==",
"'offensive'",
"p",
"[",
"'is_dreb'",
"]",
"=",
"not",
"p",
"[",
"'is_oreb'",
"]",
"if",
"p",
"[",
"'rebounder'",
"]",
"==",
"'Team'",
":",
"p",
"[",
"'reb_team'",
"]",
",",
"other",
"=",
"(",
"hm",
",",
"aw",
")",
"if",
"is_hm",
"else",
"(",
"aw",
",",
"hm",
")",
"else",
":",
"reb_home",
"=",
"p",
"[",
"'rebounder'",
"]",
"in",
"hm_roster",
"p",
"[",
"'reb_team'",
"]",
",",
"other",
"=",
"(",
"hm",
",",
"aw",
")",
"if",
"reb_home",
"else",
"(",
"aw",
",",
"hm",
")",
"p",
"[",
"'off_team'",
"]",
"=",
"p",
"[",
"'reb_team'",
"]",
"if",
"p",
"[",
"'is_oreb'",
"]",
"else",
"other",
"p",
"[",
"'def_team'",
"]",
"=",
"p",
"[",
"'reb_team'",
"]",
"if",
"p",
"[",
"'is_dreb'",
"]",
"else",
"other",
"return",
"p",
"# parsing free throws",
"ftRE",
"=",
"(",
"r'(?P<ft_shooter>{}) (?P<is_ftm>makes|misses) '",
"r'(?P<is_tech_fta>technical )?(?P<is_flag_fta>flagrant )?'",
"r'(?P<is_clearpath_fta>clear path )?free throw'",
"r'(?: (?P<fta_num>\\d+) of (?P<tot_fta>\\d+))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"ftRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_fta'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'is_ftm'",
"]",
"=",
"p",
"[",
"'is_ftm'",
"]",
"==",
"'makes'",
"p",
"[",
"'is_tech_fta'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_tech_fta'",
"]",
")",
"p",
"[",
"'is_flag_fta'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_flag_fta'",
"]",
")",
"p",
"[",
"'is_clearpath_fta'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_clearpath_fta'",
"]",
")",
"p",
"[",
"'is_pf_fta'",
"]",
"=",
"not",
"p",
"[",
"'is_tech_fta'",
"]",
"if",
"p",
"[",
"'tot_fta'",
"]",
":",
"p",
"[",
"'tot_fta'",
"]",
"=",
"int",
"(",
"p",
"[",
"'tot_fta'",
"]",
")",
"if",
"p",
"[",
"'fta_num'",
"]",
":",
"p",
"[",
"'fta_num'",
"]",
"=",
"int",
"(",
"p",
"[",
"'fta_num'",
"]",
")",
"ft_home",
"=",
"p",
"[",
"'ft_shooter'",
"]",
"in",
"hm_roster",
"p",
"[",
"'fta_team'",
"]",
"=",
"hm",
"if",
"ft_home",
"else",
"aw",
"if",
"not",
"p",
"[",
"'is_tech_fta'",
"]",
":",
"p",
"[",
"'off_team'",
"]",
"=",
"hm",
"if",
"ft_home",
"else",
"aw",
"p",
"[",
"'def_team'",
"]",
"=",
"aw",
"if",
"ft_home",
"else",
"hm",
"return",
"p",
"# parsing substitutions",
"subRE",
"=",
"(",
"r'(?P<sub_in>{0}) enters the game for '",
"r'(?P<sub_out>{0})'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"subRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_sub'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"sub_home",
"=",
"p",
"[",
"'sub_in'",
"]",
"in",
"hm_roster",
"or",
"p",
"[",
"'sub_out'",
"]",
"in",
"hm_roster",
"p",
"[",
"'sub_team'",
"]",
"=",
"hm",
"if",
"sub_home",
"else",
"aw",
"return",
"p",
"# parsing turnovers",
"toReasons",
"=",
"(",
"r'(?P<to_type>[^;]+)(?:; steal by '",
"r'(?P<stealer>{0}))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"toRE",
"=",
"(",
"r'Turnover by (?P<to_by>{}|Team) '",
"r'\\((?:{})\\)'",
")",
".",
"format",
"(",
"PLAYER_RE",
",",
"toReasons",
")",
"m",
"=",
"re",
".",
"match",
"(",
"toRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_to'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'to_type'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
".",
"lower",
"(",
")",
"if",
"p",
"[",
"'to_type'",
"]",
"==",
"'offensive foul'",
":",
"return",
"None",
"p",
"[",
"'is_steal'",
"]",
"=",
"pd",
".",
"notnull",
"(",
"p",
"[",
"'stealer'",
"]",
")",
"p",
"[",
"'is_travel'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'traveling'",
"p",
"[",
"'is_shot_clock_viol'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'shot clock'",
"p",
"[",
"'is_oob'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'step out of bounds'",
"p",
"[",
"'is_three_sec_viol'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'3 sec'",
"p",
"[",
"'is_backcourt_viol'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'back court'",
"p",
"[",
"'is_off_goaltend'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'offensive goaltending'",
"p",
"[",
"'is_double_dribble'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'dbl dribble'",
"p",
"[",
"'is_discont_dribble'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'discontinued dribble'",
"p",
"[",
"'is_carry'",
"]",
"=",
"p",
"[",
"'to_type'",
"]",
"==",
"'palming'",
"if",
"p",
"[",
"'to_by'",
"]",
"==",
"'Team'",
":",
"p",
"[",
"'off_team'",
"]",
"=",
"hm",
"if",
"is_hm",
"else",
"aw",
"p",
"[",
"'def_team'",
"]",
"=",
"aw",
"if",
"is_hm",
"else",
"hm",
"else",
":",
"to_home",
"=",
"p",
"[",
"'to_by'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"hm",
"if",
"to_home",
"else",
"aw",
"p",
"[",
"'def_team'",
"]",
"=",
"aw",
"if",
"to_home",
"else",
"hm",
"return",
"p",
"# parsing shooting fouls",
"shotFoulRE",
"=",
"(",
"r'Shooting(?P<is_block_foul> block)? foul by (?P<fouler>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"shotFoulRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_shot_foul'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'is_block_foul'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_block_foul'",
"]",
")",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"aw",
"if",
"foul_on_home",
"else",
"hm",
"p",
"[",
"'def_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"p",
"[",
"'foul_team'",
"]",
"=",
"p",
"[",
"'def_team'",
"]",
"return",
"p",
"# parsing offensive fouls",
"offFoulRE",
"=",
"(",
"r'Offensive(?P<is_charge> charge)? foul '",
"r'by (?P<to_by>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"offFoulRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_off_foul'",
"]",
"=",
"True",
"p",
"[",
"'is_to'",
"]",
"=",
"True",
"p",
"[",
"'to_type'",
"]",
"=",
"'offensive foul'",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'is_charge'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_charge'",
"]",
")",
"p",
"[",
"'fouler'",
"]",
"=",
"p",
"[",
"'to_by'",
"]",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"p",
"[",
"'def_team'",
"]",
"=",
"aw",
"if",
"foul_on_home",
"else",
"hm",
"p",
"[",
"'foul_team'",
"]",
"=",
"p",
"[",
"'off_team'",
"]",
"return",
"p",
"# parsing personal fouls",
"foulRE",
"=",
"(",
"r'Personal (?P<is_take_foul>take )?(?P<is_block_foul>block )?'",
"r'foul by (?P<fouler>{0})(?: \\(drawn by '",
"r'(?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"foulRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'is_take_foul'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_take_foul'",
"]",
")",
"p",
"[",
"'is_block_foul'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_block_foul'",
"]",
")",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"aw",
"if",
"foul_on_home",
"else",
"hm",
"p",
"[",
"'def_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"p",
"[",
"'foul_team'",
"]",
"=",
"p",
"[",
"'def_team'",
"]",
"return",
"p",
"# TODO: parsing double personal fouls",
"# double_foul_re = (r'Double personal foul by (?P<fouler1>{0}) and '",
"# r'(?P<fouler2>{0})').format(PLAYER_RE)",
"# m = re.match(double_Foul_re, details, re.I)",
"# if m:",
"# p['is_pf'] = True",
"# p.update(m.groupdict())",
"# p['off_team'] =",
"# parsing loose ball fouls",
"looseBallRE",
"=",
"(",
"r'Loose ball foul by (?P<fouler>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"looseBallRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_loose_ball_foul'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"foul_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'foul_team'",
"]",
"=",
"hm",
"if",
"foul_home",
"else",
"aw",
"return",
"p",
"# parsing punching fouls",
"# TODO",
"# parsing away from play fouls",
"awayFromBallRE",
"=",
"(",
"(",
"r'Away from play foul by (?P<fouler>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
")",
"m",
"=",
"re",
".",
"match",
"(",
"awayFromBallRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_away_from_play_foul'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"# TODO: figure out who had the ball based on previous play",
"p",
"[",
"'foul_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"return",
"p",
"# parsing inbound fouls",
"inboundRE",
"=",
"(",
"r'Inbound foul by (?P<fouler>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"inboundRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_inbound_foul'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"aw",
"if",
"foul_on_home",
"else",
"hm",
"p",
"[",
"'def_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"p",
"[",
"'foul_team'",
"]",
"=",
"p",
"[",
"'def_team'",
"]",
"return",
"p",
"# parsing flagrant fouls",
"flagrantRE",
"=",
"(",
"r'Flagrant foul type (?P<flag_type>1|2) by (?P<fouler>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"flagrantRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_flagrant'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'foul_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"return",
"p",
"# parsing clear path fouls",
"clearPathRE",
"=",
"(",
"r'Clear path foul by (?P<fouler>{0})'",
"r'(?: \\(drawn by (?P<drew_foul>{0})\\))?'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"clearPathRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_pf'",
"]",
"=",
"True",
"p",
"[",
"'is_clear_path_foul'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"foul_on_home",
"=",
"p",
"[",
"'fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"aw",
"if",
"foul_on_home",
"else",
"hm",
"p",
"[",
"'def_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"p",
"[",
"'foul_team'",
"]",
"=",
"p",
"[",
"'def_team'",
"]",
"return",
"p",
"# parsing timeouts",
"timeoutRE",
"=",
"r'(?P<timeout_team>.*?) (?:full )?timeout'",
"m",
"=",
"re",
".",
"match",
"(",
"timeoutRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_timeout'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"isOfficialTO",
"=",
"p",
"[",
"'timeout_team'",
"]",
".",
"lower",
"(",
")",
"==",
"'official'",
"name_to_id",
"=",
"season",
".",
"team_names_to_ids",
"(",
")",
"p",
"[",
"'timeout_team'",
"]",
"=",
"(",
"'Official'",
"if",
"isOfficialTO",
"else",
"name_to_id",
".",
"get",
"(",
"hm",
",",
"name_to_id",
".",
"get",
"(",
"aw",
",",
"p",
"[",
"'timeout_team'",
"]",
")",
")",
")",
"return",
"p",
"# parsing technical fouls",
"techRE",
"=",
"(",
"r'(?P<is_hanging>Hanging )?'",
"r'(?P<is_taunting>Taunting )?'",
"r'(?P<is_ill_def>Ill def )?'",
"r'(?P<is_delay>Delay )?'",
"r'(?P<is_unsport>Non unsport )?'",
"r'tech(?:nical)? foul by '",
"r'(?P<tech_fouler>{0}|Team)'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"techRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_tech_foul'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"p",
"[",
"'is_hanging'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_hanging'",
"]",
")",
"p",
"[",
"'is_taunting'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_taunting'",
"]",
")",
"p",
"[",
"'is_ill_def'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_ill_def'",
"]",
")",
"p",
"[",
"'is_delay'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_delay'",
"]",
")",
"p",
"[",
"'is_unsport'",
"]",
"=",
"bool",
"(",
"p",
"[",
"'is_unsport'",
"]",
")",
"foul_on_home",
"=",
"p",
"[",
"'tech_fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'foul_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"return",
"p",
"# parsing ejections",
"ejectRE",
"=",
"r'(?P<ejectee>{0}|Team) ejected from game'",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"ejectRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_ejection'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"if",
"p",
"[",
"'ejectee'",
"]",
"==",
"'Team'",
":",
"p",
"[",
"'ejectee_team'",
"]",
"=",
"hm",
"if",
"is_hm",
"else",
"aw",
"else",
":",
"eject_home",
"=",
"p",
"[",
"'ejectee'",
"]",
"in",
"hm_roster",
"p",
"[",
"'ejectee_team'",
"]",
"=",
"hm",
"if",
"eject_home",
"else",
"aw",
"return",
"p",
"# parsing defensive 3 seconds techs",
"def3TechRE",
"=",
"(",
"r'(?:Def 3 sec tech foul|Defensive three seconds)'",
"r' by (?P<tech_fouler>{})'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"def3TechRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_tech_foul'",
"]",
"=",
"True",
"p",
"[",
"'is_def_three_secs'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"foul_on_home",
"=",
"p",
"[",
"'tech_fouler'",
"]",
"in",
"hm_roster",
"p",
"[",
"'off_team'",
"]",
"=",
"aw",
"if",
"foul_on_home",
"else",
"hm",
"p",
"[",
"'def_team'",
"]",
"=",
"hm",
"if",
"foul_on_home",
"else",
"aw",
"p",
"[",
"'foul_team'",
"]",
"=",
"p",
"[",
"'def_team'",
"]",
"return",
"p",
"# parsing violations",
"violRE",
"=",
"(",
"r'Violation by (?P<violator>{0}|Team) '",
"r'\\((?P<viol_type>.*)\\)'",
")",
".",
"format",
"(",
"PLAYER_RE",
")",
"m",
"=",
"re",
".",
"match",
"(",
"violRE",
",",
"details",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"p",
"[",
"'is_viol'",
"]",
"=",
"True",
"p",
".",
"update",
"(",
"m",
".",
"groupdict",
"(",
")",
")",
"if",
"p",
"[",
"'viol_type'",
"]",
"==",
"'kicked_ball'",
":",
"p",
"[",
"'is_to'",
"]",
"=",
"True",
"p",
"[",
"'to_by'",
"]",
"=",
"p",
"[",
"'violator'",
"]",
"if",
"p",
"[",
"'violator'",
"]",
"==",
"'Team'",
":",
"p",
"[",
"'viol_team'",
"]",
"=",
"hm",
"if",
"is_hm",
"else",
"aw",
"else",
":",
"viol_home",
"=",
"p",
"[",
"'violator'",
"]",
"in",
"hm_roster",
"p",
"[",
"'viol_team'",
"]",
"=",
"hm",
"if",
"viol_home",
"else",
"aw",
"return",
"p",
"p",
"[",
"'is_error'",
"]",
"=",
"True",
"return",
"p"
] | Parse play details from a play-by-play string describing a play.
Assuming valid input, this function returns structured data in a dictionary
describing the play. If the play detail string was invalid, this function
returns None.
:param boxscore_id: the boxscore ID of the play
:param details: detail string for the play
:param is_hm: bool indicating whether the offense is at home
    :returns: dictionary of play attributes or None if invalid
:rtype: dictionary or None | [
"Parse",
"play",
"details",
"from",
"a",
"play",
"-",
"by",
"-",
"play",
"string",
"describing",
"a",
"play",
"."
] | python | test | 37.336158 |
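A usage sketch for parse_play(); the boxscore id and detail string below are illustrative, the exact player-string format depends on PLAYER_RE (not shown in this record), and the call needs network access to resolve rosters:

play = parse_play('201706010GSW',
                  'Kevin Durant makes 3-pt shot from 26 ft (assist by Stephen Curry)',
                  is_hm=True)
# when the detail matches the shot pattern, play carries fields such as
# is_fga/is_fgm/is_three, shot_dist, assister, off_team and def_team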
bitesofcode/projexui | projexui/widgets/xtreewidget/xtreewidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1014-L1026 | def hiddenColumns( self ):
"""
Returns a list of the hidden columns for this tree.
:return [<str>, ..]
"""
output = []
columns = self.columns()
for c, column in enumerate(columns):
if ( not self.isColumnHidden(c) ):
continue
output.append(column)
return output | [
"def",
"hiddenColumns",
"(",
"self",
")",
":",
"output",
"=",
"[",
"]",
"columns",
"=",
"self",
".",
"columns",
"(",
")",
"for",
"c",
",",
"column",
"in",
"enumerate",
"(",
"columns",
")",
":",
"if",
"(",
"not",
"self",
".",
"isColumnHidden",
"(",
"c",
")",
")",
":",
"continue",
"output",
".",
"append",
"(",
"column",
")",
"return",
"output"
] | Returns a list of the hidden columns for this tree.
:return [<str>, ..] | [
"Returns",
"a",
"list",
"of",
"the",
"hidden",
"columns",
"for",
"this",
"tree",
".",
":",
"return",
"[",
"<str",
">",
"..",
"]"
] | python | train | 29.076923 |
googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L76-L97 | def hashed(field_name, percent, fields=None, count=0):
"""Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling.
"""
if field_name is None:
raise Exception('Hash field must be specified')
def _hashed_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \
(projection, sql, field_name, percent)
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _hashed_sampling | [
"def",
"hashed",
"(",
"field_name",
",",
"percent",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"0",
")",
":",
"if",
"field_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Hash field must be specified'",
")",
"def",
"_hashed_sampling",
"(",
"sql",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"sql",
"=",
"'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"field_name",
",",
"percent",
")",
"if",
"count",
"!=",
"0",
":",
"sql",
"=",
"'%s LIMIT %d'",
"%",
"(",
"sql",
",",
"count",
")",
"return",
"sql",
"return",
"_hashed_sampling"
] | Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling. | [
"Provides",
"a",
"sampling",
"strategy",
"based",
"on",
"hashing",
"and",
"selecting",
"a",
"percentage",
"of",
"data",
"."
] | python | train | 40.227273 |
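Applying the returned sampling function to a query rewrites it roughly as below (sketch; assumes Sampling._create_projection(None) expands to '*'):

sample = Sampling.hashed('user_id', 10, count=5)
sample('SELECT * FROM logs')
# 'SELECT * FROM (SELECT * FROM logs) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(user_id AS STRING))), 100) < 10 LIMIT 5'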
openstack/networking-cisco | networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L185-L210 | def is_fw_complete(self):
"""This API returns the completion status of FW.
        This returns True if a FW is created with an active policy that has
more than one rule associated with it and if a driver init is done
successfully.
"""
LOG.info("In fw_complete needed %(fw_created)s "
"%(active_policy_id)s %(is_fw_drvr_created)s "
"%(pol_present)s %(fw_type)s",
{'fw_created': self.fw_created,
'active_policy_id': self.active_pol_id,
'is_fw_drvr_created': self.is_fw_drvr_created(),
'pol_present': self.active_pol_id in self.policies,
'fw_type': self.fw_type})
if self.active_pol_id is not None:
LOG.info("In Drvr create needed %(len_policy)s %(one_rule)s",
{'len_policy':
len(self.policies[self.active_pol_id]['rule_dict']),
'one_rule':
self.one_rule_present(self.active_pol_id)})
return self.fw_created and self.active_pol_id and (
self.is_fw_drvr_created()) and self.fw_type and (
self.active_pol_id in self.policies) and (
len(self.policies[self.active_pol_id]['rule_dict'])) > 0 and (
self.one_rule_present(self.active_pol_id)) | [
"def",
"is_fw_complete",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"\"In fw_complete needed %(fw_created)s \"",
"\"%(active_policy_id)s %(is_fw_drvr_created)s \"",
"\"%(pol_present)s %(fw_type)s\"",
",",
"{",
"'fw_created'",
":",
"self",
".",
"fw_created",
",",
"'active_policy_id'",
":",
"self",
".",
"active_pol_id",
",",
"'is_fw_drvr_created'",
":",
"self",
".",
"is_fw_drvr_created",
"(",
")",
",",
"'pol_present'",
":",
"self",
".",
"active_pol_id",
"in",
"self",
".",
"policies",
",",
"'fw_type'",
":",
"self",
".",
"fw_type",
"}",
")",
"if",
"self",
".",
"active_pol_id",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"In Drvr create needed %(len_policy)s %(one_rule)s\"",
",",
"{",
"'len_policy'",
":",
"len",
"(",
"self",
".",
"policies",
"[",
"self",
".",
"active_pol_id",
"]",
"[",
"'rule_dict'",
"]",
")",
",",
"'one_rule'",
":",
"self",
".",
"one_rule_present",
"(",
"self",
".",
"active_pol_id",
")",
"}",
")",
"return",
"self",
".",
"fw_created",
"and",
"self",
".",
"active_pol_id",
"and",
"(",
"self",
".",
"is_fw_drvr_created",
"(",
")",
")",
"and",
"self",
".",
"fw_type",
"and",
"(",
"self",
".",
"active_pol_id",
"in",
"self",
".",
"policies",
")",
"and",
"(",
"len",
"(",
"self",
".",
"policies",
"[",
"self",
".",
"active_pol_id",
"]",
"[",
"'rule_dict'",
"]",
")",
")",
">",
"0",
"and",
"(",
"self",
".",
"one_rule_present",
"(",
"self",
".",
"active_pol_id",
")",
")"
] | This API returns the completion status of FW.
        This returns True if a FW is created with an active policy that has
more than one rule associated with it and if a driver init is done
successfully. | [
"This",
"API",
"returns",
"the",
"completion",
"status",
"of",
"FW",
"."
] | python | train | 51.307692 |
cloudflare/sqlalchemy-clickhouse | connector.py | https://github.com/cloudflare/sqlalchemy-clickhouse/blob/fc46142445d4510566f6412964df2fb9d2f4bd2e/connector.py#L260-L283 | def fetchmany(self, size=None):
"""Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
"""
if self._state == self._STATE_NONE:
raise Exception("No query yet")
if size is None:
size = 1
if not self._data:
return []
else:
if len(self._data) > size:
result, self._data = self._data[:size], self._data[size:]
else:
result, self._data = self._data, []
self._rownumber += len(result)
return result | [
"def",
"fetchmany",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"_STATE_NONE",
":",
"raise",
"Exception",
"(",
"\"No query yet\"",
")",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"1",
"if",
"not",
"self",
".",
"_data",
":",
"return",
"[",
"]",
"else",
":",
"if",
"len",
"(",
"self",
".",
"_data",
")",
">",
"size",
":",
"result",
",",
"self",
".",
"_data",
"=",
"self",
".",
"_data",
"[",
":",
"size",
"]",
",",
"self",
".",
"_data",
"[",
"size",
":",
"]",
"else",
":",
"result",
",",
"self",
".",
"_data",
"=",
"self",
".",
"_data",
",",
"[",
"]",
"self",
".",
"_rownumber",
"+=",
"len",
"(",
"result",
")",
"return",
"result"
] | Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned. | [
"Fetch",
"the",
"next",
"set",
"of",
"rows",
"of",
"a",
"query",
"result",
"returning",
"a",
"sequence",
"of",
"sequences",
"(",
"e",
".",
"g",
".",
"a",
"list",
"of",
"tuples",
")",
".",
"An",
"empty",
"sequence",
"is",
"returned",
"when",
"no",
"more",
"rows",
"are",
"available",
"."
] | python | train | 43.166667 |
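Typical DB-API consumption of fetchmany(), given a cursor from this connector (connection setup omitted):

cursor.execute('SELECT number FROM system.numbers LIMIT 10')
while True:
    rows = cursor.fetchmany(3)
    if not rows:
        break
    print(rows)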
ejeschke/ginga | ginga/ImageView.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L1965-L1983 | def get_pixel_distance(self, x1, y1, x2, y2):
"""Calculate distance between the given pixel positions.
Parameters
----------
x1, y1, x2, y2 : number
Pixel coordinates.
Returns
-------
dist : float
Rounded distance.
"""
dx = abs(x2 - x1)
dy = abs(y2 - y1)
dist = np.sqrt(dx * dx + dy * dy)
dist = np.round(dist)
return dist | [
"def",
"get_pixel_distance",
"(",
"self",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
":",
"dx",
"=",
"abs",
"(",
"x2",
"-",
"x1",
")",
"dy",
"=",
"abs",
"(",
"y2",
"-",
"y1",
")",
"dist",
"=",
"np",
".",
"sqrt",
"(",
"dx",
"*",
"dx",
"+",
"dy",
"*",
"dy",
")",
"dist",
"=",
"np",
".",
"round",
"(",
"dist",
")",
"return",
"dist"
] | Calculate distance between the given pixel positions.
Parameters
----------
x1, y1, x2, y2 : number
Pixel coordinates.
Returns
-------
dist : float
Rounded distance. | [
"Calculate",
"distance",
"between",
"the",
"given",
"pixel",
"positions",
"."
] | python | train | 22.894737 |
Workiva/furious | furious/async.py | https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L305-L327 | def to_task(self):
"""Return a task object representing this async job."""
from google.appengine.api.taskqueue import Task
from google.appengine.api.taskqueue import TaskRetryOptions
self._increment_recursion_level()
self.check_recursion_depth()
url = "%s/%s" % (ASYNC_ENDPOINT, self.function_path)
kwargs = {
'url': url,
'headers': self.get_headers().copy(),
'payload': json.dumps(self.to_dict())
}
kwargs.update(copy.deepcopy(self.get_task_args()))
# Set task_retry_limit
retry_options = copy.deepcopy(DEFAULT_RETRY_OPTIONS)
retry_options.update(kwargs.pop('retry_options', {}))
kwargs['retry_options'] = TaskRetryOptions(**retry_options)
return Task(**kwargs) | [
"def",
"to_task",
"(",
"self",
")",
":",
"from",
"google",
".",
"appengine",
".",
"api",
".",
"taskqueue",
"import",
"Task",
"from",
"google",
".",
"appengine",
".",
"api",
".",
"taskqueue",
"import",
"TaskRetryOptions",
"self",
".",
"_increment_recursion_level",
"(",
")",
"self",
".",
"check_recursion_depth",
"(",
")",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"ASYNC_ENDPOINT",
",",
"self",
".",
"function_path",
")",
"kwargs",
"=",
"{",
"'url'",
":",
"url",
",",
"'headers'",
":",
"self",
".",
"get_headers",
"(",
")",
".",
"copy",
"(",
")",
",",
"'payload'",
":",
"json",
".",
"dumps",
"(",
"self",
".",
"to_dict",
"(",
")",
")",
"}",
"kwargs",
".",
"update",
"(",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"get_task_args",
"(",
")",
")",
")",
"# Set task_retry_limit",
"retry_options",
"=",
"copy",
".",
"deepcopy",
"(",
"DEFAULT_RETRY_OPTIONS",
")",
"retry_options",
".",
"update",
"(",
"kwargs",
".",
"pop",
"(",
"'retry_options'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'retry_options'",
"]",
"=",
"TaskRetryOptions",
"(",
"*",
"*",
"retry_options",
")",
"return",
"Task",
"(",
"*",
"*",
"kwargs",
")"
] | Return a task object representing this async job. | [
"Return",
"a",
"task",
"object",
"representing",
"this",
"async",
"job",
"."
] | python | train | 34.478261 |
Genida/archan | src/archan/config.py | https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/config.py#L315-L335 | def inflate_plugins(self, plugins_definition, inflate_method):
"""
Inflate multiple plugins based on a list/dict definition.
Args:
plugins_definition (list/dict): the plugins definitions.
            inflate_method (method): the method to inflate each plugin.
Returns:
list: a list of plugin instances.
Raises:
ValueError: when the definition type is not list or dict.
"""
if isinstance(plugins_definition, list):
return self.inflate_plugin_list(plugins_definition, inflate_method)
elif isinstance(plugins_definition, dict):
return self.inflate_plugin_dict(plugins_definition, inflate_method)
else:
raise ValueError('%s type is not supported for a plugin list, '
'use list or dict' % type(plugins_definition)) | [
"def",
"inflate_plugins",
"(",
"self",
",",
"plugins_definition",
",",
"inflate_method",
")",
":",
"if",
"isinstance",
"(",
"plugins_definition",
",",
"list",
")",
":",
"return",
"self",
".",
"inflate_plugin_list",
"(",
"plugins_definition",
",",
"inflate_method",
")",
"elif",
"isinstance",
"(",
"plugins_definition",
",",
"dict",
")",
":",
"return",
"self",
".",
"inflate_plugin_dict",
"(",
"plugins_definition",
",",
"inflate_method",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'%s type is not supported for a plugin list, '",
"'use list or dict'",
"%",
"type",
"(",
"plugins_definition",
")",
")"
] | Inflate multiple plugins based on a list/dict definition.
Args:
plugins_definition (list/dict): the plugins definitions.
            inflate_method (method): the method to inflate each plugin.
Returns:
list: a list of plugin instances.
Raises:
ValueError: when the definition type is not list or dict. | [
"Inflate",
"multiple",
"plugins",
"based",
"on",
"a",
"list",
"/",
"dict",
"definition",
"."
] | python | train | 41.190476 |
inasafe/inasafe | safe/utilities/styling.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/styling.py#L38-L74 | def mmi_ramp_roman(raster_layer):
"""Generate an mmi ramp using range of 1-10 on roman.
A standarised range is used so that two shakemaps of different
intensities can be properly compared visually with colours stretched
accross the same range.
The colours used are the 'standard' colours commonly shown for the
mercalli scale e.g. on wikipedia and other sources.
:param raster_layer: A raster layer that will have an mmi style applied.
:type raster_layer: QgsRasterLayer
.. versionadded:: 4.0
"""
items = []
sorted_mmi_scale = sorted(
earthquake_mmi_scale['classes'], key=itemgetter('value'))
for class_max in sorted_mmi_scale:
colour = class_max['color']
label = '%s' % class_max['key']
ramp_item = QgsColorRampShader.ColorRampItem(
class_max['value'], colour, label)
items.append(ramp_item)
raster_shader = QgsRasterShader()
ramp_shader = QgsColorRampShader()
ramp_shader.setColorRampType(QgsColorRampShader.Interpolated)
ramp_shader.setColorRampItemList(items)
raster_shader.setRasterShaderFunction(ramp_shader)
band = 1
renderer = QgsSingleBandPseudoColorRenderer(
raster_layer.dataProvider(),
band,
raster_shader)
raster_layer.setRenderer(renderer) | [
"def",
"mmi_ramp_roman",
"(",
"raster_layer",
")",
":",
"items",
"=",
"[",
"]",
"sorted_mmi_scale",
"=",
"sorted",
"(",
"earthquake_mmi_scale",
"[",
"'classes'",
"]",
",",
"key",
"=",
"itemgetter",
"(",
"'value'",
")",
")",
"for",
"class_max",
"in",
"sorted_mmi_scale",
":",
"colour",
"=",
"class_max",
"[",
"'color'",
"]",
"label",
"=",
"'%s'",
"%",
"class_max",
"[",
"'key'",
"]",
"ramp_item",
"=",
"QgsColorRampShader",
".",
"ColorRampItem",
"(",
"class_max",
"[",
"'value'",
"]",
",",
"colour",
",",
"label",
")",
"items",
".",
"append",
"(",
"ramp_item",
")",
"raster_shader",
"=",
"QgsRasterShader",
"(",
")",
"ramp_shader",
"=",
"QgsColorRampShader",
"(",
")",
"ramp_shader",
".",
"setColorRampType",
"(",
"QgsColorRampShader",
".",
"Interpolated",
")",
"ramp_shader",
".",
"setColorRampItemList",
"(",
"items",
")",
"raster_shader",
".",
"setRasterShaderFunction",
"(",
"ramp_shader",
")",
"band",
"=",
"1",
"renderer",
"=",
"QgsSingleBandPseudoColorRenderer",
"(",
"raster_layer",
".",
"dataProvider",
"(",
")",
",",
"band",
",",
"raster_shader",
")",
"raster_layer",
".",
"setRenderer",
"(",
"renderer",
")"
] | Generate an mmi ramp using range of 1-10 on roman.
    A standardised range is used so that two shakemaps of different
intensities can be properly compared visually with colours stretched
    across the same range.
The colours used are the 'standard' colours commonly shown for the
mercalli scale e.g. on wikipedia and other sources.
:param raster_layer: A raster layer that will have an mmi style applied.
:type raster_layer: QgsRasterLayer
.. versionadded:: 4.0 | [
"Generate",
"an",
"mmi",
"ramp",
"using",
"range",
"of",
"1",
"-",
"10",
"on",
"roman",
"."
] | python | train | 34.675676 |
pypa/pipenv | pipenv/vendor/distlib/_backport/tarfile.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L2372-L2392 | def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner") | [
"def",
"chown",
"(",
"self",
",",
"tarinfo",
",",
"targetpath",
")",
":",
"if",
"pwd",
"and",
"hasattr",
"(",
"os",
",",
"\"geteuid\"",
")",
"and",
"os",
".",
"geteuid",
"(",
")",
"==",
"0",
":",
"# We have to be root to do so.",
"try",
":",
"g",
"=",
"grp",
".",
"getgrnam",
"(",
"tarinfo",
".",
"gname",
")",
"[",
"2",
"]",
"except",
"KeyError",
":",
"g",
"=",
"tarinfo",
".",
"gid",
"try",
":",
"u",
"=",
"pwd",
".",
"getpwnam",
"(",
"tarinfo",
".",
"uname",
")",
"[",
"2",
"]",
"except",
"KeyError",
":",
"u",
"=",
"tarinfo",
".",
"uid",
"try",
":",
"if",
"tarinfo",
".",
"issym",
"(",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"lchown\"",
")",
":",
"os",
".",
"lchown",
"(",
"targetpath",
",",
"u",
",",
"g",
")",
"else",
":",
"if",
"sys",
".",
"platform",
"!=",
"\"os2emx\"",
":",
"os",
".",
"chown",
"(",
"targetpath",
",",
"u",
",",
"g",
")",
"except",
"EnvironmentError",
"as",
"e",
":",
"raise",
"ExtractError",
"(",
"\"could not change owner\"",
")"
] | Set owner of targetpath according to tarinfo. | [
"Set",
"owner",
"of",
"targetpath",
"according",
"to",
"tarinfo",
"."
] | python | train | 38.285714 |
twosigma/beakerx | beakerx/setupbase.py | https://github.com/twosigma/beakerx/blob/404de61ed627d9daaf6b77eb4859e7cb6f37413f/beakerx/setupbase.py#L296-L320 | def run_gradle(path=kernel_path, cmd='build', skip_tests=False):
"""Return a Command for running gradle scripts.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
cmd: str, optional
The command to run with gradlew.
"""
class Gradle(BaseCommand):
description = 'Run gradle script'
def skip_test_option(self, skip):
if skip:
return '-Dskip.tests=True'
else:
return '-Dskip.tests=False'
def run(self):
run([('' if sys.platform == 'win32' else './') + 'gradlew', '--no-daemon', cmd,
self.skip_test_option(skip_tests)], cwd=path)
return Gradle | [
"def",
"run_gradle",
"(",
"path",
"=",
"kernel_path",
",",
"cmd",
"=",
"'build'",
",",
"skip_tests",
"=",
"False",
")",
":",
"class",
"Gradle",
"(",
"BaseCommand",
")",
":",
"description",
"=",
"'Run gradle script'",
"def",
"skip_test_option",
"(",
"self",
",",
"skip",
")",
":",
"if",
"skip",
":",
"return",
"'-Dskip.tests=True'",
"else",
":",
"return",
"'-Dskip.tests=False'",
"def",
"run",
"(",
"self",
")",
":",
"run",
"(",
"[",
"(",
"''",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
"else",
"'./'",
")",
"+",
"'gradlew'",
",",
"'--no-daemon'",
",",
"cmd",
",",
"self",
".",
"skip_test_option",
"(",
"skip_tests",
")",
"]",
",",
"cwd",
"=",
"path",
")",
"return",
"Gradle"
] | Return a Command for running gradle scripts.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
cmd: str, optional
The command to run with gradlew. | [
"Return",
"a",
"Command",
"for",
"running",
"gradle",
"scripts",
"."
] | python | train | 29.24 |
joke2k/django-environ | environ/environ.py | https://github.com/joke2k/django-environ/blob/c2620021614557abe197578f99deeef42af3e082/environ/environ.py#L495-L537 | def email_url_config(cls, url, backend=None):
"""Parses an email URL."""
config = {}
url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings
path = url.path[1:]
path = unquote_plus(path.split('?', 2)[0])
# Update with environment configuration
config.update({
'EMAIL_FILE_PATH': path,
'EMAIL_HOST_USER': _cast_urlstr(url.username),
'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
'EMAIL_HOST': url.hostname,
'EMAIL_PORT': _cast_int(url.port),
})
if backend:
config['EMAIL_BACKEND'] = backend
elif url.scheme not in cls.EMAIL_SCHEMES:
raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
elif url.scheme in cls.EMAIL_SCHEMES:
config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
if url.scheme in ('smtps', 'smtp+tls'):
config['EMAIL_USE_TLS'] = True
elif url.scheme == 'smtp+ssl':
config['EMAIL_USE_SSL'] = True
if url.query:
config_options = {}
for k, v in parse_qs(url.query).items():
opt = {k.upper(): _cast_int(v[0])}
if k.upper() in cls._EMAIL_BASE_OPTIONS:
config.update(opt)
else:
config_options.update(opt)
config['OPTIONS'] = config_options
return config | [
"def",
"email_url_config",
"(",
"cls",
",",
"url",
",",
"backend",
"=",
"None",
")",
":",
"config",
"=",
"{",
"}",
"url",
"=",
"urlparse",
"(",
"url",
")",
"if",
"not",
"isinstance",
"(",
"url",
",",
"cls",
".",
"URL_CLASS",
")",
"else",
"url",
"# Remove query strings",
"path",
"=",
"url",
".",
"path",
"[",
"1",
":",
"]",
"path",
"=",
"unquote_plus",
"(",
"path",
".",
"split",
"(",
"'?'",
",",
"2",
")",
"[",
"0",
"]",
")",
"# Update with environment configuration",
"config",
".",
"update",
"(",
"{",
"'EMAIL_FILE_PATH'",
":",
"path",
",",
"'EMAIL_HOST_USER'",
":",
"_cast_urlstr",
"(",
"url",
".",
"username",
")",
",",
"'EMAIL_HOST_PASSWORD'",
":",
"_cast_urlstr",
"(",
"url",
".",
"password",
")",
",",
"'EMAIL_HOST'",
":",
"url",
".",
"hostname",
",",
"'EMAIL_PORT'",
":",
"_cast_int",
"(",
"url",
".",
"port",
")",
",",
"}",
")",
"if",
"backend",
":",
"config",
"[",
"'EMAIL_BACKEND'",
"]",
"=",
"backend",
"elif",
"url",
".",
"scheme",
"not",
"in",
"cls",
".",
"EMAIL_SCHEMES",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Invalid email schema %s'",
"%",
"url",
".",
"scheme",
")",
"elif",
"url",
".",
"scheme",
"in",
"cls",
".",
"EMAIL_SCHEMES",
":",
"config",
"[",
"'EMAIL_BACKEND'",
"]",
"=",
"cls",
".",
"EMAIL_SCHEMES",
"[",
"url",
".",
"scheme",
"]",
"if",
"url",
".",
"scheme",
"in",
"(",
"'smtps'",
",",
"'smtp+tls'",
")",
":",
"config",
"[",
"'EMAIL_USE_TLS'",
"]",
"=",
"True",
"elif",
"url",
".",
"scheme",
"==",
"'smtp+ssl'",
":",
"config",
"[",
"'EMAIL_USE_SSL'",
"]",
"=",
"True",
"if",
"url",
".",
"query",
":",
"config_options",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"parse_qs",
"(",
"url",
".",
"query",
")",
".",
"items",
"(",
")",
":",
"opt",
"=",
"{",
"k",
".",
"upper",
"(",
")",
":",
"_cast_int",
"(",
"v",
"[",
"0",
"]",
")",
"}",
"if",
"k",
".",
"upper",
"(",
")",
"in",
"cls",
".",
"_EMAIL_BASE_OPTIONS",
":",
"config",
".",
"update",
"(",
"opt",
")",
"else",
":",
"config_options",
".",
"update",
"(",
"opt",
")",
"config",
"[",
"'OPTIONS'",
"]",
"=",
"config_options",
"return",
"config"
] | Parses an email URL. | [
"Parses",
"an",
"email",
"URL",
"."
] | python | train | 33.813953 |
rhayes777/PyAutoFit | autofit/conf.py | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/conf.py#L128-L153 | def get_for_nearest_ancestor(self, cls, attribute_name):
"""
Find a prior with the attribute analysis_path from the config for this class or one of its ancestors
Parameters
----------
cls: class
The class of interest
attribute_name: String
The analysis_path of the attribute
Returns
-------
prior_array: []
An array describing this prior
"""
for family_cls in family(cls):
if self.has(family_cls.__module__, family_cls.__name__, attribute_name):
return self.get(family_cls.__module__, family_cls.__name__, attribute_name)
ini_filename = cls.__module__.split(".")[-1]
raise exc.PriorException(
"The prior config at {}/{} does not contain {} in {} or any of its parents".format(self.path,
ini_filename,
attribute_name,
cls.__name__
)) | [
"def",
"get_for_nearest_ancestor",
"(",
"self",
",",
"cls",
",",
"attribute_name",
")",
":",
"for",
"family_cls",
"in",
"family",
"(",
"cls",
")",
":",
"if",
"self",
".",
"has",
"(",
"family_cls",
".",
"__module__",
",",
"family_cls",
".",
"__name__",
",",
"attribute_name",
")",
":",
"return",
"self",
".",
"get",
"(",
"family_cls",
".",
"__module__",
",",
"family_cls",
".",
"__name__",
",",
"attribute_name",
")",
"ini_filename",
"=",
"cls",
".",
"__module__",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"raise",
"exc",
".",
"PriorException",
"(",
"\"The prior config at {}/{} does not contain {} in {} or any of its parents\"",
".",
"format",
"(",
"self",
".",
"path",
",",
"ini_filename",
",",
"attribute_name",
",",
"cls",
".",
"__name__",
")",
")"
] | Find a prior with the attribute analysis_path from the config for this class or one of its ancestors
Parameters
----------
cls: class
The class of interest
attribute_name: String
The analysis_path of the attribute
Returns
-------
prior_array: []
An array describing this prior | [
"Find",
"a",
"prior",
"with",
"the",
"attribute",
"analysis_path",
"from",
"the",
"config",
"for",
"this",
"class",
"or",
"one",
"of",
"its",
"ancestors"
] | python | train | 48.807692 |
buruzaemon/natto-py | natto/environment.py | https://github.com/buruzaemon/natto-py/blob/018fe004c47c45c66bdf2e03fe24e981ae089b76/natto/environment.py#L164-L199 | def __regkey_value(self, path, name='', start_key=None):
r'''Return the data of value mecabrc at MeCab HKEY node.
On Windows, the path to the mecabrc as set in the Windows Registry is
used to deduce the path to libmecab.dll.
Returns:
The full path to the mecabrc on Windows.
Raises:
WindowsError: A problem was encountered in trying to locate the
value mecabrc at HKEY_CURRENT_USER\Software\MeCab.
'''
if sys.version < '3':
import _winreg as reg
else:
import winreg as reg
def _fn(path, name='', start_key=None):
if isinstance(path, str):
path = path.split('\\')
if start_key is None:
start_key = getattr(reg, path[0])
return _fn(path[1:], name, start_key)
else:
subkey = path.pop(0)
with reg.OpenKey(start_key, subkey) as handle:
if path:
return _fn(path, name, handle)
else:
desc, i = None, 0
while not desc or desc[0] != name:
desc = reg.EnumValue(handle, i)
i += 1
return desc[1]
return _fn(path, name, start_key) | [
"def",
"__regkey_value",
"(",
"self",
",",
"path",
",",
"name",
"=",
"''",
",",
"start_key",
"=",
"None",
")",
":",
"if",
"sys",
".",
"version",
"<",
"'3'",
":",
"import",
"_winreg",
"as",
"reg",
"else",
":",
"import",
"winreg",
"as",
"reg",
"def",
"_fn",
"(",
"path",
",",
"name",
"=",
"''",
",",
"start_key",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"path",
"=",
"path",
".",
"split",
"(",
"'\\\\'",
")",
"if",
"start_key",
"is",
"None",
":",
"start_key",
"=",
"getattr",
"(",
"reg",
",",
"path",
"[",
"0",
"]",
")",
"return",
"_fn",
"(",
"path",
"[",
"1",
":",
"]",
",",
"name",
",",
"start_key",
")",
"else",
":",
"subkey",
"=",
"path",
".",
"pop",
"(",
"0",
")",
"with",
"reg",
".",
"OpenKey",
"(",
"start_key",
",",
"subkey",
")",
"as",
"handle",
":",
"if",
"path",
":",
"return",
"_fn",
"(",
"path",
",",
"name",
",",
"handle",
")",
"else",
":",
"desc",
",",
"i",
"=",
"None",
",",
"0",
"while",
"not",
"desc",
"or",
"desc",
"[",
"0",
"]",
"!=",
"name",
":",
"desc",
"=",
"reg",
".",
"EnumValue",
"(",
"handle",
",",
"i",
")",
"i",
"+=",
"1",
"return",
"desc",
"[",
"1",
"]",
"return",
"_fn",
"(",
"path",
",",
"name",
",",
"start_key",
")"
] | r'''Return the data of value mecabrc at MeCab HKEY node.
On Windows, the path to the mecabrc as set in the Windows Registry is
used to deduce the path to libmecab.dll.
Returns:
The full path to the mecabrc on Windows.
Raises:
WindowsError: A problem was encountered in trying to locate the
value mecabrc at HKEY_CURRENT_USER\Software\MeCab. | [
"r",
"Return",
"the",
"data",
"of",
"value",
"mecabrc",
"at",
"MeCab",
"HKEY",
"node",
".",
"On",
"Windows",
"the",
"path",
"to",
"the",
"mecabrc",
"as",
"set",
"in",
"the",
"Windows",
"Registry",
"is",
"used",
"to",
"deduce",
"the",
"path",
"to",
"libmecab",
".",
"dll",
".",
"Returns",
":",
"The",
"full",
"path",
"to",
"the",
"mecabrc",
"on",
"Windows",
".",
"Raises",
":",
"WindowsError",
":",
"A",
"problem",
"was",
"encountered",
"in",
"trying",
"to",
"locate",
"the",
"value",
"mecabrc",
"at",
"HKEY_CURRENT_USER",
"\\",
"Software",
"\\",
"MeCab",
"."
] | python | train | 37.138889 |
mitsei/dlkit | dlkit/json_/assessment/mixins.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/mixins.py#L494-L529 | def get_questions(self, answered=None, honor_sequential=True, update=True):
"""gets all available questions for this section
if answered == False: only return next unanswered question
if answered == True: only return next answered question
        if answered is None: return next question whether answered or not
if honor_sequential == True: only return questions if section or part
is set to sequential items
"""
def update_question_list():
"""Supportive function to aid readability of _get_questions."""
latest_question_response = question_map['responses'][0]
question_answered = False
# take missingResponse == UNANSWERED or NULL_RESPONSE as an unanswered question
if 'missingResponse' not in latest_question_response:
question_answered = True
if answered is None or answered == question_answered:
question_list.append(self.get_question(question_map=question_map))
return question_answered
prev_question_answered = True
question_list = []
if update:
self._update_questions() # Make sure questions list is current
for question_map in self._my_map['questions']:
if self._is_question_sequential(question_map) and honor_sequential:
if prev_question_answered:
prev_question_answered = update_question_list()
else:
update_question_list()
if self._my_map['actualStartTime'] is None:
self._my_map['actualStartTime'] = DateTime.utcnow()
return QuestionList(question_list, runtime=self._runtime, proxy=self._proxy) | [
"def",
"get_questions",
"(",
"self",
",",
"answered",
"=",
"None",
",",
"honor_sequential",
"=",
"True",
",",
"update",
"=",
"True",
")",
":",
"def",
"update_question_list",
"(",
")",
":",
"\"\"\"Supportive function to aid readability of _get_questions.\"\"\"",
"latest_question_response",
"=",
"question_map",
"[",
"'responses'",
"]",
"[",
"0",
"]",
"question_answered",
"=",
"False",
"# take missingResponse == UNANSWERED or NULL_RESPONSE as an unanswered question",
"if",
"'missingResponse'",
"not",
"in",
"latest_question_response",
":",
"question_answered",
"=",
"True",
"if",
"answered",
"is",
"None",
"or",
"answered",
"==",
"question_answered",
":",
"question_list",
".",
"append",
"(",
"self",
".",
"get_question",
"(",
"question_map",
"=",
"question_map",
")",
")",
"return",
"question_answered",
"prev_question_answered",
"=",
"True",
"question_list",
"=",
"[",
"]",
"if",
"update",
":",
"self",
".",
"_update_questions",
"(",
")",
"# Make sure questions list is current",
"for",
"question_map",
"in",
"self",
".",
"_my_map",
"[",
"'questions'",
"]",
":",
"if",
"self",
".",
"_is_question_sequential",
"(",
"question_map",
")",
"and",
"honor_sequential",
":",
"if",
"prev_question_answered",
":",
"prev_question_answered",
"=",
"update_question_list",
"(",
")",
"else",
":",
"update_question_list",
"(",
")",
"if",
"self",
".",
"_my_map",
"[",
"'actualStartTime'",
"]",
"is",
"None",
":",
"self",
".",
"_my_map",
"[",
"'actualStartTime'",
"]",
"=",
"DateTime",
".",
"utcnow",
"(",
")",
"return",
"QuestionList",
"(",
"question_list",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")"
] | gets all available questions for this section
if answered == False: only return next unanswered question
if answered == True: only return next answered question
        if answered is None: return next question whether answered or not
if honor_sequential == True: only return questions if section or part
is set to sequential items | [
"gets",
"all",
"available",
"questions",
"for",
"this",
"section"
] | python | train | 48 |
d0c-s4vage/pfp | pfp/interp.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L353-L364 | def add_local(self, field_name, field):
"""Add a local variable in the current scope
:field_name: The field's name
:field: The field
:returns: None
"""
self._dlog("adding local '{}'".format(field_name))
field._pfp__name = field_name
# TODO do we allow clobbering of locals???
self._curr_scope["vars"][field_name] = field | [
"def",
"add_local",
"(",
"self",
",",
"field_name",
",",
"field",
")",
":",
"self",
".",
"_dlog",
"(",
"\"adding local '{}'\"",
".",
"format",
"(",
"field_name",
")",
")",
"field",
".",
"_pfp__name",
"=",
"field_name",
"# TODO do we allow clobbering of locals???",
"self",
".",
"_curr_scope",
"[",
"\"vars\"",
"]",
"[",
"field_name",
"]",
"=",
"field"
] | Add a local variable in the current scope
:field_name: The field's name
:field: The field
:returns: None | [
"Add",
"a",
"local",
"variable",
"in",
"the",
"current",
"scope"
] | python | train | 31.916667 |
potash/drain | drain/data.py | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L44-L56 | def apply(self, df):
"""Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`.
"""
if hasattr(self.definition, '__call__'):
r = self.definition(df)
elif self.definition in df.columns:
r = df[self.definition]
elif not isinstance(self.definition, string_types):
r = pd.Series(self.definition, index=df.index)
else:
raise ValueError("Invalid column definition: %s" % str(self.definition))
return r.astype(self.astype) if self.astype else r | [
"def",
"apply",
"(",
"self",
",",
"df",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"definition",
",",
"'__call__'",
")",
":",
"r",
"=",
"self",
".",
"definition",
"(",
"df",
")",
"elif",
"self",
".",
"definition",
"in",
"df",
".",
"columns",
":",
"r",
"=",
"df",
"[",
"self",
".",
"definition",
"]",
"elif",
"not",
"isinstance",
"(",
"self",
".",
"definition",
",",
"string_types",
")",
":",
"r",
"=",
"pd",
".",
"Series",
"(",
"self",
".",
"definition",
",",
"index",
"=",
"df",
".",
"index",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid column definition: %s\"",
"%",
"str",
"(",
"self",
".",
"definition",
")",
")",
"return",
"r",
".",
"astype",
"(",
"self",
".",
"astype",
")",
"if",
"self",
".",
"astype",
"else",
"r"
] | Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`. | [
"Takes",
"a",
"pd",
".",
"DataFrame",
"and",
"returns",
"the",
"newly",
"defined",
"column",
"i",
".",
"e",
".",
"a",
"pd",
".",
"Series",
"that",
"has",
"the",
"same",
"index",
"as",
"df",
"."
] | python | train | 45.384615 |
readbeyond/aeneas | aeneas/validator.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/validator.py#L380-L410 | def check_config_xml(self, contents):
"""
Check whether the given XML config file contents
is well-formed and it has all the required parameters.
:param string contents: the XML config file contents or XML config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log(u"Checking contents XML config file")
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_config_xml"):
return self.result
contents = gf.safe_bytes(contents)
self.log(u"Checking that contents is well formed")
self.check_raw_string(contents, is_bstring=True)
if not self.result.passed:
return self.result
self.log(u"Checking required parameters for job")
job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True)
self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters)
if not self.result.passed:
return self.result
self.log(u"Checking required parameters for task")
tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False)
for parameters in tasks_parameters:
self.log([u"Checking required parameters for task: '%s'", parameters])
self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters)
if not self.result.passed:
return self.result
return self.result | [
"def",
"check_config_xml",
"(",
"self",
",",
"contents",
")",
":",
"self",
".",
"log",
"(",
"u\"Checking contents XML config file\"",
")",
"self",
".",
"result",
"=",
"ValidatorResult",
"(",
")",
"if",
"self",
".",
"_are_safety_checks_disabled",
"(",
"u\"check_config_xml\"",
")",
":",
"return",
"self",
".",
"result",
"contents",
"=",
"gf",
".",
"safe_bytes",
"(",
"contents",
")",
"self",
".",
"log",
"(",
"u\"Checking that contents is well formed\"",
")",
"self",
".",
"check_raw_string",
"(",
"contents",
",",
"is_bstring",
"=",
"True",
")",
"if",
"not",
"self",
".",
"result",
".",
"passed",
":",
"return",
"self",
".",
"result",
"self",
".",
"log",
"(",
"u\"Checking required parameters for job\"",
")",
"job_parameters",
"=",
"gf",
".",
"config_xml_to_dict",
"(",
"contents",
",",
"self",
".",
"result",
",",
"parse_job",
"=",
"True",
")",
"self",
".",
"_check_required_parameters",
"(",
"self",
".",
"XML_JOB_REQUIRED_PARAMETERS",
",",
"job_parameters",
")",
"if",
"not",
"self",
".",
"result",
".",
"passed",
":",
"return",
"self",
".",
"result",
"self",
".",
"log",
"(",
"u\"Checking required parameters for task\"",
")",
"tasks_parameters",
"=",
"gf",
".",
"config_xml_to_dict",
"(",
"contents",
",",
"self",
".",
"result",
",",
"parse_job",
"=",
"False",
")",
"for",
"parameters",
"in",
"tasks_parameters",
":",
"self",
".",
"log",
"(",
"[",
"u\"Checking required parameters for task: '%s'\"",
",",
"parameters",
"]",
")",
"self",
".",
"_check_required_parameters",
"(",
"self",
".",
"XML_TASK_REQUIRED_PARAMETERS",
",",
"parameters",
")",
"if",
"not",
"self",
".",
"result",
".",
"passed",
":",
"return",
"self",
".",
"result",
"return",
"self",
".",
"result"
] | Check whether the given XML config file contents
is well-formed and it has all the required parameters.
:param string contents: the XML config file contents or XML config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult` | [
"Check",
"whether",
"the",
"given",
"XML",
"config",
"file",
"contents",
"is",
"well",
"-",
"formed",
"and",
"it",
"has",
"all",
"the",
"required",
"parameters",
"."
] | python | train | 50.193548 |
pycontribs/jira | jira/client.py | https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L3426-L3498 | def create_project(self, key, name=None, assignee=None, type="Software", template_name=None):
"""Create a project with the specified parameters.
:param key: Mandatory. Must match JIRA project key requirements, usually only 2-10 uppercase characters.
:type: str
:param name: If not specified it will use the key value.
:type name: Optional[str]
:param assignee: If not specified it will use current user.
:type assignee: Optional[str]
:param type: Determines the type of project should be created.
:type type: Optional[str]
:param template_name: is used to create a project based on one of the existing project templates.
If `template_name` is not specified, then it should use one of the default values.
:type template_name: Optional[str]
:return: Should evaluate to False if it fails otherwise it will be the new project id.
:rtype: Union[bool,int]
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
possible_templates = ['Basic', 'JIRA Classic', 'JIRA Default Schemes', 'Basic software development']
if template_name is not None:
possible_templates = [template_name]
# https://confluence.atlassian.com/jirakb/creating-a-project-via-rest-based-on-jira-default-schemes-744325852.html
templates = self.templates()
# TODO(ssbarnea): find a better logic to pick a default fallback template
template_key = list(templates.values())[0]['projectTemplateModuleCompleteKey']
for template_name, template_dic in templates.items():
if template_name in possible_templates:
template_key = template_dic['projectTemplateModuleCompleteKey']
break
payload = {'name': name,
'key': key,
'keyEdited': 'false',
# 'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
# 'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
# 'assigneeType': '2',
}
if self._version[0] > 6:
# JIRA versions before 7 will throw an error if we specify type parameter
payload['type'] = type
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
url = self._options['server'] + \
'/rest/project-templates/latest/templates'
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False | [
"def",
"create_project",
"(",
"self",
",",
"key",
",",
"name",
"=",
"None",
",",
"assignee",
"=",
"None",
",",
"type",
"=",
"\"Software\"",
",",
"template_name",
"=",
"None",
")",
":",
"if",
"assignee",
"is",
"None",
":",
"assignee",
"=",
"self",
".",
"current_user",
"(",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"key",
"possible_templates",
"=",
"[",
"'Basic'",
",",
"'JIRA Classic'",
",",
"'JIRA Default Schemes'",
",",
"'Basic software development'",
"]",
"if",
"template_name",
"is",
"not",
"None",
":",
"possible_templates",
"=",
"[",
"template_name",
"]",
"# https://confluence.atlassian.com/jirakb/creating-a-project-via-rest-based-on-jira-default-schemes-744325852.html",
"templates",
"=",
"self",
".",
"templates",
"(",
")",
"# TODO(ssbarnea): find a better logic to pick a default fallback template",
"template_key",
"=",
"list",
"(",
"templates",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"[",
"'projectTemplateModuleCompleteKey'",
"]",
"for",
"template_name",
",",
"template_dic",
"in",
"templates",
".",
"items",
"(",
")",
":",
"if",
"template_name",
"in",
"possible_templates",
":",
"template_key",
"=",
"template_dic",
"[",
"'projectTemplateModuleCompleteKey'",
"]",
"break",
"payload",
"=",
"{",
"'name'",
":",
"name",
",",
"'key'",
":",
"key",
",",
"'keyEdited'",
":",
"'false'",
",",
"# 'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',",
"# 'permissionScheme': '',",
"'projectTemplateWebItemKey'",
":",
"template_key",
",",
"'projectTemplateModuleKey'",
":",
"template_key",
",",
"'lead'",
":",
"assignee",
",",
"# 'assigneeType': '2',",
"}",
"if",
"self",
".",
"_version",
"[",
"0",
"]",
">",
"6",
":",
"# JIRA versions before 7 will throw an error if we specify type parameter",
"payload",
"[",
"'type'",
"]",
"=",
"type",
"headers",
"=",
"CaseInsensitiveDict",
"(",
"{",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
"}",
")",
"url",
"=",
"self",
".",
"_options",
"[",
"'server'",
"]",
"+",
"'/rest/project-templates/latest/templates'",
"r",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"payload",
",",
"headers",
"=",
"headers",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"r_json",
"=",
"json_loads",
"(",
"r",
")",
"return",
"r_json",
"f",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.html'",
",",
"prefix",
"=",
"'python-jira-error-create-project-'",
",",
"delete",
"=",
"False",
")",
"f",
".",
"write",
"(",
"r",
".",
"text",
")",
"if",
"self",
".",
"logging",
":",
"logging",
".",
"error",
"(",
"\"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s].\"",
"%",
"(",
"f",
".",
"name",
",",
"r",
".",
"status_code",
")",
")",
"return",
"False"
] | Create a project with the specified parameters.
:param key: Mandatory. Must match JIRA project key requirements, usually only 2-10 uppercase characters.
:type: str
:param name: If not specified it will use the key value.
:type name: Optional[str]
:param assignee: If not specified it will use current user.
:type assignee: Optional[str]
:param type: Determines the type of project should be created.
:type type: Optional[str]
:param template_name: is used to create a project based on one of the existing project templates.
If `template_name` is not specified, then it should use one of the default values.
:type template_name: Optional[str]
:return: Should evaluate to False if it fails otherwise it will be the new project id.
:rtype: Union[bool,int] | [
"Create",
"a",
"project",
"with",
"the",
"specified",
"parameters",
"."
] | python | train | 43.69863 |
python-diamond/Diamond | src/collectors/onewire/onewire.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/onewire/onewire.py#L36-L47 | def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(OneWireCollector, self).get_default_config()
config.update({
'path': 'owfs',
'owfs': '/mnt/1wire',
# 'scan': {'temperature': 't'},
# 'id:24.BB000000': {'file_with_value': 'alias'},
})
return config | [
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"OneWireCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'owfs'",
",",
"'owfs'",
":",
"'/mnt/1wire'",
",",
"# 'scan': {'temperature': 't'},",
"# 'id:24.BB000000': {'file_with_value': 'alias'},",
"}",
")",
"return",
"config"
] | Returns the default collector settings | [
"Returns",
"the",
"default",
"collector",
"settings"
] | python | train | 31.833333 |
nirum/descent | descent/proxops.py | https://github.com/nirum/descent/blob/074c8452f15a0da638668a4fe139fde06ccfae7f/descent/proxops.py#L269-L300 | def fantope(x, rho, dim, tol=1e-4):
"""
Projection onto the fantope [1]_
.. [1] Vu, Vincent Q., et al. "Fantope projection and selection: A
near-optimal convex relaxation of sparse PCA." Advances in
neural information processing systems. 2013.
"""
U, V = np.linalg.eigh(x)
minval, maxval = np.maximum(U.min(), 0), np.maximum(U.max(), 20 * dim)
while True:
theta = 0.5 * (maxval + minval)
thr_eigvals = np.minimum(np.maximum((U - theta), 0), 1)
constraint = np.sum(thr_eigvals)
if np.abs(constraint - dim) <= tol:
break
elif constraint < dim:
maxval = theta
elif constraint > dim:
minval = theta
else:
break
return np.linalg.multi_dot((V, np.diag(thr_eigvals), V.T)) | [
"def",
"fantope",
"(",
"x",
",",
"rho",
",",
"dim",
",",
"tol",
"=",
"1e-4",
")",
":",
"U",
",",
"V",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"x",
")",
"minval",
",",
"maxval",
"=",
"np",
".",
"maximum",
"(",
"U",
".",
"min",
"(",
")",
",",
"0",
")",
",",
"np",
".",
"maximum",
"(",
"U",
".",
"max",
"(",
")",
",",
"20",
"*",
"dim",
")",
"while",
"True",
":",
"theta",
"=",
"0.5",
"*",
"(",
"maxval",
"+",
"minval",
")",
"thr_eigvals",
"=",
"np",
".",
"minimum",
"(",
"np",
".",
"maximum",
"(",
"(",
"U",
"-",
"theta",
")",
",",
"0",
")",
",",
"1",
")",
"constraint",
"=",
"np",
".",
"sum",
"(",
"thr_eigvals",
")",
"if",
"np",
".",
"abs",
"(",
"constraint",
"-",
"dim",
")",
"<=",
"tol",
":",
"break",
"elif",
"constraint",
"<",
"dim",
":",
"maxval",
"=",
"theta",
"elif",
"constraint",
">",
"dim",
":",
"minval",
"=",
"theta",
"else",
":",
"break",
"return",
"np",
".",
"linalg",
".",
"multi_dot",
"(",
"(",
"V",
",",
"np",
".",
"diag",
"(",
"thr_eigvals",
")",
",",
"V",
".",
"T",
")",
")"
] | Projection onto the fantope [1]_
.. [1] Vu, Vincent Q., et al. "Fantope projection and selection: A
near-optimal convex relaxation of sparse PCA." Advances in
neural information processing systems. 2013. | [
"Projection",
"onto",
"the",
"fantope",
"[",
"1",
"]",
"_"
] | python | valid | 25.0625 |
tyiannak/pyAudioAnalysis | pyAudioAnalysis/audioBasicIO.py | https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioBasicIO.py#L5-L38 | def convertDirMP3ToWav(dirName, Fs, nC, useMp3TagsAsName = False):
'''
This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used.
ARGUMENTS:
- dirName: the path of the folder where the MP3s are stored
- Fs: the sampling rate of the generated WAV files
- nC: the number of channels of the generated WAV files
- useMp3TagsAsName: True if the WAV filename is generated on MP3 tags
'''
types = (dirName+os.sep+'*.mp3',) # the tuple of file types
filesToProcess = []
for files in types:
filesToProcess.extend(glob.glob(files))
for f in filesToProcess:
#tag.link(f)
audioFile = eyed3.load(f)
if useMp3TagsAsName and audioFile.tag != None:
artist = audioFile.tag.artist
title = audioFile.tag.title
if artist!=None and title!=None:
if len(title)>0 and len(artist)>0:
wavFileName = ntpath.split(f)[0] + os.sep + artist.replace(","," ") + " --- " + title.replace(","," ") + ".wav"
else:
wavFileName = f.replace(".mp3",".wav")
else:
wavFileName = f.replace(".mp3",".wav")
else:
wavFileName = f.replace(".mp3",".wav")
command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + wavFileName + "\"";
print(command)
os.system(command.decode('unicode_escape').encode('ascii','ignore').replace("\0","")) | [
"def",
"convertDirMP3ToWav",
"(",
"dirName",
",",
"Fs",
",",
"nC",
",",
"useMp3TagsAsName",
"=",
"False",
")",
":",
"types",
"=",
"(",
"dirName",
"+",
"os",
".",
"sep",
"+",
"'*.mp3'",
",",
")",
"# the tuple of file types",
"filesToProcess",
"=",
"[",
"]",
"for",
"files",
"in",
"types",
":",
"filesToProcess",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"files",
")",
")",
"for",
"f",
"in",
"filesToProcess",
":",
"#tag.link(f)",
"audioFile",
"=",
"eyed3",
".",
"load",
"(",
"f",
")",
"if",
"useMp3TagsAsName",
"and",
"audioFile",
".",
"tag",
"!=",
"None",
":",
"artist",
"=",
"audioFile",
".",
"tag",
".",
"artist",
"title",
"=",
"audioFile",
".",
"tag",
".",
"title",
"if",
"artist",
"!=",
"None",
"and",
"title",
"!=",
"None",
":",
"if",
"len",
"(",
"title",
")",
">",
"0",
"and",
"len",
"(",
"artist",
")",
">",
"0",
":",
"wavFileName",
"=",
"ntpath",
".",
"split",
"(",
"f",
")",
"[",
"0",
"]",
"+",
"os",
".",
"sep",
"+",
"artist",
".",
"replace",
"(",
"\",\"",
",",
"\" \"",
")",
"+",
"\" --- \"",
"+",
"title",
".",
"replace",
"(",
"\",\"",
",",
"\" \"",
")",
"+",
"\".wav\"",
"else",
":",
"wavFileName",
"=",
"f",
".",
"replace",
"(",
"\".mp3\"",
",",
"\".wav\"",
")",
"else",
":",
"wavFileName",
"=",
"f",
".",
"replace",
"(",
"\".mp3\"",
",",
"\".wav\"",
")",
"else",
":",
"wavFileName",
"=",
"f",
".",
"replace",
"(",
"\".mp3\"",
",",
"\".wav\"",
")",
"command",
"=",
"\"avconv -i \\\"\"",
"+",
"f",
"+",
"\"\\\" -ar \"",
"+",
"str",
"(",
"Fs",
")",
"+",
"\" -ac \"",
"+",
"str",
"(",
"nC",
")",
"+",
"\" \\\"\"",
"+",
"wavFileName",
"+",
"\"\\\"\"",
"print",
"(",
"command",
")",
"os",
".",
"system",
"(",
"command",
".",
"decode",
"(",
"'unicode_escape'",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
".",
"replace",
"(",
"\"\\0\"",
",",
"\"\"",
")",
")"
] | This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used.
ARGUMENTS:
- dirName: the path of the folder where the MP3s are stored
- Fs: the sampling rate of the generated WAV files
- nC: the number of channels of the generated WAV files
- useMp3TagsAsName: True if the WAV filename is generated on MP3 tags | [
"This",
"function",
"converts",
"the",
"MP3",
"files",
"stored",
"in",
"a",
"folder",
"to",
"WAV",
".",
"If",
"required",
"the",
"output",
"names",
"of",
"the",
"WAV",
"files",
"are",
"based",
"on",
"MP3",
"tags",
"otherwise",
"the",
"same",
"names",
"are",
"used",
".",
"ARGUMENTS",
":",
"-",
"dirName",
":",
"the",
"path",
"of",
"the",
"folder",
"where",
"the",
"MP3s",
"are",
"stored",
"-",
"Fs",
":",
"the",
"sampling",
"rate",
"of",
"the",
"generated",
"WAV",
"files",
"-",
"nC",
":",
"the",
"number",
"of",
"channels",
"of",
"the",
"generated",
"WAV",
"files",
"-",
"useMp3TagsAsName",
":",
"True",
"if",
"the",
"WAV",
"filename",
"is",
"generated",
"on",
"MP3",
"tags"
] | python | train | 48.117647 |
MostAwesomeDude/gentleman | gentleman/base.py | https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/base.py#L1236-L1253 | def DeleteGroup(r, group, dry_run=False):
"""
Deletes a node group.
@type group: str
@param group: the node group to delete
@type dry_run: bool
    @param dry_run: whether to perform a dry run
@rtype: int
@return: job id
"""
query = {
"dry-run": dry_run,
}
return r.request("delete", "/2/groups/%s" % group, query=query) | [
"def",
"DeleteGroup",
"(",
"r",
",",
"group",
",",
"dry_run",
"=",
"False",
")",
":",
"query",
"=",
"{",
"\"dry-run\"",
":",
"dry_run",
",",
"}",
"return",
"r",
".",
"request",
"(",
"\"delete\"",
",",
"\"/2/groups/%s\"",
"%",
"group",
",",
"query",
"=",
"query",
")"
] | Deletes a node group.
@type group: str
@param group: the node group to delete
@type dry_run: bool
    @param dry_run: whether to perform a dry run
@rtype: int
@return: job id | [
"Deletes",
"a",
"node",
"group",
"."
] | python | train | 19.888889 |
ucsb-cs/submit | submit/helpers.py | https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/helpers.py#L434-L470 | def prepare_renderable(request, test_case_result, is_admin):
"""Return a completed Renderable."""
test_case = test_case_result.test_case
file_directory = request.registry.settings['file_directory']
sha1 = test_case_result.diff.sha1 if test_case_result.diff else None
kwargs = {'number': test_case.id, 'group': test_case.testable.name,
'name': test_case.name, 'points': test_case.points,
'status': test_case_result.status,
'extra': test_case_result.extra}
if test_case.output_type == 'image':
url = request.route_path('file_item', filename='_', _query={'raw': 1},
sha1sum=sha1) if sha1 else None
return ImageOutput(url=url, **kwargs)
elif test_case.output_type == 'text':
content = None
if sha1:
with open(File.file_path(file_directory, sha1)) as fp:
content = fp.read()
return TextOutput(content=content, **kwargs)
elif not test_case_result.diff: # Outputs match
return DiffWithMetadata(diff=None, **kwargs)
try:
with open(File.file_path(file_directory, sha1)) as fp:
diff = pickle.load(fp)
except (AttributeError, EOFError):
content = 'submit system mismatch -- requeue submission'
content += traceback.format_exc(1)
return TextOutput(content=content, **kwargs)
except Exception:
content = 'unexected error -- requeue submission\n'
content += traceback.format_exc(1)
return TextOutput(content=content, **kwargs)
diff.hide_expected = not is_admin and test_case.hide_expected
return DiffWithMetadata(diff=diff, **kwargs) | [
"def",
"prepare_renderable",
"(",
"request",
",",
"test_case_result",
",",
"is_admin",
")",
":",
"test_case",
"=",
"test_case_result",
".",
"test_case",
"file_directory",
"=",
"request",
".",
"registry",
".",
"settings",
"[",
"'file_directory'",
"]",
"sha1",
"=",
"test_case_result",
".",
"diff",
".",
"sha1",
"if",
"test_case_result",
".",
"diff",
"else",
"None",
"kwargs",
"=",
"{",
"'number'",
":",
"test_case",
".",
"id",
",",
"'group'",
":",
"test_case",
".",
"testable",
".",
"name",
",",
"'name'",
":",
"test_case",
".",
"name",
",",
"'points'",
":",
"test_case",
".",
"points",
",",
"'status'",
":",
"test_case_result",
".",
"status",
",",
"'extra'",
":",
"test_case_result",
".",
"extra",
"}",
"if",
"test_case",
".",
"output_type",
"==",
"'image'",
":",
"url",
"=",
"request",
".",
"route_path",
"(",
"'file_item'",
",",
"filename",
"=",
"'_'",
",",
"_query",
"=",
"{",
"'raw'",
":",
"1",
"}",
",",
"sha1sum",
"=",
"sha1",
")",
"if",
"sha1",
"else",
"None",
"return",
"ImageOutput",
"(",
"url",
"=",
"url",
",",
"*",
"*",
"kwargs",
")",
"elif",
"test_case",
".",
"output_type",
"==",
"'text'",
":",
"content",
"=",
"None",
"if",
"sha1",
":",
"with",
"open",
"(",
"File",
".",
"file_path",
"(",
"file_directory",
",",
"sha1",
")",
")",
"as",
"fp",
":",
"content",
"=",
"fp",
".",
"read",
"(",
")",
"return",
"TextOutput",
"(",
"content",
"=",
"content",
",",
"*",
"*",
"kwargs",
")",
"elif",
"not",
"test_case_result",
".",
"diff",
":",
"# Outputs match",
"return",
"DiffWithMetadata",
"(",
"diff",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"with",
"open",
"(",
"File",
".",
"file_path",
"(",
"file_directory",
",",
"sha1",
")",
")",
"as",
"fp",
":",
"diff",
"=",
"pickle",
".",
"load",
"(",
"fp",
")",
"except",
"(",
"AttributeError",
",",
"EOFError",
")",
":",
"content",
"=",
"'submit system mismatch -- requeue submission'",
"content",
"+=",
"traceback",
".",
"format_exc",
"(",
"1",
")",
"return",
"TextOutput",
"(",
"content",
"=",
"content",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"content",
"=",
"'unexected error -- requeue submission\\n'",
"content",
"+=",
"traceback",
".",
"format_exc",
"(",
"1",
")",
"return",
"TextOutput",
"(",
"content",
"=",
"content",
",",
"*",
"*",
"kwargs",
")",
"diff",
".",
"hide_expected",
"=",
"not",
"is_admin",
"and",
"test_case",
".",
"hide_expected",
"return",
"DiffWithMetadata",
"(",
"diff",
"=",
"diff",
",",
"*",
"*",
"kwargs",
")"
] | Return a completed Renderable. | [
"Return",
"a",
"completed",
"Renderable",
"."
] | python | train | 44.810811 |
ARMmbed/mbed-cloud-sdk-python | scripts/tag_and_release.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/scripts/tag_and_release.py#L32-L45 | def git_url_ssh_to_https(url):
"""Convert a git url
url will look like
https://github.com/ARMmbed/mbed-cloud-sdk-python.git
or
git@github.com:ARMmbed/mbed-cloud-sdk-python.git
we want:
https://${GITHUB_TOKEN}@github.com/ARMmbed/mbed-cloud-sdk-python-private.git
"""
path = url.split('github.com', 1)[1][1:].strip()
new = 'https://{GITHUB_TOKEN}@github.com/%s' % path
print('rewriting git url to: %s' % new)
return new.format(GITHUB_TOKEN=os.getenv('GITHUB_TOKEN')) | [
"def",
"git_url_ssh_to_https",
"(",
"url",
")",
":",
"path",
"=",
"url",
".",
"split",
"(",
"'github.com'",
",",
"1",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"new",
"=",
"'https://{GITHUB_TOKEN}@github.com/%s'",
"%",
"path",
"print",
"(",
"'rewriting git url to: %s'",
"%",
"new",
")",
"return",
"new",
".",
"format",
"(",
"GITHUB_TOKEN",
"=",
"os",
".",
"getenv",
"(",
"'GITHUB_TOKEN'",
")",
")"
] | Convert a git url
url will look like
https://github.com/ARMmbed/mbed-cloud-sdk-python.git
or
git@github.com:ARMmbed/mbed-cloud-sdk-python.git
we want:
https://${GITHUB_TOKEN}@github.com/ARMmbed/mbed-cloud-sdk-python-private.git | [
"Convert",
"a",
"git",
"url"
] | python | train | 35.714286 |
chaddotson/noaa_radar | noaa_radar/radar.py | https://github.com/chaddotson/noaa_radar/blob/ebb1e8d87d4b35b8942867446deced74b22a47cc/noaa_radar/radar.py#L112-L141 | def get_composite_reflectivity(self, tower_id, background='#000000', include_legend=True, include_counties=True,
include_warnings=True, include_highways=True, include_cities=True,
include_rivers=True, include_topography=True):
"""
Get the composite reflectivity for a noaa radar site.
:param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'.
:type tower_id: str
:param background: The hex background color.
:type background: str
:param include_legend: True - include legend.
:type include_legend: bool
:param include_counties: True - include county lines.
:type include_counties: bool
:param include_warnings: True - include warning lines.
:type include_warnings: bool
:param include_highways: True - include highways.
:type include_highways: bool
:param include_cities: True - include city labels.
:type include_cities: bool
:param include_rivers: True - include rivers
:type include_rivers: bool
:param include_topography: True - include topography
:type include_topography: bool
:rtype: PIL.Image
:return: A PIL.Image instance with the Radar composite reflectivity.
"""
return self._build_radar_image(tower_id, "NCR", background=background, include_legend=include_legend,
include_counties=include_counties, include_warnings=include_warnings,
include_highways=include_highways, include_cities=include_cities,
include_rivers=include_rivers, include_topography=include_topography) | [
"def",
"get_composite_reflectivity",
"(",
"self",
",",
"tower_id",
",",
"background",
"=",
"'#000000'",
",",
"include_legend",
"=",
"True",
",",
"include_counties",
"=",
"True",
",",
"include_warnings",
"=",
"True",
",",
"include_highways",
"=",
"True",
",",
"include_cities",
"=",
"True",
",",
"include_rivers",
"=",
"True",
",",
"include_topography",
"=",
"True",
")",
":",
"return",
"self",
".",
"_build_radar_image",
"(",
"tower_id",
",",
"\"NCR\"",
",",
"background",
"=",
"background",
",",
"include_legend",
"=",
"include_legend",
",",
"include_counties",
"=",
"include_counties",
",",
"include_warnings",
"=",
"include_warnings",
",",
"include_highways",
"=",
"include_highways",
",",
"include_cities",
"=",
"include_cities",
",",
"include_rivers",
"=",
"include_rivers",
",",
"include_topography",
"=",
"include_topography",
")"
] | Get the composite reflectivity for a noaa radar site.
:param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'.
:type tower_id: str
:param background: The hex background color.
:type background: str
:param include_legend: True - include legend.
:type include_legend: bool
:param include_counties: True - include county lines.
:type include_counties: bool
:param include_warnings: True - include warning lines.
:type include_warnings: bool
:param include_highways: True - include highways.
:type include_highways: bool
:param include_cities: True - include city labels.
:type include_cities: bool
:param include_rivers: True - include rivers
:type include_rivers: bool
:param include_topography: True - include topography
:type include_topography: bool
:rtype: PIL.Image
:return: A PIL.Image instance with the Radar composite reflectivity. | [
"Get",
"the",
"composite",
"reflectivity",
"for",
"a",
"noaa",
"radar",
"site",
".",
":",
"param",
"tower_id",
":",
"The",
"noaa",
"tower",
"id",
".",
"Ex",
"Huntsville",
"Al",
"-",
">",
"HTX",
".",
":",
"type",
"tower_id",
":",
"str",
":",
"param",
"background",
":",
"The",
"hex",
"background",
"color",
".",
":",
"type",
"background",
":",
"str",
":",
"param",
"include_legend",
":",
"True",
"-",
"include",
"legend",
".",
":",
"type",
"include_legend",
":",
"bool",
":",
"param",
"include_counties",
":",
"True",
"-",
"include",
"county",
"lines",
".",
":",
"type",
"include_counties",
":",
"bool",
":",
"param",
"include_warnings",
":",
"True",
"-",
"include",
"warning",
"lines",
".",
":",
"type",
"include_warnings",
":",
"bool",
":",
"param",
"include_highways",
":",
"True",
"-",
"include",
"highways",
".",
":",
"type",
"include_highways",
":",
"bool",
":",
"param",
"include_cities",
":",
"True",
"-",
"include",
"city",
"labels",
".",
":",
"type",
"include_cities",
":",
"bool",
":",
"param",
"include_rivers",
":",
"True",
"-",
"include",
"rivers",
":",
"type",
"include_rivers",
":",
"bool",
":",
"param",
"include_topography",
":",
"True",
"-",
"include",
"topography",
":",
"type",
"include_topography",
":",
"bool",
":",
"rtype",
":",
"PIL",
".",
"Image",
":",
"return",
":",
"A",
"PIL",
".",
"Image",
"instance",
"with",
"the",
"Radar",
"composite",
"reflectivity",
"."
] | python | train | 57.933333 |
willkg/everett | everett/manager.py | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L199-L214 | def get_key_from_envs(envs, key):
"""Return the value of a key from the given dict respecting namespaces.
Data can also be a list of data dicts.
"""
# if it barks like a dict, make it a list have to use `get` since dicts and
# lists both have __getitem__
if hasattr(envs, 'get'):
envs = [envs]
for env in envs:
if key in env:
return env[key]
return NO_VALUE | [
"def",
"get_key_from_envs",
"(",
"envs",
",",
"key",
")",
":",
"# if it barks like a dict, make it a list have to use `get` since dicts and",
"# lists both have __getitem__",
"if",
"hasattr",
"(",
"envs",
",",
"'get'",
")",
":",
"envs",
"=",
"[",
"envs",
"]",
"for",
"env",
"in",
"envs",
":",
"if",
"key",
"in",
"env",
":",
"return",
"env",
"[",
"key",
"]",
"return",
"NO_VALUE"
] | Return the value of a key from the given dict respecting namespaces.
Data can also be a list of data dicts. | [
"Return",
"the",
"value",
"of",
"a",
"key",
"from",
"the",
"given",
"dict",
"respecting",
"namespaces",
"."
] | python | train | 25.375 |
huyingxi/Synonyms | synonyms/utils.py | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L222-L227 | def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8') | [
"def",
"any2utf8",
"(",
"text",
",",
"errors",
"=",
"'strict'",
",",
"encoding",
"=",
"'utf8'",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"unicode",
")",
":",
"return",
"text",
".",
"encode",
"(",
"'utf8'",
")",
"# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8",
"return",
"unicode",
"(",
"text",
",",
"encoding",
",",
"errors",
"=",
"errors",
")",
".",
"encode",
"(",
"'utf8'",
")"
] | Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8. | [
"Convert",
"a",
"string",
"(",
"unicode",
"or",
"bytestring",
"in",
"encoding",
")",
"to",
"bytestring",
"in",
"utf8",
"."
] | python | train | 57.333333 |
kubernetes-client/python | kubernetes/client/apis/apps_v1_api.py | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/apps_v1_api.py#L3100-L3126 | def list_stateful_set_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_stateful_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
return data | [
"def",
"list_stateful_set_for_all_namespaces",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"list_stateful_set_for_all_namespaces_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_stateful_set_for_all_namespaces_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_stateful_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread. | [
"list",
"or",
"watch",
"objects",
"of",
"kind",
"StatefulSet",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"pass",
"async_req",
"=",
"True",
">>>",
"thread",
"=",
"api",
".",
"list_stateful_set_for_all_namespaces",
"(",
"async_req",
"=",
"True",
")",
">>>",
"result",
"=",
"thread",
".",
"get",
"()"
] | python | train | 169.222222 |
mlperf/training | reinforcement/tensorflow/minigo/ml_perf/reference_implementation.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/ml_perf/reference_implementation.py#L348-L357 | async def evaluate_trained_model(state):
"""Evaluate the most recently trained model against the current best model.
Args:
state: the RL loop State instance.
"""
return await evaluate_model(
state.train_model_path, state.best_model_path,
os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed) | [
"async",
"def",
"evaluate_trained_model",
"(",
"state",
")",
":",
"return",
"await",
"evaluate_model",
"(",
"state",
".",
"train_model_path",
",",
"state",
".",
"best_model_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"fsdb",
".",
"eval_dir",
"(",
")",
",",
"state",
".",
"train_model_name",
")",
",",
"state",
".",
"seed",
")"
] | Evaluate the most recently trained model against the current best model.
Args:
state: the RL loop State instance. | [
"Evaluate",
"the",
"most",
"recently",
"trained",
"model",
"against",
"the",
"current",
"best",
"model",
"."
] | python | train | 32.1 |
kwikteam/phy | phy/io/array.py | https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L414-L425 | def excerpts(n_samples, n_excerpts=None, excerpt_size=None):
"""Yield (start, end) where start is included and end is excluded."""
assert n_excerpts >= 2
step = _excerpt_step(n_samples,
n_excerpts=n_excerpts,
excerpt_size=excerpt_size)
for i in range(n_excerpts):
start = i * step
if start >= n_samples:
break
end = min(start + excerpt_size, n_samples)
yield start, end | [
"def",
"excerpts",
"(",
"n_samples",
",",
"n_excerpts",
"=",
"None",
",",
"excerpt_size",
"=",
"None",
")",
":",
"assert",
"n_excerpts",
">=",
"2",
"step",
"=",
"_excerpt_step",
"(",
"n_samples",
",",
"n_excerpts",
"=",
"n_excerpts",
",",
"excerpt_size",
"=",
"excerpt_size",
")",
"for",
"i",
"in",
"range",
"(",
"n_excerpts",
")",
":",
"start",
"=",
"i",
"*",
"step",
"if",
"start",
">=",
"n_samples",
":",
"break",
"end",
"=",
"min",
"(",
"start",
"+",
"excerpt_size",
",",
"n_samples",
")",
"yield",
"start",
",",
"end"
] | Yield (start, end) where start is included and end is excluded. | [
"Yield",
"(",
"start",
"end",
")",
"where",
"start",
"is",
"included",
"and",
"end",
"is",
"excluded",
"."
] | python | train | 39 |
agamdua/mixtures | mixtures/mixtures.py | https://github.com/agamdua/mixtures/blob/9c67f3684ddac53d8a636a4353a266e98d09e54c/mixtures/mixtures.py#L50-L61 | def get_fields(model_class):
"""
Pass in a mongo model class and extract all the attributes which
are mongoengine fields
Returns:
list of strings of field attributes
"""
return [
attr for attr, value in model_class.__dict__.items()
if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField)) # noqa
] | [
"def",
"get_fields",
"(",
"model_class",
")",
":",
"return",
"[",
"attr",
"for",
"attr",
",",
"value",
"in",
"model_class",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"issubclass",
"(",
"type",
"(",
"value",
")",
",",
"(",
"mongo",
".",
"base",
".",
"BaseField",
",",
"mongo",
".",
"EmbeddedDocumentField",
")",
")",
"# noqa",
"]"
] | Pass in a mongo model class and extract all the attributes which
are mongoengine fields
Returns:
list of strings of field attributes | [
"Pass",
"in",
"a",
"mongo",
"model",
"class",
"and",
"extract",
"all",
"the",
"attributes",
"which",
"are",
"mongoengine",
"fields"
] | python | train | 30.25 |
datadesk/django-bakery | bakery/static_views.py | https://github.com/datadesk/django-bakery/blob/e2feb13a66552a388fbcfaaacdd504bba08d3c69/bakery/static_views.py#L19-L83 | def serve(request, path, document_root=None, show_indexes=False, default=''):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve',
{'document_root' : '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
Modified by ticket #1013 to serve index.html files in the same manner
as Apache and other web servers.
https://code.djangoproject.com/ticket/1013
"""
# Clean up given path to only allow serving files below document_root.
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath) and default:
defaultpath = os.path.join(fullpath, default)
if os.path.exists(defaultpath):
fullpath = defaultpath
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404("Directory indexes are not allowed here.")
if not os.path.exists(fullpath):
raise Http404('"%s" does not exist' % fullpath)
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
if django.VERSION > (1, 6):
return HttpResponseNotModified(content_type=mimetype)
else:
return HttpResponseNotModified(mimetype=mimetype)
contents = open(fullpath, 'rb').read()
if django.VERSION > (1, 6):
response = HttpResponse(contents, content_type=mimetype)
else:
response = HttpResponse(contents, mimetype=mimetype)
response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
response["Content-Length"] = len(contents)
return response | [
"def",
"serve",
"(",
"request",
",",
"path",
",",
"document_root",
"=",
"None",
",",
"show_indexes",
"=",
"False",
",",
"default",
"=",
"''",
")",
":",
"# Clean up given path to only allow serving files below document_root.",
"path",
"=",
"posixpath",
".",
"normpath",
"(",
"unquote",
"(",
"path",
")",
")",
"path",
"=",
"path",
".",
"lstrip",
"(",
"'/'",
")",
"newpath",
"=",
"''",
"for",
"part",
"in",
"path",
".",
"split",
"(",
"'/'",
")",
":",
"if",
"not",
"part",
":",
"# Strip empty path components.",
"continue",
"drive",
",",
"part",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"part",
")",
"head",
",",
"part",
"=",
"os",
".",
"path",
".",
"split",
"(",
"part",
")",
"if",
"part",
"in",
"(",
"os",
".",
"curdir",
",",
"os",
".",
"pardir",
")",
":",
"# Strip '.' and '..' in path.",
"continue",
"newpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"newpath",
",",
"part",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"if",
"newpath",
"and",
"path",
"!=",
"newpath",
":",
"return",
"HttpResponseRedirect",
"(",
"newpath",
")",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"document_root",
",",
"newpath",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"fullpath",
")",
"and",
"default",
":",
"defaultpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fullpath",
",",
"default",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"defaultpath",
")",
":",
"fullpath",
"=",
"defaultpath",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"fullpath",
")",
":",
"if",
"show_indexes",
":",
"return",
"directory_index",
"(",
"newpath",
",",
"fullpath",
")",
"raise",
"Http404",
"(",
"\"Directory indexes are not allowed here.\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fullpath",
")",
":",
"raise",
"Http404",
"(",
"'\"%s\" does not exist'",
"%",
"fullpath",
")",
"# Respect the If-Modified-Since header.",
"statobj",
"=",
"os",
".",
"stat",
"(",
"fullpath",
")",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"fullpath",
")",
"[",
"0",
"]",
"or",
"'application/octet-stream'",
"if",
"not",
"was_modified_since",
"(",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_IF_MODIFIED_SINCE'",
")",
",",
"statobj",
"[",
"stat",
".",
"ST_MTIME",
"]",
",",
"statobj",
"[",
"stat",
".",
"ST_SIZE",
"]",
")",
":",
"if",
"django",
".",
"VERSION",
">",
"(",
"1",
",",
"6",
")",
":",
"return",
"HttpResponseNotModified",
"(",
"content_type",
"=",
"mimetype",
")",
"else",
":",
"return",
"HttpResponseNotModified",
"(",
"mimetype",
"=",
"mimetype",
")",
"contents",
"=",
"open",
"(",
"fullpath",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"if",
"django",
".",
"VERSION",
">",
"(",
"1",
",",
"6",
")",
":",
"response",
"=",
"HttpResponse",
"(",
"contents",
",",
"content_type",
"=",
"mimetype",
")",
"else",
":",
"response",
"=",
"HttpResponse",
"(",
"contents",
",",
"mimetype",
"=",
"mimetype",
")",
"response",
"[",
"\"Last-Modified\"",
"]",
"=",
"http_date",
"(",
"statobj",
"[",
"stat",
".",
"ST_MTIME",
"]",
")",
"response",
"[",
"\"Content-Length\"",
"]",
"=",
"len",
"(",
"contents",
")",
"return",
"response"
] | Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve',
{'document_root' : '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
Modified by ticket #1013 to serve index.html files in the same manner
as Apache and other web servers.
https://code.djangoproject.com/ticket/1013 | [
"Serve",
"static",
"files",
"below",
"a",
"given",
"point",
"in",
"the",
"directory",
"structure",
"."
] | python | train | 41.6 |
googlefonts/ufo2ft | Lib/ufo2ft/featureWriters/__init__.py | https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/__init__.py#L34-L59 | def isValidFeatureWriter(klass):
"""Return True if 'klass' is a valid feature writer class.
A valid feature writer class is a class (of type 'type'), that has
two required attributes:
1) 'tableTag' (str), which can be "GSUB", "GPOS", or other similar tags.
2) 'write' (bound method), with the signature matching the same method
from the BaseFeatureWriter class:
def write(self, font, feaFile, compiler=None)
"""
if not isclass(klass):
logger.error("%r is not a class", klass)
return False
if not hasattr(klass, "tableTag"):
logger.error("%r does not have required 'tableTag' attribute", klass)
return False
if not hasattr(klass, "write"):
logger.error("%r does not have a required 'write' method", klass)
return False
if (
getargspec(klass.write).args
!= getargspec(BaseFeatureWriter.write).args
):
logger.error("%r 'write' method has incorrect signature", klass)
return False
return True | [
"def",
"isValidFeatureWriter",
"(",
"klass",
")",
":",
"if",
"not",
"isclass",
"(",
"klass",
")",
":",
"logger",
".",
"error",
"(",
"\"%r is not a class\"",
",",
"klass",
")",
"return",
"False",
"if",
"not",
"hasattr",
"(",
"klass",
",",
"\"tableTag\"",
")",
":",
"logger",
".",
"error",
"(",
"\"%r does not have required 'tableTag' attribute\"",
",",
"klass",
")",
"return",
"False",
"if",
"not",
"hasattr",
"(",
"klass",
",",
"\"write\"",
")",
":",
"logger",
".",
"error",
"(",
"\"%r does not have a required 'write' method\"",
",",
"klass",
")",
"return",
"False",
"if",
"(",
"getargspec",
"(",
"klass",
".",
"write",
")",
".",
"args",
"!=",
"getargspec",
"(",
"BaseFeatureWriter",
".",
"write",
")",
".",
"args",
")",
":",
"logger",
".",
"error",
"(",
"\"%r 'write' method has incorrect signature\"",
",",
"klass",
")",
"return",
"False",
"return",
"True"
] | Return True if 'klass' is a valid feature writer class.
A valid feature writer class is a class (of type 'type'), that has
two required attributes:
1) 'tableTag' (str), which can be "GSUB", "GPOS", or other similar tags.
2) 'write' (bound method), with the signature matching the same method
from the BaseFeatureWriter class:
def write(self, font, feaFile, compiler=None) | [
"Return",
"True",
"if",
"klass",
"is",
"a",
"valid",
"feature",
"writer",
"class",
".",
"A",
"valid",
"feature",
"writer",
"class",
"is",
"a",
"class",
"(",
"of",
"type",
"type",
")",
"that",
"has",
"two",
"required",
"attributes",
":",
"1",
")",
"tableTag",
"(",
"str",
")",
"which",
"can",
"be",
"GSUB",
"GPOS",
"or",
"other",
"similar",
"tags",
".",
"2",
")",
"write",
"(",
"bound",
"method",
")",
"with",
"the",
"signature",
"matching",
"the",
"same",
"method",
"from",
"the",
"BaseFeatureWriter",
"class",
":"
] | python | train | 38.846154 |
flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L6659-L6730 | def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):
"""displays selected data from (files written by) the class `CMADataLogger`.
Arguments
---------
`idx`
indices corresponding to rows in the data file;
if idx is a scalar (int), the first two, then every idx-th,
and the last three rows are displayed. Too large index values are removed.
Example
-------
>>> import cma, numpy as np
>>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data
>>> assert res[1] < 1e-9
>>> assert res[2] < 4400
>>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data
>>> l.disp([0,-1]) # first and last
>>> l.disp(20) # some first/last and every 20-th line
>>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last
>>> l.disp(np.r_[0, -10:0]) # first and ten last
>>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...)
Details
-------
        The data line with the best f-value is displayed as the last line.
:See: `disp()`
"""
filenameprefix = self.name_prefix
def printdatarow(dat, iteration):
"""print data of iteration i"""
i = np.where(dat.f[:, 0] == iteration)[0][0]
j = np.where(dat.std[:, 0] == iteration)[0][0]
print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) +
' %5.1e' % (dat.f[i, 3]) +
' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:]))
dat = CMADataLogger(filenameprefix).load()
ndata = dat.f.shape[0]
# map index to iteration number, is difficult if not all iteration numbers exist
# idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
# otherwise:
if idx is None:
idx = 100
if isscalar(idx):
# idx = np.arange(0, ndata, idx)
if idx:
idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0]
else:
idx = np.r_[0, 1, -3:0]
idx = array(idx)
idx = idx[idx < ndata]
idx = idx[-idx <= ndata]
iters = dat.f[idx, 0]
idxbest = np.argmin(dat.f[:, 5])
iterbest = dat.f[idxbest, 0]
if len(iters) == 1:
printdatarow(dat, iters[0])
else:
self.disp_header()
for i in iters:
printdatarow(dat, i)
self.disp_header()
printdatarow(dat, iterbest)
sys.stdout.flush() | [
"def",
"disp",
"(",
"self",
",",
"idx",
"=",
"100",
")",
":",
"# r_[0:5,1e2:1e9:1e2,-10:0]):",
"filenameprefix",
"=",
"self",
".",
"name_prefix",
"def",
"printdatarow",
"(",
"dat",
",",
"iteration",
")",
":",
"\"\"\"print data of iteration i\"\"\"",
"i",
"=",
"np",
".",
"where",
"(",
"dat",
".",
"f",
"[",
":",
",",
"0",
"]",
"==",
"iteration",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"j",
"=",
"np",
".",
"where",
"(",
"dat",
".",
"std",
"[",
":",
",",
"0",
"]",
"==",
"iteration",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"print",
"(",
"'%5d'",
"%",
"(",
"int",
"(",
"dat",
".",
"f",
"[",
"i",
",",
"0",
"]",
")",
")",
"+",
"' %6d'",
"%",
"(",
"int",
"(",
"dat",
".",
"f",
"[",
"i",
",",
"1",
"]",
")",
")",
"+",
"' %.14e'",
"%",
"(",
"dat",
".",
"f",
"[",
"i",
",",
"5",
"]",
")",
"+",
"' %5.1e'",
"%",
"(",
"dat",
".",
"f",
"[",
"i",
",",
"3",
"]",
")",
"+",
"' %6.2e'",
"%",
"(",
"max",
"(",
"dat",
".",
"std",
"[",
"j",
",",
"5",
":",
"]",
")",
")",
"+",
"' %6.2e'",
"%",
"min",
"(",
"dat",
".",
"std",
"[",
"j",
",",
"5",
":",
"]",
")",
")",
"dat",
"=",
"CMADataLogger",
"(",
"filenameprefix",
")",
".",
"load",
"(",
")",
"ndata",
"=",
"dat",
".",
"f",
".",
"shape",
"[",
"0",
"]",
"# map index to iteration number, is difficult if not all iteration numbers exist",
"# idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long",
"# otherwise:",
"if",
"idx",
"is",
"None",
":",
"idx",
"=",
"100",
"if",
"isscalar",
"(",
"idx",
")",
":",
"# idx = np.arange(0, ndata, idx)",
"if",
"idx",
":",
"idx",
"=",
"np",
".",
"r_",
"[",
"0",
",",
"1",
",",
"idx",
":",
"ndata",
"-",
"3",
":",
"idx",
",",
"-",
"3",
":",
"0",
"]",
"else",
":",
"idx",
"=",
"np",
".",
"r_",
"[",
"0",
",",
"1",
",",
"-",
"3",
":",
"0",
"]",
"idx",
"=",
"array",
"(",
"idx",
")",
"idx",
"=",
"idx",
"[",
"idx",
"<",
"ndata",
"]",
"idx",
"=",
"idx",
"[",
"-",
"idx",
"<=",
"ndata",
"]",
"iters",
"=",
"dat",
".",
"f",
"[",
"idx",
",",
"0",
"]",
"idxbest",
"=",
"np",
".",
"argmin",
"(",
"dat",
".",
"f",
"[",
":",
",",
"5",
"]",
")",
"iterbest",
"=",
"dat",
".",
"f",
"[",
"idxbest",
",",
"0",
"]",
"if",
"len",
"(",
"iters",
")",
"==",
"1",
":",
"printdatarow",
"(",
"dat",
",",
"iters",
"[",
"0",
"]",
")",
"else",
":",
"self",
".",
"disp_header",
"(",
")",
"for",
"i",
"in",
"iters",
":",
"printdatarow",
"(",
"dat",
",",
"i",
")",
"self",
".",
"disp_header",
"(",
")",
"printdatarow",
"(",
"dat",
",",
"iterbest",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | displays selected data from (files written by) the class `CMADataLogger`.
Arguments
---------
`idx`
indices corresponding to rows in the data file;
if idx is a scalar (int), the first two, then every idx-th,
and the last three rows are displayed. Too large index values are removed.
Example
-------
>>> import cma, numpy as np
>>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data
>>> assert res[1] < 1e-9
>>> assert res[2] < 4400
>>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data
>>> l.disp([0,-1]) # first and last
>>> l.disp(20) # some first/last and every 20-th line
>>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last
>>> l.disp(np.r_[0, -10:0]) # first and ten last
>>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...)
Details
-------
        The data line with the best f-value is displayed as the last line.
:See: `disp()` | [
"displays",
"selected",
"data",
"from",
"(",
"files",
"written",
"by",
")",
"the",
"class",
"CMADataLogger",
"."
] | python | train | 36.597222 |
kejbaly2/metrique | metrique/result.py | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L227-L275 | def get_dates_range(self, scale='auto', start=None, end=None,
date_max='2010-01-01'):
'''
Returns a list of dates sampled according to the specified parameters.
:param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly',
'quarterly', 'yearly'}
Scale specifies the sampling intervals.
'auto' will heuristically choose a scale for quick processing
:param start: First date that will be included.
:param end: Last date that will be included
'''
if scale not in ['auto', 'maximum', 'daily', 'weekly', 'monthly',
'quarterly', 'yearly']:
raise ValueError('Incorrect scale: %s' % scale)
start = Timestamp(start or self._start.min() or date_max)
# FIXME: start != start is true for NaN objects... is NaT the same?
start = Timestamp(date_max) if repr(start) == 'NaT' else start
end = Timestamp(end or max(Timestamp(self._end.max()),
self._start.max()))
# FIXME: end != end ?
end = datetime.utcnow() if repr(end) == 'NaT' else end
start = start if self.check_in_bounds(start) else self._lbound
end = end if self.check_in_bounds(end) else self._rbound
if scale == 'auto':
scale = self._auto_select_scale(start, end)
if scale == 'maximum':
start_dts = list(self._start.dropna().values)
end_dts = list(self._end.dropna().values)
dts = map(Timestamp, set(start_dts + end_dts))
dts = filter(lambda ts: self.check_in_bounds(ts) and
ts >= start and ts <= end, dts)
return dts
freq = dict(daily='D', weekly='W', monthly='M', quarterly='3M',
yearly='12M')
offset = dict(daily=off.Day(n=0), weekly=off.Week(),
monthly=off.MonthEnd(), quarterly=off.QuarterEnd(),
yearly=off.YearEnd())
# for some reason, weekly date range gives one week less:
end_ = end + off.Week() if scale == 'weekly' else end
ret = list(pd.date_range(start + offset[scale], end_,
freq=freq[scale]))
ret = [dt for dt in ret if dt <= end]
ret = [start] + ret if ret and start < ret[0] else ret
ret = ret + [end] if ret and end > ret[-1] else ret
ret = filter(lambda ts: self.check_in_bounds(ts), ret)
return ret | [
"def",
"get_dates_range",
"(",
"self",
",",
"scale",
"=",
"'auto'",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"date_max",
"=",
"'2010-01-01'",
")",
":",
"if",
"scale",
"not",
"in",
"[",
"'auto'",
",",
"'maximum'",
",",
"'daily'",
",",
"'weekly'",
",",
"'monthly'",
",",
"'quarterly'",
",",
"'yearly'",
"]",
":",
"raise",
"ValueError",
"(",
"'Incorrect scale: %s'",
"%",
"scale",
")",
"start",
"=",
"Timestamp",
"(",
"start",
"or",
"self",
".",
"_start",
".",
"min",
"(",
")",
"or",
"date_max",
")",
"# FIXME: start != start is true for NaN objects... is NaT the same?",
"start",
"=",
"Timestamp",
"(",
"date_max",
")",
"if",
"repr",
"(",
"start",
")",
"==",
"'NaT'",
"else",
"start",
"end",
"=",
"Timestamp",
"(",
"end",
"or",
"max",
"(",
"Timestamp",
"(",
"self",
".",
"_end",
".",
"max",
"(",
")",
")",
",",
"self",
".",
"_start",
".",
"max",
"(",
")",
")",
")",
"# FIXME: end != end ?",
"end",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"repr",
"(",
"end",
")",
"==",
"'NaT'",
"else",
"end",
"start",
"=",
"start",
"if",
"self",
".",
"check_in_bounds",
"(",
"start",
")",
"else",
"self",
".",
"_lbound",
"end",
"=",
"end",
"if",
"self",
".",
"check_in_bounds",
"(",
"end",
")",
"else",
"self",
".",
"_rbound",
"if",
"scale",
"==",
"'auto'",
":",
"scale",
"=",
"self",
".",
"_auto_select_scale",
"(",
"start",
",",
"end",
")",
"if",
"scale",
"==",
"'maximum'",
":",
"start_dts",
"=",
"list",
"(",
"self",
".",
"_start",
".",
"dropna",
"(",
")",
".",
"values",
")",
"end_dts",
"=",
"list",
"(",
"self",
".",
"_end",
".",
"dropna",
"(",
")",
".",
"values",
")",
"dts",
"=",
"map",
"(",
"Timestamp",
",",
"set",
"(",
"start_dts",
"+",
"end_dts",
")",
")",
"dts",
"=",
"filter",
"(",
"lambda",
"ts",
":",
"self",
".",
"check_in_bounds",
"(",
"ts",
")",
"and",
"ts",
">=",
"start",
"and",
"ts",
"<=",
"end",
",",
"dts",
")",
"return",
"dts",
"freq",
"=",
"dict",
"(",
"daily",
"=",
"'D'",
",",
"weekly",
"=",
"'W'",
",",
"monthly",
"=",
"'M'",
",",
"quarterly",
"=",
"'3M'",
",",
"yearly",
"=",
"'12M'",
")",
"offset",
"=",
"dict",
"(",
"daily",
"=",
"off",
".",
"Day",
"(",
"n",
"=",
"0",
")",
",",
"weekly",
"=",
"off",
".",
"Week",
"(",
")",
",",
"monthly",
"=",
"off",
".",
"MonthEnd",
"(",
")",
",",
"quarterly",
"=",
"off",
".",
"QuarterEnd",
"(",
")",
",",
"yearly",
"=",
"off",
".",
"YearEnd",
"(",
")",
")",
"# for some reason, weekly date range gives one week less:",
"end_",
"=",
"end",
"+",
"off",
".",
"Week",
"(",
")",
"if",
"scale",
"==",
"'weekly'",
"else",
"end",
"ret",
"=",
"list",
"(",
"pd",
".",
"date_range",
"(",
"start",
"+",
"offset",
"[",
"scale",
"]",
",",
"end_",
",",
"freq",
"=",
"freq",
"[",
"scale",
"]",
")",
")",
"ret",
"=",
"[",
"dt",
"for",
"dt",
"in",
"ret",
"if",
"dt",
"<=",
"end",
"]",
"ret",
"=",
"[",
"start",
"]",
"+",
"ret",
"if",
"ret",
"and",
"start",
"<",
"ret",
"[",
"0",
"]",
"else",
"ret",
"ret",
"=",
"ret",
"+",
"[",
"end",
"]",
"if",
"ret",
"and",
"end",
">",
"ret",
"[",
"-",
"1",
"]",
"else",
"ret",
"ret",
"=",
"filter",
"(",
"lambda",
"ts",
":",
"self",
".",
"check_in_bounds",
"(",
"ts",
")",
",",
"ret",
")",
"return",
"ret"
] | Returns a list of dates sampled according to the specified parameters.
:param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly',
'quarterly', 'yearly'}
Scale specifies the sampling intervals.
'auto' will heuristically choose a scale for quick processing
:param start: First date that will be included.
:param end: Last date that will be included | [
"Returns",
"a",
"list",
"of",
"dates",
"sampled",
"according",
"to",
"the",
"specified",
"parameters",
"."
] | python | train | 50.204082 |
cmaugg/pystatemachine | pystatemachine.py | https://github.com/cmaugg/pystatemachine/blob/5a6cd9cbd88180a86569cda1e564331753299c6c/pystatemachine.py#L151-L182 | def acts_as_state_machine(cls):
"""
a decorator which sets two properties on a class:
* the 'current_state' property: a read-only property, returning the state machine's current state, as 'State' object
* the 'states' property: a tuple of all valid state machine states, as 'State' objects
class objects may use current_state and states freely
:param cls:
:return:
"""
assert not hasattr(cls, 'current_state'), '{0} already has a "current_state" attribute!'.format(cls)
assert not hasattr(cls, 'states'), '{0} already has a "states" attribute!'.format(cls)
def get_states(obj):
return StateInfo.get_states(obj.__class__)
def is_transition_failure_handler(obj):
return all([
any([
inspect.ismethod(obj), # python2
inspect.isfunction(obj), # python3
]),
getattr(obj, '___pystatemachine_is_transition_failure_handler', False),
])
transition_failure_handlers = sorted(
[value for name, value in inspect.getmembers(cls, is_transition_failure_handler)],
key=lambda m: getattr(m, '___pystatemachine_transition_failure_handler_calling_sequence', 0),
)
setattr(cls, '___pystatemachine_transition_failure_handlers', transition_failure_handlers)
cls.current_state = property(fget=StateInfo.get_current_state)
cls.states = property(fget=get_states)
return cls | [
"def",
"acts_as_state_machine",
"(",
"cls",
")",
":",
"assert",
"not",
"hasattr",
"(",
"cls",
",",
"'current_state'",
")",
",",
"'{0} already has a \"current_state\" attribute!'",
".",
"format",
"(",
"cls",
")",
"assert",
"not",
"hasattr",
"(",
"cls",
",",
"'states'",
")",
",",
"'{0} already has a \"states\" attribute!'",
".",
"format",
"(",
"cls",
")",
"def",
"get_states",
"(",
"obj",
")",
":",
"return",
"StateInfo",
".",
"get_states",
"(",
"obj",
".",
"__class__",
")",
"def",
"is_transition_failure_handler",
"(",
"obj",
")",
":",
"return",
"all",
"(",
"[",
"any",
"(",
"[",
"inspect",
".",
"ismethod",
"(",
"obj",
")",
",",
"# python2",
"inspect",
".",
"isfunction",
"(",
"obj",
")",
",",
"# python3",
"]",
")",
",",
"getattr",
"(",
"obj",
",",
"'___pystatemachine_is_transition_failure_handler'",
",",
"False",
")",
",",
"]",
")",
"transition_failure_handlers",
"=",
"sorted",
"(",
"[",
"value",
"for",
"name",
",",
"value",
"in",
"inspect",
".",
"getmembers",
"(",
"cls",
",",
"is_transition_failure_handler",
")",
"]",
",",
"key",
"=",
"lambda",
"m",
":",
"getattr",
"(",
"m",
",",
"'___pystatemachine_transition_failure_handler_calling_sequence'",
",",
"0",
")",
",",
")",
"setattr",
"(",
"cls",
",",
"'___pystatemachine_transition_failure_handlers'",
",",
"transition_failure_handlers",
")",
"cls",
".",
"current_state",
"=",
"property",
"(",
"fget",
"=",
"StateInfo",
".",
"get_current_state",
")",
"cls",
".",
"states",
"=",
"property",
"(",
"fget",
"=",
"get_states",
")",
"return",
"cls"
] | a decorator which sets two properties on a class:
* the 'current_state' property: a read-only property, returning the state machine's current state, as 'State' object
* the 'states' property: a tuple of all valid state machine states, as 'State' objects
class objects may use current_state and states freely
:param cls:
:return: | [
"a",
"decorator",
"which",
"sets",
"two",
"properties",
"on",
"a",
"class",
":",
"*",
"the",
"current_state",
"property",
":",
"a",
"read",
"-",
"only",
"property",
"returning",
"the",
"state",
"machine",
"s",
"current",
"state",
"as",
"State",
"object",
"*",
"the",
"states",
"property",
":",
"a",
"tuple",
"of",
"all",
"valid",
"state",
"machine",
"states",
"as",
"State",
"objects",
"class",
"objects",
"may",
"use",
"current_state",
"and",
"states",
"freely",
":",
"param",
"cls",
":",
":",
"return",
":"
] | python | train | 44.03125 |
saltstack/salt | salt/utils/vmware.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3475-L3523 | def power_cycle_vm(virtual_machine, action='on'):
'''
    Powers on/off a virtual machine specified by its name.
virtual_machine
vim.VirtualMachine object to power on/off virtual machine
action
Operation option to power on/off the machine
'''
if action == 'on':
try:
task = virtual_machine.PowerOn()
task_name = 'power on'
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
elif action == 'off':
try:
task = virtual_machine.PowerOff()
task_name = 'power off'
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
else:
raise salt.exceptions.ArgumentValueError('The given action is not supported')
try:
wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
except salt.exceptions.VMwareFileNotFoundError as exc:
raise salt.exceptions.VMwarePowerOnError(' '.join([
'An error occurred during power',
'operation, a file was not found: {0}'.format(exc)]))
return virtual_machine | [
"def",
"power_cycle_vm",
"(",
"virtual_machine",
",",
"action",
"=",
"'on'",
")",
":",
"if",
"action",
"==",
"'on'",
":",
"try",
":",
"task",
"=",
"virtual_machine",
".",
"PowerOn",
"(",
")",
"task_name",
"=",
"'power on'",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")",
"elif",
"action",
"==",
"'off'",
":",
"try",
":",
"task",
"=",
"virtual_machine",
".",
"PowerOff",
"(",
")",
"task_name",
"=",
"'power off'",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")",
"else",
":",
"raise",
"salt",
".",
"exceptions",
".",
"ArgumentValueError",
"(",
"'The given action is not supported'",
")",
"try",
":",
"wait_for_task",
"(",
"task",
",",
"get_managed_object_name",
"(",
"virtual_machine",
")",
",",
"task_name",
")",
"except",
"salt",
".",
"exceptions",
".",
"VMwareFileNotFoundError",
"as",
"exc",
":",
"raise",
"salt",
".",
"exceptions",
".",
"VMwarePowerOnError",
"(",
"' '",
".",
"join",
"(",
"[",
"'An error occurred during power'",
",",
"'operation, a file was not found: {0}'",
".",
"format",
"(",
"exc",
")",
"]",
")",
")",
"return",
"virtual_machine"
] | Powers on/off a virtual machine specified by its name.
virtual_machine
vim.VirtualMachine object to power on/off virtual machine
action
Operation option to power on/off the machine | [
"Powers",
"on",
"/",
"off",
"a",
"virtual",
"machine",
"specified",
"by",
"it",
"s",
"name",
"."
] | python | train | 39.061224 |
chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L425-L434 | def __fix_field_date(self, item, attribute):
"""Fix possible errors in the field date"""
field_date = str_to_datetime(item[attribute])
try:
_ = int(field_date.strftime("%z")[0:3])
except ValueError:
logger.warning("%s in commit %s has a wrong format", attribute, item['commit'])
item[attribute] = field_date.replace(tzinfo=None).isoformat() | [
"def",
"__fix_field_date",
"(",
"self",
",",
"item",
",",
"attribute",
")",
":",
"field_date",
"=",
"str_to_datetime",
"(",
"item",
"[",
"attribute",
"]",
")",
"try",
":",
"_",
"=",
"int",
"(",
"field_date",
".",
"strftime",
"(",
"\"%z\"",
")",
"[",
"0",
":",
"3",
"]",
")",
"except",
"ValueError",
":",
"logger",
".",
"warning",
"(",
"\"%s in commit %s has a wrong format\"",
",",
"attribute",
",",
"item",
"[",
"'commit'",
"]",
")",
"item",
"[",
"attribute",
"]",
"=",
"field_date",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
".",
"isoformat",
"(",
")"
] | Fix possible errors in the field date | [
"Fix",
"possible",
"errors",
"in",
"the",
"field",
"date"
] | python | train | 40.1 |
mila/pyoo | pyoo.py | https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1767-L1774 | def date_from_number(self, value):
"""
Converts a float value to corresponding datetime instance.
"""
if not isinstance(value, numbers.Real):
return None
delta = datetime.timedelta(days=value)
return self._null_date + delta | [
"def",
"date_from_number",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"numbers",
".",
"Real",
")",
":",
"return",
"None",
"delta",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"value",
")",
"return",
"self",
".",
"_null_date",
"+",
"delta"
] | Converts a float value to corresponding datetime instance. | [
"Converts",
"a",
"float",
"value",
"to",
"corresponding",
"datetime",
"instance",
"."
] | python | train | 34.5 |
numenta/htmresearch | htmresearch/algorithms/TM.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/TM.py#L1036-L1079 | def computePhase2(self, doLearn=False):
"""
This is the phase 2 of learning, inference and multistep prediction. During
    this phase, all the cells with lateral support have their predictedState
turned on and the firing segments are queued up for updates.
Parameters:
--------------------------------------------
doLearn: Boolean flag to queue segment updates during learning
retval: ?
"""
# Phase 2: compute predicted state for each cell
# - if a segment has enough horizontal connections firing because of
# bottomUpInput, it's set to be predicting, and we queue up the segment
# for reinforcement,
# - if pooling is on, try to find the best weakly activated segment to
# reinforce it, else create a new pooling segment.
for c in xrange(self.numberOfCols):
buPredicted = False # whether any cell in the column is predicted
for i in xrange(self.cellsPerColumn):
# Iterate over each of the segments of this cell
maxConfidence = 0
for s in self.cells[c][i]:
# sum(connected synapses) >= activationThreshold?
if self.isSegmentActive(s, self.activeState['t']):
self.predictedState['t'][c,i] = 1
buPredicted = True
maxConfidence = max(maxConfidence, s.dutyCycle(readOnly=True))
if doLearn:
s.totalActivations += 1 # increment activationFrequency
s.lastActiveIteration = self.iterationIdx
# mark this segment for learning
activeUpdate = self.getSegmentActiveSynapses(c,i,s,'t')
activeUpdate.phase1Flag = False
self.addToSegmentUpdates(c, i, activeUpdate)
# Store the max confidence seen among all the weak and strong segments
# as the cell's confidence.
self.confidence['t'][c,i] = maxConfidence | [
"def",
"computePhase2",
"(",
"self",
",",
"doLearn",
"=",
"False",
")",
":",
"# Phase 2: compute predicted state for each cell",
"# - if a segment has enough horizontal connections firing because of",
"# bottomUpInput, it's set to be predicting, and we queue up the segment",
"# for reinforcement,",
"# - if pooling is on, try to find the best weakly activated segment to",
"# reinforce it, else create a new pooling segment.",
"for",
"c",
"in",
"xrange",
"(",
"self",
".",
"numberOfCols",
")",
":",
"buPredicted",
"=",
"False",
"# whether any cell in the column is predicted",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"cellsPerColumn",
")",
":",
"# Iterate over each of the segments of this cell",
"maxConfidence",
"=",
"0",
"for",
"s",
"in",
"self",
".",
"cells",
"[",
"c",
"]",
"[",
"i",
"]",
":",
"# sum(connected synapses) >= activationThreshold?",
"if",
"self",
".",
"isSegmentActive",
"(",
"s",
",",
"self",
".",
"activeState",
"[",
"'t'",
"]",
")",
":",
"self",
".",
"predictedState",
"[",
"'t'",
"]",
"[",
"c",
",",
"i",
"]",
"=",
"1",
"buPredicted",
"=",
"True",
"maxConfidence",
"=",
"max",
"(",
"maxConfidence",
",",
"s",
".",
"dutyCycle",
"(",
"readOnly",
"=",
"True",
")",
")",
"if",
"doLearn",
":",
"s",
".",
"totalActivations",
"+=",
"1",
"# increment activationFrequency",
"s",
".",
"lastActiveIteration",
"=",
"self",
".",
"iterationIdx",
"# mark this segment for learning",
"activeUpdate",
"=",
"self",
".",
"getSegmentActiveSynapses",
"(",
"c",
",",
"i",
",",
"s",
",",
"'t'",
")",
"activeUpdate",
".",
"phase1Flag",
"=",
"False",
"self",
".",
"addToSegmentUpdates",
"(",
"c",
",",
"i",
",",
"activeUpdate",
")",
"# Store the max confidence seen among all the weak and strong segments",
"# as the cell's confidence.",
"self",
".",
"confidence",
"[",
"'t'",
"]",
"[",
"c",
",",
"i",
"]",
"=",
"maxConfidence"
] | This is the phase 2 of learning, inference and multistep prediction. During
    this phase, all the cells with lateral support have their predictedState
turned on and the firing segments are queued up for updates.
Parameters:
--------------------------------------------
doLearn: Boolean flag to queue segment updates during learning
retval: ? | [
"This",
"is",
"the",
"phase",
"2",
"of",
"learning",
"inference",
"and",
"multistep",
"prediction",
".",
"During",
"this",
"phase",
"all",
"the",
"cell",
"with",
"lateral",
"support",
"have",
"their",
"predictedState",
"turned",
"on",
"and",
"the",
"firing",
"segments",
"are",
"queued",
"up",
"for",
"updates",
"."
] | python | train | 42 |
cloud-custodian/cloud-custodian | tools/sandbox/zerodark/zerodark/utils.py | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/sandbox/zerodark/zerodark/utils.py#L25-L30 | def row_factory(cursor, row):
"""Returns a sqlite row factory that returns a dictionary"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d | [
"def",
"row_factory",
"(",
"cursor",
",",
"row",
")",
":",
"d",
"=",
"{",
"}",
"for",
"idx",
",",
"col",
"in",
"enumerate",
"(",
"cursor",
".",
"description",
")",
":",
"d",
"[",
"col",
"[",
"0",
"]",
"]",
"=",
"row",
"[",
"idx",
"]",
"return",
"d"
] | Returns a sqlite row factory that returns a dictionary | [
"Returns",
"a",
"sqlite",
"row",
"factory",
"that",
"returns",
"a",
"dictionary"
] | python | train | 32.166667 |
astropy/photutils | photutils/psf/epsf.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L862-L916 | def _interpolate_missing_data(data, mask, method='cubic'):
"""
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
        A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
        The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
"""
from scipy import interpolate
data_interp = np.array(data, copy=True)
if len(data_interp.shape) != 2:
raise ValueError('data must be a 2D array.')
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
y, x = np.indices(data_interp.shape)
xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0]
z = data_interp[~mask].ravel()
if method == 'nearest':
interpol = interpolate.NearestNDInterpolator(xy, z)
elif method == 'cubic':
interpol = interpolate.CloughTocher2DInterpolator(xy, z)
else:
raise ValueError('Unsupported interpolation method.')
xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0]
data_interp[mask] = interpol(xy_missing)
return data_interp | [
"def",
"_interpolate_missing_data",
"(",
"data",
",",
"mask",
",",
"method",
"=",
"'cubic'",
")",
":",
"from",
"scipy",
"import",
"interpolate",
"data_interp",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"True",
")",
"if",
"len",
"(",
"data_interp",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'data must be a 2D array.'",
")",
"if",
"mask",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'mask and data must have the same shape.'",
")",
"y",
",",
"x",
"=",
"np",
".",
"indices",
"(",
"data_interp",
".",
"shape",
")",
"xy",
"=",
"np",
".",
"dstack",
"(",
"(",
"x",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
",",
"y",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
")",
")",
"[",
"0",
"]",
"z",
"=",
"data_interp",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
"if",
"method",
"==",
"'nearest'",
":",
"interpol",
"=",
"interpolate",
".",
"NearestNDInterpolator",
"(",
"xy",
",",
"z",
")",
"elif",
"method",
"==",
"'cubic'",
":",
"interpol",
"=",
"interpolate",
".",
"CloughTocher2DInterpolator",
"(",
"xy",
",",
"z",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported interpolation method.'",
")",
"xy_missing",
"=",
"np",
".",
"dstack",
"(",
"(",
"x",
"[",
"mask",
"]",
".",
"ravel",
"(",
")",
",",
"y",
"[",
"mask",
"]",
".",
"ravel",
"(",
")",
")",
")",
"[",
"0",
"]",
"data_interp",
"[",
"mask",
"]",
"=",
"interpol",
"(",
"xy_missing",
")",
"return",
"data_interp"
] | Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
        A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
        The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image. | [
"Interpolate",
"missing",
"data",
"as",
"identified",
"by",
"the",
"mask",
"keyword",
"."
] | python | train | 30.709091 |
moderngl/moderngl | moderngl/context.py | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L521-L537 | def copy_framebuffer(self, dst, src) -> None:
'''
Copy framebuffer content.
Use this method to:
- blit framebuffers.
- copy framebuffer content into a texture.
            - downsample framebuffers. (it will allow reading the framebuffer's content)
- downsample a framebuffer directly to a texture.
Args:
dst (Framebuffer or Texture): Destination framebuffer or texture.
src (Framebuffer): Source framebuffer.
'''
self.mglo.copy_framebuffer(dst.mglo, src.mglo) | [
"def",
"copy_framebuffer",
"(",
"self",
",",
"dst",
",",
"src",
")",
"->",
"None",
":",
"self",
".",
"mglo",
".",
"copy_framebuffer",
"(",
"dst",
".",
"mglo",
",",
"src",
".",
"mglo",
")"
] | Copy framebuffer content.
Use this method to:
- blit framebuffers.
- copy framebuffer content into a texture.
            - downsample framebuffers. (it will allow reading the framebuffer's content)
- downsample a framebuffer directly to a texture.
Args:
dst (Framebuffer or Texture): Destination framebuffer or texture.
src (Framebuffer): Source framebuffer. | [
"Copy",
"framebuffer",
"content",
"."
] | python | train | 34.823529 |
ming060/robotframework-uiautomatorlibrary | uiautomatorlibrary/Mobile.py | https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L296-L302 | def swipe_bottom(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.down(steps=steps) | [
"def",
"swipe_bottom",
"(",
"self",
",",
"steps",
"=",
"10",
",",
"*",
"args",
",",
"*",
"*",
"selectors",
")",
":",
"self",
".",
"device",
"(",
"*",
"*",
"selectors",
")",
".",
"swipe",
".",
"down",
"(",
"steps",
"=",
"steps",
")"
] | Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details. | [
"Swipe",
"the",
"UI",
"object",
"with",
"*",
"selectors",
"*",
"from",
"center",
"to",
"bottom"
] | python | train | 34.142857 |
dailymotion/cloudkey-py | cloudkey.py | https://github.com/dailymotion/cloudkey-py/blob/81334553e0737b87c66b12ad2f1eb8e26ef68a96/cloudkey.py#L92-L135 | def normalize(arg=None):
"""Normalizes an argument for signing purpose.
This is used for normalizing the arguments of RPC method calls.
:param arg: The argument to normalize
    :return: A string representing the normalized argument.
.. doctest::
>>> from cloud.rpc import normalize
>>> normalize(['foo', 42, 'bar'])
'foo42bar'
>>> normalize({'yellow': 1, 'red': 2, 'pink' : 3})
'pink3red2yellow1'
>>> normalize(['foo', 42, {'yellow': 1, 'red': 2, 'pink' : 3}, 'bar'])
'foo42pink3red2yellow1bar'
>>> normalize(None)
''
>>> normalize([None, 1,2])
'12'
>>> normalize({2: [None, 1,2], 3: None, 4:5})
'212345'
"""
res = ''
t_arg = type(arg)
if t_arg in (list, tuple):
for i in arg:
res += normalize(i)
elif t_arg is dict:
keys = arg.keys()
keys.sort()
for key in keys:
res += '%s%s' % (normalize(key), normalize(arg[key]))
elif t_arg is unicode:
res = arg.encode('utf8')
elif t_arg is bool:
res = 'true' if arg else 'false'
elif arg != None:
res = str(arg)
return res | [
"def",
"normalize",
"(",
"arg",
"=",
"None",
")",
":",
"res",
"=",
"''",
"t_arg",
"=",
"type",
"(",
"arg",
")",
"if",
"t_arg",
"in",
"(",
"list",
",",
"tuple",
")",
":",
"for",
"i",
"in",
"arg",
":",
"res",
"+=",
"normalize",
"(",
"i",
")",
"elif",
"t_arg",
"is",
"dict",
":",
"keys",
"=",
"arg",
".",
"keys",
"(",
")",
"keys",
".",
"sort",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"res",
"+=",
"'%s%s'",
"%",
"(",
"normalize",
"(",
"key",
")",
",",
"normalize",
"(",
"arg",
"[",
"key",
"]",
")",
")",
"elif",
"t_arg",
"is",
"unicode",
":",
"res",
"=",
"arg",
".",
"encode",
"(",
"'utf8'",
")",
"elif",
"t_arg",
"is",
"bool",
":",
"res",
"=",
"'true'",
"if",
"arg",
"else",
"'false'",
"elif",
"arg",
"!=",
"None",
":",
"res",
"=",
"str",
"(",
"arg",
")",
"return",
"res"
] | Normalizes an argument for signing purpose.
This is used for normalizing the arguments of RPC method calls.
:param arg: The argument to normalize
    :return: A string representing the normalized argument.
.. doctest::
>>> from cloud.rpc import normalize
>>> normalize(['foo', 42, 'bar'])
'foo42bar'
>>> normalize({'yellow': 1, 'red': 2, 'pink' : 3})
'pink3red2yellow1'
>>> normalize(['foo', 42, {'yellow': 1, 'red': 2, 'pink' : 3}, 'bar'])
'foo42pink3red2yellow1bar'
>>> normalize(None)
''
>>> normalize([None, 1,2])
'12'
>>> normalize({2: [None, 1,2], 3: None, 4:5})
'212345' | [
"Normalizes",
"an",
"argument",
"for",
"signing",
"purpose",
"."
] | python | train | 25.590909 |
mar10/pyftpsync | ftpsync/targets.py | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L480-L483 | def set_mtime(self, name, mtime, size):
"""Set modification time on file."""
self.check_write(name)
os.utime(os.path.join(self.cur_dir, name), (-1, mtime)) | [
"def",
"set_mtime",
"(",
"self",
",",
"name",
",",
"mtime",
",",
"size",
")",
":",
"self",
".",
"check_write",
"(",
"name",
")",
"os",
".",
"utime",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cur_dir",
",",
"name",
")",
",",
"(",
"-",
"1",
",",
"mtime",
")",
")"
] | Set modification time on file. | [
"Set",
"modification",
"time",
"on",
"file",
"."
] | python | train | 44 |
thomasdelaet/python-velbus | velbus/messages/push_button_status.py | https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/push_button_status.py#L42-L50 | def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['closed_channels'] = self.closed
json_dict['opened_channels'] = self.opened
json_dict['closed_long_channels'] = self.closed_long
return json.dumps(json_dict) | [
"def",
"to_json",
"(",
"self",
")",
":",
"json_dict",
"=",
"self",
".",
"to_json_basic",
"(",
")",
"json_dict",
"[",
"'closed_channels'",
"]",
"=",
"self",
".",
"closed",
"json_dict",
"[",
"'opened_channels'",
"]",
"=",
"self",
".",
"opened",
"json_dict",
"[",
"'closed_long_channels'",
"]",
"=",
"self",
".",
"closed_long",
"return",
"json",
".",
"dumps",
"(",
"json_dict",
")"
] | :return: str | [
":",
"return",
":",
"str"
] | python | train | 32.888889 |
fumitoh/modelx | modelx/core/formula.py | https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/formula.py#L406-L443 | def _reload(self, module=None):
"""Reload the source function from the source module.
**Internal use only**
Update the source function of the formula.
        This method is used to update the underlying formula
when the source code of the module in which the source function
is read from is modified.
If the formula was not created from a module, an error is raised.
If ``module_`` is not given, the source module of the formula is
reloaded. If ``module_`` is given and matches the source module,
then the module_ is used without being reloaded.
If ``module_`` is given and does not match the source module of
the formula, an error is raised.
Args:
module_: A ``ModuleSource`` object
Returns:
self
"""
if self.module is None:
raise RuntimeError
elif module is None:
import importlib
module = ModuleSource(importlib.reload(module))
elif module.name != self.module:
raise RuntimeError
if self.name in module.funcs:
func = module.funcs[self.name]
self.__init__(func=func)
else:
self.__init__(func=NULL_FORMULA)
return self | [
"def",
"_reload",
"(",
"self",
",",
"module",
"=",
"None",
")",
":",
"if",
"self",
".",
"module",
"is",
"None",
":",
"raise",
"RuntimeError",
"elif",
"module",
"is",
"None",
":",
"import",
"importlib",
"module",
"=",
"ModuleSource",
"(",
"importlib",
".",
"reload",
"(",
"module",
")",
")",
"elif",
"module",
".",
"name",
"!=",
"self",
".",
"module",
":",
"raise",
"RuntimeError",
"if",
"self",
".",
"name",
"in",
"module",
".",
"funcs",
":",
"func",
"=",
"module",
".",
"funcs",
"[",
"self",
".",
"name",
"]",
"self",
".",
"__init__",
"(",
"func",
"=",
"func",
")",
"else",
":",
"self",
".",
"__init__",
"(",
"func",
"=",
"NULL_FORMULA",
")",
"return",
"self"
] | Reload the source function from the source module.
**Internal use only**
Update the source function of the formula.
        This method is used to update the underlying formula
when the source code of the module in which the source function
is read from is modified.
If the formula was not created from a module, an error is raised.
If ``module_`` is not given, the source module of the formula is
reloaded. If ``module_`` is given and matches the source module,
then the module_ is used without being reloaded.
If ``module_`` is given and does not match the source module of
the formula, an error is raised.
Args:
module_: A ``ModuleSource`` object
Returns:
self | [
"Reload",
"the",
"source",
"function",
"from",
"the",
"source",
"module",
"."
] | python | valid | 33.131579 |
django-danceschool/django-danceschool | danceschool/core/models.py | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L317-L324 | def lastOfferedMonth(self):
'''
Sometimes a Series is associated with a month other than the one
in which the first class begins, so this returns a (year,month) tuple
that can be used in admin instead.
'''
lastOfferedSeries = self.event_set.order_by('-startTime').first()
return (lastOfferedSeries.year,lastOfferedSeries.month) | [
"def",
"lastOfferedMonth",
"(",
"self",
")",
":",
"lastOfferedSeries",
"=",
"self",
".",
"event_set",
".",
"order_by",
"(",
"'-startTime'",
")",
".",
"first",
"(",
")",
"return",
"(",
"lastOfferedSeries",
".",
"year",
",",
"lastOfferedSeries",
".",
"month",
")"
] | Sometimes a Series is associated with a month other than the one
in which the first class begins, so this returns a (year,month) tuple
that can be used in admin instead. | [
"Sometimes",
"a",
"Series",
"is",
"associated",
"with",
"a",
"month",
"other",
"than",
"the",
"one",
"in",
"which",
"the",
"first",
"class",
"begins",
"so",
"this",
"returns",
"a",
"(",
"year",
"month",
")",
"tuple",
"that",
"can",
"be",
"used",
"in",
"admin",
"instead",
"."
] | python | train | 47 |
mingchen/django-cas-ng | django_cas_ng/backends.py | https://github.com/mingchen/django-cas-ng/blob/202ca92cd770d9679bfe4e9e20b41fd19b81c311/django_cas_ng/backends.py#L155-L172 | def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, changes the username case according to
`settings.CAS_FORCE_CHANGE_USERNAME_CASE`.
"""
username_case = settings.CAS_FORCE_CHANGE_USERNAME_CASE
if username_case == 'lower':
username = username.lower()
elif username_case == 'upper':
username = username.upper()
elif username_case is not None:
raise ImproperlyConfigured(
"Invalid value for the CAS_FORCE_CHANGE_USERNAME_CASE setting. "
"Valid values are `'lower'`, `'upper'`, and `None`.")
return username | [
"def",
"clean_username",
"(",
"self",
",",
"username",
")",
":",
"username_case",
"=",
"settings",
".",
"CAS_FORCE_CHANGE_USERNAME_CASE",
"if",
"username_case",
"==",
"'lower'",
":",
"username",
"=",
"username",
".",
"lower",
"(",
")",
"elif",
"username_case",
"==",
"'upper'",
":",
"username",
"=",
"username",
".",
"upper",
"(",
")",
"elif",
"username_case",
"is",
"not",
"None",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"Invalid value for the CAS_FORCE_CHANGE_USERNAME_CASE setting. \"",
"\"Valid values are `'lower'`, `'upper'`, and `None`.\"",
")",
"return",
"username"
] | Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, changes the username case according to
`settings.CAS_FORCE_CHANGE_USERNAME_CASE`. | [
"Performs",
"any",
"cleaning",
"on",
"the",
"username",
"prior",
"to",
"using",
"it",
"to",
"get",
"or",
"create",
"the",
"user",
"object",
".",
"Returns",
"the",
"cleaned",
"username",
"."
] | python | train | 42.611111 |
Locu/chronology | kronos/kronos/storage/cassandra/client.py | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/cassandra/client.py#L107-L131 | def _retrieve(self, namespace, stream, start_id, end_time, order, limit,
configuration):
"""
Retrieve events for `stream` between `start_id` and `end_time`.
`stream` : The stream to return events for.
`start_id` : Return events with id > `start_id`.
`end_time` : Return events ending <= `end_time`.
`order` : Whether to return the results in ResultOrder.ASCENDING
or ResultOrder.DESCENDING time-order.
`configuration` : A dictionary of settings to override any default
settings, such as number of shards or width of a
time interval.
"""
stream = self.get_stream(namespace, stream, configuration)
events = stream.iterator(start_id,
uuid_from_kronos_time(end_time,
_type=UUIDType.HIGHEST),
order == ResultOrder.DESCENDING, limit)
events = events.__iter__()
event = events.next()
# If first event's ID is equal to `start_id`, skip it.
if event.id != start_id:
yield event.json
while True:
yield events.next().json | [
"def",
"_retrieve",
"(",
"self",
",",
"namespace",
",",
"stream",
",",
"start_id",
",",
"end_time",
",",
"order",
",",
"limit",
",",
"configuration",
")",
":",
"stream",
"=",
"self",
".",
"get_stream",
"(",
"namespace",
",",
"stream",
",",
"configuration",
")",
"events",
"=",
"stream",
".",
"iterator",
"(",
"start_id",
",",
"uuid_from_kronos_time",
"(",
"end_time",
",",
"_type",
"=",
"UUIDType",
".",
"HIGHEST",
")",
",",
"order",
"==",
"ResultOrder",
".",
"DESCENDING",
",",
"limit",
")",
"events",
"=",
"events",
".",
"__iter__",
"(",
")",
"event",
"=",
"events",
".",
"next",
"(",
")",
"# If first event's ID is equal to `start_id`, skip it.",
"if",
"event",
".",
"id",
"!=",
"start_id",
":",
"yield",
"event",
".",
"json",
"while",
"True",
":",
"yield",
"events",
".",
"next",
"(",
")",
".",
"json"
] | Retrieve events for `stream` between `start_id` and `end_time`.
`stream` : The stream to return events for.
`start_id` : Return events with id > `start_id`.
`end_time` : Return events ending <= `end_time`.
`order` : Whether to return the results in ResultOrder.ASCENDING
or ResultOrder.DESCENDING time-order.
`configuration` : A dictionary of settings to override any default
settings, such as number of shards or width of a
time interval. | [
"Retrieve",
"events",
"for",
"stream",
"between",
"start_id",
"and",
"end_time",
".",
"stream",
":",
"The",
"stream",
"to",
"return",
"events",
"for",
".",
"start_id",
":",
"Return",
"events",
"with",
"id",
">",
"start_id",
".",
"end_time",
":",
"Return",
"events",
"ending",
"<",
"=",
"end_time",
".",
"order",
":",
"Whether",
"to",
"return",
"the",
"results",
"in",
"ResultOrder",
".",
"ASCENDING",
"or",
"ResultOrder",
".",
"DESCENDING",
"time",
"-",
"order",
".",
"configuration",
":",
"A",
"dictionary",
"of",
"settings",
"to",
"override",
"any",
"default",
"settings",
"such",
"as",
"number",
"of",
"shards",
"or",
"width",
"of",
"a",
"time",
"interval",
"."
] | python | train | 45.64 |
xtuml/pyxtuml | bridgepoint/gen_xsd_schema.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/gen_xsd_schema.py#L126-L142 | def build_struct_type(s_sdt):
'''
Build an xsd complexType out of a S_SDT.
'''
s_dt = nav_one(s_sdt).S_DT[17]()
struct = ET.Element('xs:complexType', name=s_dt.name)
first_filter = lambda selected: not nav_one(selected).S_MBR[46, 'succeeds']()
s_mbr = nav_any(s_sdt).S_MBR[44](first_filter)
while s_mbr:
s_dt = nav_one(s_mbr).S_DT[45]()
type_name = get_type_name(s_dt)
ET.SubElement(struct, 'xs:attribute', name=s_mbr.name, type=type_name)
s_mbr = nav_one(s_mbr).S_MBR[46, 'precedes']()
return struct | [
"def",
"build_struct_type",
"(",
"s_sdt",
")",
":",
"s_dt",
"=",
"nav_one",
"(",
"s_sdt",
")",
".",
"S_DT",
"[",
"17",
"]",
"(",
")",
"struct",
"=",
"ET",
".",
"Element",
"(",
"'xs:complexType'",
",",
"name",
"=",
"s_dt",
".",
"name",
")",
"first_filter",
"=",
"lambda",
"selected",
":",
"not",
"nav_one",
"(",
"selected",
")",
".",
"S_MBR",
"[",
"46",
",",
"'succeeds'",
"]",
"(",
")",
"s_mbr",
"=",
"nav_any",
"(",
"s_sdt",
")",
".",
"S_MBR",
"[",
"44",
"]",
"(",
"first_filter",
")",
"while",
"s_mbr",
":",
"s_dt",
"=",
"nav_one",
"(",
"s_mbr",
")",
".",
"S_DT",
"[",
"45",
"]",
"(",
")",
"type_name",
"=",
"get_type_name",
"(",
"s_dt",
")",
"ET",
".",
"SubElement",
"(",
"struct",
",",
"'xs:attribute'",
",",
"name",
"=",
"s_mbr",
".",
"name",
",",
"type",
"=",
"type_name",
")",
"s_mbr",
"=",
"nav_one",
"(",
"s_mbr",
")",
".",
"S_MBR",
"[",
"46",
",",
"'precedes'",
"]",
"(",
")",
"return",
"struct"
] | Build an xsd complexType out of a S_SDT. | [
"Build",
"an",
"xsd",
"complexType",
"out",
"of",
"a",
"S_SDT",
"."
] | python | test | 33.352941 |
gc3-uzh-ch/elasticluster | elasticluster/cluster.py | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/cluster.py#L294-L370 | def add_node(self, kind, image_id, image_user, flavor,
security_group, image_userdata='', name=None, **extra):
"""
Adds a new node to the cluster. This factory method provides an
easy way to add a new node to the cluster by specifying all relevant
parameters. The node does not get started nor setup automatically,
this has to be done manually afterwards.
:param str kind: kind of node to start. this refers to the
groups defined in the ansible setup provider
:py:class:`elasticluster.providers.AnsibleSetupProvider`
Please note that this can only contain
alphanumeric characters and hyphens (and must
not end with a digit), as it is used to build
a valid hostname
:param str image_id: image id to use for the cloud instance (e.g.
ami on amazon)
:param str image_user: user to login on given image
:param str flavor: machine type to use for cloud instance
:param str security_group: security group that defines firewall rules
to the instance
:param str image_userdata: commands to execute after instance starts
:param str name: name of this node, automatically generated if None
:raises: ValueError: `kind` argument is an invalid string.
:return: created :py:class:`Node`
"""
if not self._NODE_KIND_RE.match(kind):
raise ValueError(
"Invalid name `{kind}`. The `kind` argument may only contain"
" alphanumeric characters, and must not end with a digit."
.format(kind=kind))
if kind not in self.nodes:
self.nodes[kind] = []
# To ease json dump/load, use `extra` dictionary to
# instantiate Node class
extra.update(
cloud_provider=self._cloud_provider,
cluster_name=self.name,
flavor=flavor,
image_id=image_id,
image_user=image_user,
image_userdata=image_userdata,
kind=kind,
security_group=security_group,
)
for attr in (
'flavor',
'image_id',
'image_user',
'image_userdata',
'security_group',
'user_key_name',
'user_key_private',
'user_key_public',
):
if attr not in extra:
extra[attr] = getattr(self, attr)
if not name:
# `extra` contains key `kind` already
name = self._naming_policy.new(**extra)
else:
self._naming_policy.use(kind, name)
node = Node(name=name, **extra)
self.nodes[kind].append(node)
return node | [
"def",
"add_node",
"(",
"self",
",",
"kind",
",",
"image_id",
",",
"image_user",
",",
"flavor",
",",
"security_group",
",",
"image_userdata",
"=",
"''",
",",
"name",
"=",
"None",
",",
"*",
"*",
"extra",
")",
":",
"if",
"not",
"self",
".",
"_NODE_KIND_RE",
".",
"match",
"(",
"kind",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid name `{kind}`. The `kind` argument may only contain\"",
"\" alphanumeric characters, and must not end with a digit.\"",
".",
"format",
"(",
"kind",
"=",
"kind",
")",
")",
"if",
"kind",
"not",
"in",
"self",
".",
"nodes",
":",
"self",
".",
"nodes",
"[",
"kind",
"]",
"=",
"[",
"]",
"# To ease json dump/load, use `extra` dictionary to",
"# instantiate Node class",
"extra",
".",
"update",
"(",
"cloud_provider",
"=",
"self",
".",
"_cloud_provider",
",",
"cluster_name",
"=",
"self",
".",
"name",
",",
"flavor",
"=",
"flavor",
",",
"image_id",
"=",
"image_id",
",",
"image_user",
"=",
"image_user",
",",
"image_userdata",
"=",
"image_userdata",
",",
"kind",
"=",
"kind",
",",
"security_group",
"=",
"security_group",
",",
")",
"for",
"attr",
"in",
"(",
"'flavor'",
",",
"'image_id'",
",",
"'image_user'",
",",
"'image_userdata'",
",",
"'security_group'",
",",
"'user_key_name'",
",",
"'user_key_private'",
",",
"'user_key_public'",
",",
")",
":",
"if",
"attr",
"not",
"in",
"extra",
":",
"extra",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"not",
"name",
":",
"# `extra` contains key `kind` already",
"name",
"=",
"self",
".",
"_naming_policy",
".",
"new",
"(",
"*",
"*",
"extra",
")",
"else",
":",
"self",
".",
"_naming_policy",
".",
"use",
"(",
"kind",
",",
"name",
")",
"node",
"=",
"Node",
"(",
"name",
"=",
"name",
",",
"*",
"*",
"extra",
")",
"self",
".",
"nodes",
"[",
"kind",
"]",
".",
"append",
"(",
"node",
")",
"return",
"node"
] | Adds a new node to the cluster. This factory method provides an
easy way to add a new node to the cluster by specifying all relevant
parameters. The node does not get started nor setup automatically,
this has to be done manually afterwards.
:param str kind: kind of node to start. this refers to the
groups defined in the ansible setup provider
:py:class:`elasticluster.providers.AnsibleSetupProvider`
Please note that this can only contain
alphanumeric characters and hyphens (and must
not end with a digit), as it is used to build
a valid hostname
:param str image_id: image id to use for the cloud instance (e.g.
ami on amazon)
:param str image_user: user to login on given image
:param str flavor: machine type to use for cloud instance
:param str security_group: security group that defines firewall rules
to the instance
:param str image_userdata: commands to execute after instance starts
:param str name: name of this node, automatically generated if None
:raises: ValueError: `kind` argument is an invalid string.
:return: created :py:class:`Node` | [
"Adds",
"a",
"new",
"node",
"to",
"the",
"cluster",
".",
"This",
"factory",
"method",
"provides",
"an",
"easy",
"way",
"to",
"add",
"a",
"new",
"node",
"to",
"the",
"cluster",
"by",
"specifying",
"all",
"relevant",
"parameters",
".",
"The",
"node",
"does",
"not",
"get",
"started",
"nor",
"setup",
"automatically",
"this",
"has",
"to",
"be",
"done",
"manually",
"afterwards",
"."
] | python | train | 37.272727 |
oasis-open/cti-taxii-client | taxii2client/__init__.py | https://github.com/oasis-open/cti-taxii-client/blob/b4c037fb61d8b8892af34423e2c67c81218d6f8e/taxii2client/__init__.py#L708-L711 | def refresh(self, accept=MEDIA_TYPE_TAXII_V20):
"""Update the API Root's information and list of Collections"""
self.refresh_information(accept)
self.refresh_collections(accept) | [
"def",
"refresh",
"(",
"self",
",",
"accept",
"=",
"MEDIA_TYPE_TAXII_V20",
")",
":",
"self",
".",
"refresh_information",
"(",
"accept",
")",
"self",
".",
"refresh_collections",
"(",
"accept",
")"
] | Update the API Root's information and list of Collections | [
"Update",
"the",
"API",
"Root",
"s",
"information",
"and",
"list",
"of",
"Collections"
] | python | valid | 49.5 |
raamana/mrivis | mrivis/base.py | https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/base.py#L1082-L1091 | def _summarize_in_roi(self, label_mask, num_clusters_per_roi=1, metric='minkowski'):
"""returns a single row summarizing (typically via mean) all rows in an ROI."""
this_label = self.carpet[label_mask.flatten(), :]
if num_clusters_per_roi == 1:
out_matrix = self._summary_func(this_label, axis=0)
else:
out_matrix = self._make_clusters(this_label, num_clusters_per_roi, metric)
return out_matrix | [
"def",
"_summarize_in_roi",
"(",
"self",
",",
"label_mask",
",",
"num_clusters_per_roi",
"=",
"1",
",",
"metric",
"=",
"'minkowski'",
")",
":",
"this_label",
"=",
"self",
".",
"carpet",
"[",
"label_mask",
".",
"flatten",
"(",
")",
",",
":",
"]",
"if",
"num_clusters_per_roi",
"==",
"1",
":",
"out_matrix",
"=",
"self",
".",
"_summary_func",
"(",
"this_label",
",",
"axis",
"=",
"0",
")",
"else",
":",
"out_matrix",
"=",
"self",
".",
"_make_clusters",
"(",
"this_label",
",",
"num_clusters_per_roi",
",",
"metric",
")",
"return",
"out_matrix"
] | returns a single row summarizing (typically via mean) all rows in an ROI. | [
"returns",
"a",
"single",
"row",
"summarizing",
"(",
"typically",
"via",
"mean",
")",
"all",
"rows",
"in",
"an",
"ROI",
"."
] | python | train | 45.2 |
bradmontgomery/django-redis-metrics | redis_metrics/management/commands/system_metric.py | https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/management/commands/system_metric.py#L89-L93 | def _mem(self):
"""Record Memory usage."""
value = int(psutil.virtual_memory().percent)
set_metric("memory", value, category=self.category)
gauge("memory", value) | [
"def",
"_mem",
"(",
"self",
")",
":",
"value",
"=",
"int",
"(",
"psutil",
".",
"virtual_memory",
"(",
")",
".",
"percent",
")",
"set_metric",
"(",
"\"memory\"",
",",
"value",
",",
"category",
"=",
"self",
".",
"category",
")",
"gauge",
"(",
"\"memory\"",
",",
"value",
")"
] | Record Memory usage. | [
"Record",
"Memory",
"usage",
"."
] | python | train | 38 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L858-L887 | def _parse_processor_embedded_health(self, data):
"""Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: processor details like cpu arch and number of cpus.
"""
processor = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA']
['PROCESSORS']), 'PROCESSOR')
if processor is None:
msg = "Unable to get cpu data. Error: Data missing"
raise exception.IloError(msg)
cpus = 0
for proc in processor:
for val in proc.values():
processor_detail = val['VALUE']
proc_core_threads = processor_detail.split('; ')
for x in proc_core_threads:
if "thread" in x:
v = x.split()
try:
cpus = cpus + int(v[0])
except ValueError:
msg = ("Unable to get cpu data. "
"The Value %s returned couldn't be "
"manipulated to get number of "
"actual processors" % processor_detail)
raise exception.IloError(msg)
cpu_arch = 'x86_64'
return cpus, cpu_arch | [
"def",
"_parse_processor_embedded_health",
"(",
"self",
",",
"data",
")",
":",
"processor",
"=",
"self",
".",
"get_value_as_list",
"(",
"(",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
"[",
"'PROCESSORS'",
"]",
")",
",",
"'PROCESSOR'",
")",
"if",
"processor",
"is",
"None",
":",
"msg",
"=",
"\"Unable to get cpu data. Error: Data missing\"",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")",
"cpus",
"=",
"0",
"for",
"proc",
"in",
"processor",
":",
"for",
"val",
"in",
"proc",
".",
"values",
"(",
")",
":",
"processor_detail",
"=",
"val",
"[",
"'VALUE'",
"]",
"proc_core_threads",
"=",
"processor_detail",
".",
"split",
"(",
"'; '",
")",
"for",
"x",
"in",
"proc_core_threads",
":",
"if",
"\"thread\"",
"in",
"x",
":",
"v",
"=",
"x",
".",
"split",
"(",
")",
"try",
":",
"cpus",
"=",
"cpus",
"+",
"int",
"(",
"v",
"[",
"0",
"]",
")",
"except",
"ValueError",
":",
"msg",
"=",
"(",
"\"Unable to get cpu data. \"",
"\"The Value %s returned couldn't be \"",
"\"manipulated to get number of \"",
"\"actual processors\"",
"%",
"processor_detail",
")",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")",
"cpu_arch",
"=",
"'x86_64'",
"return",
"cpus",
",",
"cpu_arch"
] | Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: processor details like cpu arch and number of cpus. | [
"Parse",
"the",
"get_host_health_data",
"()",
"for",
"essential",
"properties"
] | python | train | 45.366667 |
libtcod/python-tcod | tcod/libtcodpy.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L3377-L3383 | def noise_set_type(n: tcod.noise.Noise, typ: int) -> None:
"""Set a Noise objects default noise algorithm.
Args:
typ (int): Any NOISE_* constant.
"""
n.algorithm = typ | [
"def",
"noise_set_type",
"(",
"n",
":",
"tcod",
".",
"noise",
".",
"Noise",
",",
"typ",
":",
"int",
")",
"->",
"None",
":",
"n",
".",
"algorithm",
"=",
"typ"
] | Set a Noise object's default noise algorithm.
Args:
typ (int): Any NOISE_* constant. | [
"Set",
"a",
"Noise",
"objects",
"default",
"noise",
"algorithm",
"."
] | python | train | 26.571429 |
Scifabric/pbs | pbs.py | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L141-L150 | def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
"""Update project templates and information."""
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res) | [
"def",
"update_project",
"(",
"config",
",",
"task_presenter",
",",
"results",
",",
"long_description",
",",
"tutorial",
",",
"watch",
")",
":",
"# pragma: no cover",
"if",
"watch",
":",
"res",
"=",
"_update_project_watch",
"(",
"config",
",",
"task_presenter",
",",
"results",
",",
"long_description",
",",
"tutorial",
")",
"else",
":",
"res",
"=",
"_update_project",
"(",
"config",
",",
"task_presenter",
",",
"results",
",",
"long_description",
",",
"tutorial",
")",
"click",
".",
"echo",
"(",
"res",
")"
] | Update project templates and information. | [
"Update",
"project",
"templates",
"and",
"information",
"."
] | python | train | 47 |
Cymmetria/honeycomb | honeycomb/integrationmanager/tasks.py | https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/integrationmanager/tasks.py#L104-L167 | def send_alert_to_configured_integration(integration_alert):
"""Send IntegrationAlert to configured integration."""
try:
alert = integration_alert.alert
configured_integration = integration_alert.configured_integration
integration = configured_integration.integration
integration_actions_instance = configured_integration.integration.module
alert_fields = dict()
if integration.required_fields:
if not all([hasattr(alert, _) for _ in integration.required_fields]):
logger.debug("Alert does not have all required_fields (%s) for integration %s, skipping",
integration.required_fields,
integration.name)
return
exclude_fields = ["alert_type", "service_type"]
alert_fields = {}
for field in alert.__slots__:
if hasattr(alert, field) and field not in exclude_fields:
alert_fields[field] = getattr(alert, field)
logger.debug("Sending alert %s to %s", alert_fields, integration.name)
output_data, output_file_content = integration_actions_instance.send_event(alert_fields)
if integration.polling_enabled:
integration_alert.status = IntegrationAlertStatuses.POLLING.name
polling_integration_alerts.append(integration_alert)
else:
integration_alert.status = IntegrationAlertStatuses.DONE.name
integration_alert.send_time = get_current_datetime_utc()
integration_alert.output_data = json.dumps(output_data)
# TODO: do something with successfully handled alerts? They are all written to debug log file
except exceptions.IntegrationMissingRequiredFieldError as exc:
logger.exception("Send response formatting for integration alert %s failed. Missing required fields",
integration_alert,
exc.message)
integration_alert.status = IntegrationAlertStatuses.ERROR_MISSING_SEND_FIELDS.name
except exceptions.IntegrationOutputFormatError:
logger.exception("Send response formatting for integration alert %s failed", integration_alert)
integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING_FORMATTING.name
except exceptions.IntegrationSendEventError as exc:
integration_send_retries = integration_alert.retries if integration_alert.retries <= MAX_SEND_RETRIES \
else MAX_SEND_RETRIES # making sure we do not exceed celery max retries
send_retries_left = integration_send_retries - 1
integration_alert.retries = send_retries_left
logger.error("Sending integration alert %s failed. Message: %s. Retries left: %s",
integration_alert,
exc.message,
send_retries_left)
if send_retries_left == 0:
integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING.name
if send_retries_left > 0:
sleep(SEND_ALERT_DATA_INTERVAL)
send_alert_to_configured_integration(integration_alert) | [
"def",
"send_alert_to_configured_integration",
"(",
"integration_alert",
")",
":",
"try",
":",
"alert",
"=",
"integration_alert",
".",
"alert",
"configured_integration",
"=",
"integration_alert",
".",
"configured_integration",
"integration",
"=",
"configured_integration",
".",
"integration",
"integration_actions_instance",
"=",
"configured_integration",
".",
"integration",
".",
"module",
"alert_fields",
"=",
"dict",
"(",
")",
"if",
"integration",
".",
"required_fields",
":",
"if",
"not",
"all",
"(",
"[",
"hasattr",
"(",
"alert",
",",
"_",
")",
"for",
"_",
"in",
"integration",
".",
"required_fields",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"\"Alert does not have all required_fields (%s) for integration %s, skipping\"",
",",
"integration",
".",
"required_fields",
",",
"integration",
".",
"name",
")",
"return",
"exclude_fields",
"=",
"[",
"\"alert_type\"",
",",
"\"service_type\"",
"]",
"alert_fields",
"=",
"{",
"}",
"for",
"field",
"in",
"alert",
".",
"__slots__",
":",
"if",
"hasattr",
"(",
"alert",
",",
"field",
")",
"and",
"field",
"not",
"in",
"exclude_fields",
":",
"alert_fields",
"[",
"field",
"]",
"=",
"getattr",
"(",
"alert",
",",
"field",
")",
"logger",
".",
"debug",
"(",
"\"Sending alert %s to %s\"",
",",
"alert_fields",
",",
"integration",
".",
"name",
")",
"output_data",
",",
"output_file_content",
"=",
"integration_actions_instance",
".",
"send_event",
"(",
"alert_fields",
")",
"if",
"integration",
".",
"polling_enabled",
":",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"POLLING",
".",
"name",
"polling_integration_alerts",
".",
"append",
"(",
"integration_alert",
")",
"else",
":",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"DONE",
".",
"name",
"integration_alert",
".",
"send_time",
"=",
"get_current_datetime_utc",
"(",
")",
"integration_alert",
".",
"output_data",
"=",
"json",
".",
"dumps",
"(",
"output_data",
")",
"# TODO: do something with successfully handled alerts? They are all written to debug log file",
"except",
"exceptions",
".",
"IntegrationMissingRequiredFieldError",
"as",
"exc",
":",
"logger",
".",
"exception",
"(",
"\"Send response formatting for integration alert %s failed. Missing required fields\"",
",",
"integration_alert",
",",
"exc",
".",
"message",
")",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"ERROR_MISSING_SEND_FIELDS",
".",
"name",
"except",
"exceptions",
".",
"IntegrationOutputFormatError",
":",
"logger",
".",
"exception",
"(",
"\"Send response formatting for integration alert %s failed\"",
",",
"integration_alert",
")",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"ERROR_SENDING_FORMATTING",
".",
"name",
"except",
"exceptions",
".",
"IntegrationSendEventError",
"as",
"exc",
":",
"integration_send_retries",
"=",
"integration_alert",
".",
"retries",
"if",
"integration_alert",
".",
"retries",
"<=",
"MAX_SEND_RETRIES",
"else",
"MAX_SEND_RETRIES",
"# making sure we do not exceed celery max retries",
"send_retries_left",
"=",
"integration_send_retries",
"-",
"1",
"integration_alert",
".",
"retries",
"=",
"send_retries_left",
"logger",
".",
"error",
"(",
"\"Sending integration alert %s failed. Message: %s. Retries left: %s\"",
",",
"integration_alert",
",",
"exc",
".",
"message",
",",
"send_retries_left",
")",
"if",
"send_retries_left",
"==",
"0",
":",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"ERROR_SENDING",
".",
"name",
"if",
"send_retries_left",
">",
"0",
":",
"sleep",
"(",
"SEND_ALERT_DATA_INTERVAL",
")",
"send_alert_to_configured_integration",
"(",
"integration_alert",
")"
] | Send IntegrationAlert to configured integration. | [
"Send",
"IntegrationAlert",
"to",
"configured",
"integration",
"."
] | python | train | 48 |
dossier/dossier.fc | python/dossier/fc/string_counter.py | https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/string_counter.py#L137-L145 | def _fix_key(key):
'''Normalize keys to Unicode strings.'''
if isinstance(key, unicode):
return key
if isinstance(key, str):
# On my system, the default encoding is `ascii`, so let's
# explicitly say UTF-8?
return unicode(key, 'utf-8')
raise TypeError(key) | [
"def",
"_fix_key",
"(",
"key",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"unicode",
")",
":",
"return",
"key",
"if",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"# On my system, the default encoding is `ascii`, so let's",
"# explicitly say UTF-8?",
"return",
"unicode",
"(",
"key",
",",
"'utf-8'",
")",
"raise",
"TypeError",
"(",
"key",
")"
] | Normalize keys to Unicode strings. | [
"Normalize",
"keys",
"to",
"Unicode",
"strings",
"."
] | python | train | 36.444444 |
sixty-north/asq | asq/queryables.py | https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L240-L289 | def select_with_correspondence(
self,
selector,
result_selector=KeyedElement):
'''Apply a callable to each element in an input sequence, generating a new
sequence of 2-tuples where the first element is the input value and the
second is the transformed input value.
The generated sequence is lazily evaluated.
Note: This method uses deferred execution.
Args:
selector: A unary function mapping a value in the source sequence
to the second argument of the result selector.
            result_selector: A binary callable mapping a value in
the source sequence and the transformed value to the
corresponding value in the generated sequence. The two
positional arguments of the selector function are the original
source element and the transformed value. The return value
should be the corresponding value in the result sequence. The
default selector produces a KeyedElement containing the index
and the element giving this function similar behaviour to the
built-in enumerate().
Returns:
When using the default selector, a Queryable whose elements are
KeyedElements where the first element is from the input sequence
and the second is the result of invoking the transform function on
the first value.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If transform is not callable.
'''
if self.closed():
raise ValueError("Attempt to call select_with_correspondence() on a "
"closed Queryable.")
if not is_callable(selector):
raise TypeError("select_with_correspondence() parameter selector={0} is "
"not callable".format(repr(selector)))
if not is_callable(result_selector):
raise TypeError("select_with_correspondence() parameter result_selector={0} is "
"not callable".format(repr(result_selector)))
return self._create(result_selector(elem, selector(elem)) for elem in iter(self)) | [
"def",
"select_with_correspondence",
"(",
"self",
",",
"selector",
",",
"result_selector",
"=",
"KeyedElement",
")",
":",
"if",
"self",
".",
"closed",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Attempt to call select_with_correspondence() on a \"",
"\"closed Queryable.\"",
")",
"if",
"not",
"is_callable",
"(",
"selector",
")",
":",
"raise",
"TypeError",
"(",
"\"select_with_correspondence() parameter selector={0} is \"",
"\"not callable\"",
".",
"format",
"(",
"repr",
"(",
"selector",
")",
")",
")",
"if",
"not",
"is_callable",
"(",
"result_selector",
")",
":",
"raise",
"TypeError",
"(",
"\"select_with_correspondence() parameter result_selector={0} is \"",
"\"not callable\"",
".",
"format",
"(",
"repr",
"(",
"result_selector",
")",
")",
")",
"return",
"self",
".",
"_create",
"(",
"result_selector",
"(",
"elem",
",",
"selector",
"(",
"elem",
")",
")",
"for",
"elem",
"in",
"iter",
"(",
"self",
")",
")"
] | Apply a callable to each element in an input sequence, generating a new
sequence of 2-tuples where the first element is the input value and the
second is the transformed input value.
The generated sequence is lazily evaluated.
Note: This method uses deferred execution.
Args:
selector: A unary function mapping a value in the source sequence
to the second argument of the result selector.
            result_selector: A binary callable mapping a value in
the source sequence and the transformed value to the
corresponding value in the generated sequence. The two
positional arguments of the selector function are the original
source element and the transformed value. The return value
should be the corresponding value in the result sequence. The
default selector produces a KeyedElement containing the index
and the element giving this function similar behaviour to the
built-in enumerate().
Returns:
When using the default selector, a Queryable whose elements are
KeyedElements where the first element is from the input sequence
and the second is the result of invoking the transform function on
the first value.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If transform is not callable. | [
"Apply",
"a",
"callable",
"to",
"each",
"element",
"in",
"an",
"input",
"sequence",
"generating",
"a",
"new",
"sequence",
"of",
"2",
"-",
"tuples",
"where",
"the",
"first",
"element",
"is",
"the",
"input",
"value",
"and",
"the",
"second",
"is",
"the",
"transformed",
"input",
"value",
"."
] | python | train | 45.02 |
chovanecm/sacredboard | sacredboard/app/data/pymongo/metricsdao.py | https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/metricsdao.py#L57-L65 | def delete(self, run_id):
"""
Delete all metrics belonging to the given run.
:param run_id: ID of the Run that the metric belongs to.
"""
self.generic_dao.delete_record(
self.metrics_collection_name,
{"run_id": self._parse_run_id(run_id)}) | [
"def",
"delete",
"(",
"self",
",",
"run_id",
")",
":",
"self",
".",
"generic_dao",
".",
"delete_record",
"(",
"self",
".",
"metrics_collection_name",
",",
"{",
"\"run_id\"",
":",
"self",
".",
"_parse_run_id",
"(",
"run_id",
")",
"}",
")"
] | Delete all metrics belonging to the given run.
:param run_id: ID of the Run that the metric belongs to. | [
"Delete",
"all",
"metrics",
"belonging",
"to",
"the",
"given",
"run",
"."
] | python | train | 32.555556 |
knipknap/exscript | Exscript/account.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/account.py#L105-L121 | def acquire(self, signal=True):
"""
Locks the account.
Method has no effect if the constructor argument `needs_lock`
        was set to False.
:type signal: bool
:param signal: Whether to emit the acquired_event signal.
"""
if not self.needs_lock:
return
with self.synclock:
while not self.lock.acquire(False):
self.synclock.wait()
if signal:
self.acquired_event(self)
self.synclock.notify_all() | [
"def",
"acquire",
"(",
"self",
",",
"signal",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"needs_lock",
":",
"return",
"with",
"self",
".",
"synclock",
":",
"while",
"not",
"self",
".",
"lock",
".",
"acquire",
"(",
"False",
")",
":",
"self",
".",
"synclock",
".",
"wait",
"(",
")",
"if",
"signal",
":",
"self",
".",
"acquired_event",
"(",
"self",
")",
"self",
".",
"synclock",
".",
"notify_all",
"(",
")"
] | Locks the account.
Method has no effect if the constructor argument `needs_lock`
        was set to False.
:type signal: bool
:param signal: Whether to emit the acquired_event signal. | [
"Locks",
"the",
"account",
".",
"Method",
"has",
"no",
"effect",
"if",
"the",
"constructor",
"argument",
"needs_lock",
"wsa",
"set",
"to",
"False",
"."
] | python | train | 30.823529 |
pydata/xarray | xarray/core/indexing.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/indexing.py#L716-L740 | def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(tuple(o[new_key.tuple] for o in
np.broadcast_arrays(*old_key.tuple))) | [
"def",
"_combine_indexers",
"(",
"old_key",
",",
"shape",
",",
"new_key",
")",
":",
"if",
"not",
"isinstance",
"(",
"old_key",
",",
"VectorizedIndexer",
")",
":",
"old_key",
"=",
"_outer_to_vectorized_indexer",
"(",
"old_key",
",",
"shape",
")",
"if",
"len",
"(",
"old_key",
".",
"tuple",
")",
"==",
"0",
":",
"return",
"new_key",
"new_shape",
"=",
"np",
".",
"broadcast",
"(",
"*",
"old_key",
".",
"tuple",
")",
".",
"shape",
"if",
"isinstance",
"(",
"new_key",
",",
"VectorizedIndexer",
")",
":",
"new_key",
"=",
"_arrayize_vectorized_indexer",
"(",
"new_key",
",",
"new_shape",
")",
"else",
":",
"new_key",
"=",
"_outer_to_vectorized_indexer",
"(",
"new_key",
",",
"new_shape",
")",
"return",
"VectorizedIndexer",
"(",
"tuple",
"(",
"o",
"[",
"new_key",
".",
"tuple",
"]",
"for",
"o",
"in",
"np",
".",
"broadcast_arrays",
"(",
"*",
"old_key",
".",
"tuple",
")",
")",
")"
] | Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key] | [
"Combine",
"two",
"indexers",
"."
] | python | train | 34.96 |
praekeltfoundation/seaworthy | seaworthy/definitions.py | https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/definitions.py#L297-L306 | def stop(self, timeout=5):
"""
Stop the container. The container must have been created.
:param timeout:
Timeout in seconds to wait for the container to stop before sending
a ``SIGKILL``. Default: 5 (half the Docker default)
"""
self.inner().stop(timeout=timeout)
self.inner().reload() | [
"def",
"stop",
"(",
"self",
",",
"timeout",
"=",
"5",
")",
":",
"self",
".",
"inner",
"(",
")",
".",
"stop",
"(",
"timeout",
"=",
"timeout",
")",
"self",
".",
"inner",
"(",
")",
".",
"reload",
"(",
")"
] | Stop the container. The container must have been created.
:param timeout:
Timeout in seconds to wait for the container to stop before sending
a ``SIGKILL``. Default: 5 (half the Docker default) | [
"Stop",
"the",
"container",
".",
"The",
"container",
"must",
"have",
"been",
"created",
"."
] | python | train | 34.9 |
PmagPy/PmagPy | pmagpy/pmag.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L7447-L7573 | def doigrf(lon, lat, alt, date, **kwargs):
"""
Calculates the interpolated (<2015) or extrapolated (>2015) main field and
secular variation coefficients and passes them to the Malin and Barraclough
routine (function pmag.magsyn) to calculate the field from the coefficients.
Parameters:
-----------
lon : east longitude in degrees (0 to 360 or -180 to 180)
lat : latitude in degrees (-90 to 90)
alt : height above mean sea level in km (itype = 1 assumed)
date : Required date in years and decimals of a year (A.D.)
Optional Parameters:
-----------
coeffs : if True, then return the gh coefficients
mod : model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b','shadif14k')
arch3k (Korte et al., 2009)
cals3k (Korte and Constable, 2011)
cals10k.1b (Korte et al., 2011)
pfm9k (Nilsson et al., 2014)
hfm.OL1.A1 (Constable et al., 2016)
cals10k.2 (Constable et al., 2016)
        shadif14k (Pavon-Carrasco et al., 2014)
    NB : the first four of these models are constrained to agree
with gufm1 (Jackson et al., 2000) for the past four centuries
Return
-----------
x : north component of the magnetic field in nT
y : east component of the magnetic field in nT
z : downward component of the magnetic field in nT
f : total magnetic field in nT
By default, igrf12 coefficients are used between 1900 and 2020
from http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html.
To check the results you can run the interactive program at the NGDC
www.ngdc.noaa.gov/geomag-web
"""
from . import coefficients as cf
gh, sv = [], []
colat = 90. - lat
#! convert to colatitude for MB routine
if lon < 0:
lon = lon + 360.
# ensure all positive east longitudes
itype = 1
models, igrf12coeffs = cf.get_igrf12()
if 'mod' in list(kwargs.keys()):
if kwargs['mod'] == 'arch3k':
psvmodels, psvcoeffs = cf.get_arch3k() # use ARCH3k coefficients
elif kwargs['mod'] == 'cals3k':
# use CALS3K_4b coefficients between -1000,1940
psvmodels, psvcoeffs = cf.get_cals3k()
elif kwargs['mod'] == 'pfm9k':
# use PFM9k (Nilsson et al., 2014), coefficients from -7000 to 1900
psvmodels, psvcoeffs = cf.get_pfm9k()
elif kwargs['mod'] == 'hfm10k':
# use HFM.OL1.A1 (Constable et al., 2016), coefficients from -8000
# to 1900
psvmodels, psvcoeffs = cf.get_hfm10k()
elif kwargs['mod'] == 'cals10k.2':
# use CALS10k.2 (Constable et al., 2016), coefficients from -8000
# to 1900
psvmodels, psvcoeffs = cf.get_cals10k_2()
elif kwargs['mod'] == 'shadif14k':
# use CALS10k.2 (Constable et al., 2016), coefficients from -8000
# to 1900
psvmodels, psvcoeffs = cf.get_shadif14k()
else:
# Korte and Constable, 2011; use prior to -1000, back to -8000
psvmodels, psvcoeffs = cf.get_cals10k()
# use geodetic coordinates
if 'models' in kwargs:
if 'mod' in list(kwargs.keys()):
return psvmodels, psvcoeffs
else:
return models, igrf12coeffs
if date < -12000:
print('too old')
return
if 'mod' in list(kwargs.keys()) and kwargs['mod'] == 'shadif14k':
if date < -10000:
incr = 100
else:
incr = 50
model = date - date % incr
gh = psvcoeffs[psvmodels.index(int(model))]
sv = old_div(
(psvcoeffs[psvmodels.index(int(model + incr))] - gh), float(incr))
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon)
elif date < -1000:
incr = 10
model = date - date % incr
gh = psvcoeffs[psvmodels.index(int(model))]
sv = old_div(
(psvcoeffs[psvmodels.index(int(model + incr))] - gh), float(incr))
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon)
elif date < 1900:
if kwargs['mod'] == 'cals10k':
incr = 50
else:
incr = 10
model = date - date % incr
gh = psvcoeffs[psvmodels.index(model)]
if model + incr < 1900:
sv = old_div(
(psvcoeffs[psvmodels.index(model + incr)] - gh), float(incr))
else:
field2 = igrf12coeffs[models.index(1940)][0:120]
sv = old_div((field2 - gh), float(1940 - model))
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon)
else:
model = date - date % 5
if date < 2015:
gh = igrf12coeffs[models.index(model)]
sv = old_div((igrf12coeffs[models.index(model + 5)] - gh), 5.)
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon)
else:
gh = igrf12coeffs[models.index(2015)]
sv = igrf12coeffs[models.index(2015.20)]
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon)
if 'coeffs' in list(kwargs.keys()):
return gh
else:
return x, y, z, f | [
"def",
"doigrf",
"(",
"lon",
",",
"lat",
",",
"alt",
",",
"date",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"import",
"coefficients",
"as",
"cf",
"gh",
",",
"sv",
"=",
"[",
"]",
",",
"[",
"]",
"colat",
"=",
"90.",
"-",
"lat",
"#! convert to colatitude for MB routine",
"if",
"lon",
"<",
"0",
":",
"lon",
"=",
"lon",
"+",
"360.",
"# ensure all positive east longitudes",
"itype",
"=",
"1",
"models",
",",
"igrf12coeffs",
"=",
"cf",
".",
"get_igrf12",
"(",
")",
"if",
"'mod'",
"in",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
":",
"if",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'arch3k'",
":",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_arch3k",
"(",
")",
"# use ARCH3k coefficients",
"elif",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'cals3k'",
":",
"# use CALS3K_4b coefficients between -1000,1940",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_cals3k",
"(",
")",
"elif",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'pfm9k'",
":",
"# use PFM9k (Nilsson et al., 2014), coefficients from -7000 to 1900",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_pfm9k",
"(",
")",
"elif",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'hfm10k'",
":",
"# use HFM.OL1.A1 (Constable et al., 2016), coefficients from -8000",
"# to 1900",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_hfm10k",
"(",
")",
"elif",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'cals10k.2'",
":",
"# use CALS10k.2 (Constable et al., 2016), coefficients from -8000",
"# to 1900",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_cals10k_2",
"(",
")",
"elif",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'shadif14k'",
":",
"# use CALS10k.2 (Constable et al., 2016), coefficients from -8000",
"# to 1900",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_shadif14k",
"(",
")",
"else",
":",
"# Korte and Constable, 2011; use prior to -1000, back to -8000",
"psvmodels",
",",
"psvcoeffs",
"=",
"cf",
".",
"get_cals10k",
"(",
")",
"# use geodetic coordinates",
"if",
"'models'",
"in",
"kwargs",
":",
"if",
"'mod'",
"in",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
":",
"return",
"psvmodels",
",",
"psvcoeffs",
"else",
":",
"return",
"models",
",",
"igrf12coeffs",
"if",
"date",
"<",
"-",
"12000",
":",
"print",
"(",
"'too old'",
")",
"return",
"if",
"'mod'",
"in",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"and",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'shadif14k'",
":",
"if",
"date",
"<",
"-",
"10000",
":",
"incr",
"=",
"100",
"else",
":",
"incr",
"=",
"50",
"model",
"=",
"date",
"-",
"date",
"%",
"incr",
"gh",
"=",
"psvcoeffs",
"[",
"psvmodels",
".",
"index",
"(",
"int",
"(",
"model",
")",
")",
"]",
"sv",
"=",
"old_div",
"(",
"(",
"psvcoeffs",
"[",
"psvmodels",
".",
"index",
"(",
"int",
"(",
"model",
"+",
"incr",
")",
")",
"]",
"-",
"gh",
")",
",",
"float",
"(",
"incr",
")",
")",
"x",
",",
"y",
",",
"z",
",",
"f",
"=",
"magsyn",
"(",
"gh",
",",
"sv",
",",
"model",
",",
"date",
",",
"itype",
",",
"alt",
",",
"colat",
",",
"lon",
")",
"elif",
"date",
"<",
"-",
"1000",
":",
"incr",
"=",
"10",
"model",
"=",
"date",
"-",
"date",
"%",
"incr",
"gh",
"=",
"psvcoeffs",
"[",
"psvmodels",
".",
"index",
"(",
"int",
"(",
"model",
")",
")",
"]",
"sv",
"=",
"old_div",
"(",
"(",
"psvcoeffs",
"[",
"psvmodels",
".",
"index",
"(",
"int",
"(",
"model",
"+",
"incr",
")",
")",
"]",
"-",
"gh",
")",
",",
"float",
"(",
"incr",
")",
")",
"x",
",",
"y",
",",
"z",
",",
"f",
"=",
"magsyn",
"(",
"gh",
",",
"sv",
",",
"model",
",",
"date",
",",
"itype",
",",
"alt",
",",
"colat",
",",
"lon",
")",
"elif",
"date",
"<",
"1900",
":",
"if",
"kwargs",
"[",
"'mod'",
"]",
"==",
"'cals10k'",
":",
"incr",
"=",
"50",
"else",
":",
"incr",
"=",
"10",
"model",
"=",
"date",
"-",
"date",
"%",
"incr",
"gh",
"=",
"psvcoeffs",
"[",
"psvmodels",
".",
"index",
"(",
"model",
")",
"]",
"if",
"model",
"+",
"incr",
"<",
"1900",
":",
"sv",
"=",
"old_div",
"(",
"(",
"psvcoeffs",
"[",
"psvmodels",
".",
"index",
"(",
"model",
"+",
"incr",
")",
"]",
"-",
"gh",
")",
",",
"float",
"(",
"incr",
")",
")",
"else",
":",
"field2",
"=",
"igrf12coeffs",
"[",
"models",
".",
"index",
"(",
"1940",
")",
"]",
"[",
"0",
":",
"120",
"]",
"sv",
"=",
"old_div",
"(",
"(",
"field2",
"-",
"gh",
")",
",",
"float",
"(",
"1940",
"-",
"model",
")",
")",
"x",
",",
"y",
",",
"z",
",",
"f",
"=",
"magsyn",
"(",
"gh",
",",
"sv",
",",
"model",
",",
"date",
",",
"itype",
",",
"alt",
",",
"colat",
",",
"lon",
")",
"else",
":",
"model",
"=",
"date",
"-",
"date",
"%",
"5",
"if",
"date",
"<",
"2015",
":",
"gh",
"=",
"igrf12coeffs",
"[",
"models",
".",
"index",
"(",
"model",
")",
"]",
"sv",
"=",
"old_div",
"(",
"(",
"igrf12coeffs",
"[",
"models",
".",
"index",
"(",
"model",
"+",
"5",
")",
"]",
"-",
"gh",
")",
",",
"5.",
")",
"x",
",",
"y",
",",
"z",
",",
"f",
"=",
"magsyn",
"(",
"gh",
",",
"sv",
",",
"model",
",",
"date",
",",
"itype",
",",
"alt",
",",
"colat",
",",
"lon",
")",
"else",
":",
"gh",
"=",
"igrf12coeffs",
"[",
"models",
".",
"index",
"(",
"2015",
")",
"]",
"sv",
"=",
"igrf12coeffs",
"[",
"models",
".",
"index",
"(",
"2015.20",
")",
"]",
"x",
",",
"y",
",",
"z",
",",
"f",
"=",
"magsyn",
"(",
"gh",
",",
"sv",
",",
"model",
",",
"date",
",",
"itype",
",",
"alt",
",",
"colat",
",",
"lon",
")",
"if",
"'coeffs'",
"in",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
":",
"return",
"gh",
"else",
":",
"return",
"x",
",",
"y",
",",
"z",
",",
"f"
] | Calculates the interpolated (<2015) or extrapolated (>2015) main field and
secular variation coefficients and passes them to the Malin and Barraclough
routine (function pmag.magsyn) to calculate the field from the coefficients.
Parameters:
-----------
lon : east longitude in degrees (0 to 360 or -180 to 180)
lat : latitude in degrees (-90 to 90)
alt : height above mean sea level in km (itype = 1 assumed)
date : Required date in years and decimals of a year (A.D.)
Optional Parameters:
-----------
coeffs : if True, then return the gh coefficients
mod : model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b','shadif14k')
arch3k (Korte et al., 2009)
cals3k (Korte and Constable, 2011)
cals10k.1b (Korte et al., 2011)
pfm9k (Nilsson et al., 2014)
hfm.OL1.A1 (Constable et al., 2016)
cals10k.2 (Constable et al., 2016)
        shadif14k (Pavon-Carrasco et al., 2014)
    NB : the first four of these models are constrained to agree
with gufm1 (Jackson et al., 2000) for the past four centuries
Return
-----------
x : north component of the magnetic field in nT
y : east component of the magnetic field in nT
z : downward component of the magnetic field in nT
f : total magnetic field in nT
By default, igrf12 coefficients are used between 1900 and 2020
from http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html.
To check the results you can run the interactive program at the NGDC
www.ngdc.noaa.gov/geomag-web | [
"Calculates",
"the",
"interpolated",
"(",
"<2015",
")",
"or",
"extrapolated",
"(",
">",
"2015",
")",
"main",
"field",
"and",
"secular",
"variation",
"coefficients",
"and",
"passes",
"them",
"to",
"the",
"Malin",
"and",
"Barraclough",
"routine",
"(",
"function",
"pmag",
".",
"magsyn",
")",
"to",
"calculate",
"the",
"field",
"from",
"the",
"coefficients",
"."
] | python | train | 39.818898 |