text (string, lengths 75–104k) | code_tokens (sequence) | avg_line_len (float64, 7.91–980) | score (float64, 0–0.18) | texts (sequence) | scores (sequence) | num_lines (int64, 3–2.77k) | avg_score (float64, 0–0.37) |
---|---|---|---|---|---|---|---|
def target_from_compact(bits):
"""\
Extract a full target from its compact representation, undoing the
transformation x=compact_from_target(t). See compact_from_target() for
more information on this 32-bit floating point format."""
size = bits >> 24
word = bits & 0x007fffff
if size < 3:
word >>= 8 * (3 - size)
else:
word <<= 8 * (size - 3)
if bits & 0x00800000:
word = -word
return word | [
"def",
"target_from_compact",
"(",
"bits",
")",
":",
"size",
"=",
"bits",
">>",
"24",
"word",
"=",
"bits",
"&",
"0x007fffff",
"if",
"size",
"<",
"3",
":",
"word",
">>=",
"8",
"*",
"(",
"3",
"-",
"size",
")",
"else",
":",
"word",
"<<=",
"8",
"*",
"(",
"size",
"-",
"3",
")",
"if",
"bits",
"&",
"0x00800000",
":",
"word",
"=",
"-",
"word",
"return",
"word"
] | 31.357143 | 0.002212 | [
"def target_from_compact(bits):\n",
" \"\"\"\\\n",
" Extract a full target from its compact representation, undoing the\n",
" transformation x=compact_from_target(t). See compact_from_target() for\n",
" more information on this 32-bit floating point format.\"\"\"\n",
" size = bits >> 24\n",
" word = bits & 0x007fffff\n",
" if size < 3:\n",
" word >>= 8 * (3 - size)\n",
" else:\n",
" word <<= 8 * (size - 3)\n",
" if bits & 0x00800000:\n",
" word = -word\n",
" return word"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 14 | 0.004762 |
def is_integer(value,
coerce_value = False,
minimum = None,
maximum = None,
base = 10,
**kwargs):
"""Indicate whether ``value`` contains a whole number.
:param value: The value to evaluate.
:param coerce_value: If ``True``, will return ``True`` if ``value`` can be coerced
      to a whole number. If ``False``, will only return ``True`` if ``value`` is already
a whole number (regardless of type). Defaults to ``False``.
:type coerce_value: :class:`bool <python:bool>`
:param minimum: If supplied, will make sure that ``value`` is greater than or
equal to this value.
:type minimum: numeric
:param maximum: If supplied, will make sure that ``value`` is less than or
equal to this value.
:type maximum: numeric
:param base: Indicates the base that is used to determine the integer value.
The allowed values are 0 and 2–36. Base-2, -8, and -16 literals can be
optionally prefixed with ``0b/0B``, ``0o/0O/0``, or ``0x/0X``, as with
integer literals in code. Base 0 means to interpret the string exactly as
an integer literal, so that the actual base is 2, 8, 10, or 16. Defaults to
``10``.
:type base: :class:`int <python:int>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.integer(value,
coerce_value = coerce_value,
minimum = minimum,
maximum = maximum,
base = base,
**kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | [
"def",
"is_integer",
"(",
"value",
",",
"coerce_value",
"=",
"False",
",",
"minimum",
"=",
"None",
",",
"maximum",
"=",
"None",
",",
"base",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"value",
"=",
"validators",
".",
"integer",
"(",
"value",
",",
"coerce_value",
"=",
"coerce_value",
",",
"minimum",
"=",
"minimum",
",",
"maximum",
"=",
"maximum",
",",
"base",
"=",
"base",
",",
"*",
"*",
"kwargs",
")",
"except",
"SyntaxError",
"as",
"error",
":",
"raise",
"error",
"except",
"Exception",
":",
"return",
"False",
"return",
"True"
] | 38.078431 | 0.011546 | [
"def is_integer(value,\n",
" coerce_value = False,\n",
" minimum = None,\n",
" maximum = None,\n",
" base = 10,\n",
" **kwargs):\n",
" \"\"\"Indicate whether ``value`` contains a whole number.\n",
"\n",
" :param value: The value to evaluate.\n",
"\n",
" :param coerce_value: If ``True``, will return ``True`` if ``value`` can be coerced\n",
" to whole number. If ``False``, will only return ``True`` if ``value`` is already\n",
" a whole number (regardless of type). Defaults to ``False``.\n",
" :type coerce_value: :class:`bool <python:bool>`\n",
"\n",
" :param minimum: If supplied, will make sure that ``value`` is greater than or\n",
" equal to this value.\n",
" :type minimum: numeric\n",
"\n",
" :param maximum: If supplied, will make sure that ``value`` is less than or\n",
" equal to this value.\n",
" :type maximum: numeric\n",
"\n",
" :param base: Indicates the base that is used to determine the integer value.\n",
" The allowed values are 0 and 2–36. Base-2, -8, and -16 literals can be\n",
" optionally prefixed with ``0b/0B``, ``0o/0O/0``, or ``0x/0X``, as with\n",
" integer literals in code. Base 0 means to interpret the string exactly as\n",
" an integer literal, so that the actual base is 2, 8, 10, or 16. Defaults to\n",
" ``10``.\n",
" :type base: :class:`int <python:int>`\n",
"\n",
" :returns: ``True`` if ``value`` is valid, ``False`` if it is not.\n",
" :rtype: :class:`bool <python:bool>`\n",
"\n",
" :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates\n",
" keyword parameters passed to the underlying validator\n",
"\n",
" \"\"\"\n",
" try:\n",
" value = validators.integer(value,\n",
" coerce_value = coerce_value,\n",
" minimum = minimum,\n",
" maximum = maximum,\n",
" base = base,\n",
" **kwargs)\n",
" except SyntaxError as error:\n",
" raise error\n",
" except Exception:\n",
" return False\n",
"\n",
" return True"
] | [
0,
0.05405405405405406,
0.06451612903225806,
0.06451612903225806,
0.07692307692307693,
0,
0,
0,
0,
0,
0.011494252873563218,
0.011494252873563218,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0.03125,
0.037037037037037035,
0.037037037037037035,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 51 | 0.010674 |
def _create_model(self, X, Y):
"""
Creates the model given some input data X and Y.
"""
from sklearn.ensemble import RandomForestRegressor
self.X = X
self.Y = Y
self.model = RandomForestRegressor(bootstrap = self.bootstrap,
criterion = self.criterion,
max_depth = self.max_depth,
max_features = self.max_features,
max_leaf_nodes = self.max_leaf_nodes,
min_samples_leaf = self.min_samples_leaf,
min_samples_split = self.min_samples_split,
min_weight_fraction_leaf = self.min_weight_fraction_leaf,
n_estimators = self.n_estimators,
n_jobs = self.n_jobs,
oob_score = self.oob_score,
random_state = self.random_state,
verbose = self.verbose,
warm_start = self.warm_start)
#self.model = RandomForestRegressor()
self.model.fit(X,Y.flatten()) | [
"def",
"_create_model",
"(",
"self",
",",
"X",
",",
"Y",
")",
":",
"from",
"sklearn",
".",
"ensemble",
"import",
"RandomForestRegressor",
"self",
".",
"X",
"=",
"X",
"self",
".",
"Y",
"=",
"Y",
"self",
".",
"model",
"=",
"RandomForestRegressor",
"(",
"bootstrap",
"=",
"self",
".",
"bootstrap",
",",
"criterion",
"=",
"self",
".",
"criterion",
",",
"max_depth",
"=",
"self",
".",
"max_depth",
",",
"max_features",
"=",
"self",
".",
"max_features",
",",
"max_leaf_nodes",
"=",
"self",
".",
"max_leaf_nodes",
",",
"min_samples_leaf",
"=",
"self",
".",
"min_samples_leaf",
",",
"min_samples_split",
"=",
"self",
".",
"min_samples_split",
",",
"min_weight_fraction_leaf",
"=",
"self",
".",
"min_weight_fraction_leaf",
",",
"n_estimators",
"=",
"self",
".",
"n_estimators",
",",
"n_jobs",
"=",
"self",
".",
"n_jobs",
",",
"oob_score",
"=",
"self",
".",
"oob_score",
",",
"random_state",
"=",
"self",
".",
"random_state",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"warm_start",
"=",
"self",
".",
"warm_start",
")",
"#self.model = RandomForestRegressor()",
"self",
".",
"model",
".",
"fit",
"(",
"X",
",",
"Y",
".",
"flatten",
"(",
")",
")"
] | 56 | 0.026335 | [
"def _create_model(self, X, Y):\n",
" \"\"\"\n",
" Creates the model given some input data X and Y.\n",
" \"\"\"\n",
" from sklearn.ensemble import RandomForestRegressor\n",
" self.X = X\n",
" self.Y = Y\n",
" self.model = RandomForestRegressor(bootstrap = self.bootstrap,\n",
" criterion = self.criterion,\n",
" max_depth = self.max_depth,\n",
" max_features = self.max_features,\n",
" max_leaf_nodes = self.max_leaf_nodes,\n",
" min_samples_leaf = self.min_samples_leaf,\n",
" min_samples_split = self.min_samples_split,\n",
" min_weight_fraction_leaf = self.min_weight_fraction_leaf,\n",
" n_estimators = self.n_estimators,\n",
" n_jobs = self.n_jobs,\n",
" oob_score = self.oob_score,\n",
" random_state = self.random_state,\n",
" verbose = self.verbose,\n",
" warm_start = self.warm_start)\n",
"\n",
" #self.model = RandomForestRegressor()\n",
" self.model.fit(X,Y.flatten())"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.028169014084507043,
0.028169014084507043,
0.028169014084507043,
0.025974025974025976,
0.037037037037037035,
0.03529411764705882,
0.034482758620689655,
0.0297029702970297,
0.025974025974025976,
0.03076923076923077,
0.028169014084507043,
0.025974025974025976,
0.029850746268656716,
0.0273972602739726,
0,
0.021739130434782608,
0.05405405405405406
] | 24 | 0.023927 |
def calculate_current_allocation(self):
""" Calculates the current allocation % based on the value """
for ac in self.asset_classes:
ac.curr_alloc = ac.curr_value * 100 / self.total_amount | [
"def",
"calculate_current_allocation",
"(",
"self",
")",
":",
"for",
"ac",
"in",
"self",
".",
"asset_classes",
":",
"ac",
".",
"curr_alloc",
"=",
"ac",
".",
"curr_value",
"*",
"100",
"/",
"self",
".",
"total_amount"
] | 53.25 | 0.009259 | [
"def calculate_current_allocation(self):\n",
" \"\"\" Calculates the current allocation % based on the value \"\"\"\n",
" for ac in self.asset_classes:\n",
" ac.curr_alloc = ac.curr_value * 100 / self.total_amount"
] | [
0,
0.014084507042253521,
0,
0.014925373134328358
] | 4 | 0.007252 |
def _run_apps(self, paths):
""" Runs apps for the provided paths.
"""
for path in paths:
common.shell_process(path, background=True)
time.sleep(0.2) | [
"def",
"_run_apps",
"(",
"self",
",",
"paths",
")",
":",
"for",
"path",
"in",
"paths",
":",
"common",
".",
"shell_process",
"(",
"path",
",",
"background",
"=",
"True",
")",
"time",
".",
"sleep",
"(",
"0.2",
")"
] | 27.857143 | 0.00995 | [
"def _run_apps(self, paths):\n",
" \"\"\" Runs apps for the provided paths.\n",
" \"\"\"\n",
"\n",
" for path in paths:\n",
" common.shell_process(path, background=True)\n",
" time.sleep(0.2)"
] | [
0,
0.021739130434782608,
0,
0,
0,
0,
0.037037037037037035
] | 7 | 0.008397 |
def transmissions(self, direction="outgoing", status="all", failed=False):
"""Get transmissions sent to or from this node.
Direction can be "all", "incoming" or "outgoing" (default).
Status can be "all" (default), "pending", or "received".
failed can be True, False or "all"
"""
# check parameters
if direction not in ["incoming", "outgoing", "all"]:
raise(ValueError("You cannot get transmissions of direction {}."
.format(direction) +
"Type can only be incoming, outgoing or all."))
if status not in ["all", "pending", "received"]:
raise(ValueError("You cannot get transmission of status {}."
.format(status) +
"Status can only be pending, received or all"))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid transmission failed"
.format(failed))
# get transmissions
if direction == "all":
if status == "all":
return Transmission.query\
.filter(and_(Transmission.failed == False,
or_(Transmission.destination_id == self.id,
Transmission.origin_id == self.id)))\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.status == status,
or_(Transmission.destination_id == self.id,
Transmission.origin_id == self.id)))\
.all()
if direction == "incoming":
if status == "all":
return Transmission.query\
.filter_by(failed=False, destination_id=self.id)\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.destination_id == self.id,
Transmission.status == status))\
.all()
if direction == "outgoing":
if status == "all":
return Transmission.query\
.filter_by(failed=False, origin_id=self.id)\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.origin_id == self.id,
Transmission.status == status))\
.all() | [
"def",
"transmissions",
"(",
"self",
",",
"direction",
"=",
"\"outgoing\"",
",",
"status",
"=",
"\"all\"",
",",
"failed",
"=",
"False",
")",
":",
"# check parameters",
"if",
"direction",
"not",
"in",
"[",
"\"incoming\"",
",",
"\"outgoing\"",
",",
"\"all\"",
"]",
":",
"raise",
"(",
"ValueError",
"(",
"\"You cannot get transmissions of direction {}.\"",
".",
"format",
"(",
"direction",
")",
"+",
"\"Type can only be incoming, outgoing or all.\"",
")",
")",
"if",
"status",
"not",
"in",
"[",
"\"all\"",
",",
"\"pending\"",
",",
"\"received\"",
"]",
":",
"raise",
"(",
"ValueError",
"(",
"\"You cannot get transmission of status {}.\"",
".",
"format",
"(",
"status",
")",
"+",
"\"Status can only be pending, received or all\"",
")",
")",
"if",
"failed",
"not",
"in",
"[",
"\"all\"",
",",
"False",
",",
"True",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid transmission failed\"",
".",
"format",
"(",
"failed",
")",
")",
"# get transmissions",
"if",
"direction",
"==",
"\"all\"",
":",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"or_",
"(",
"Transmission",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"origin_id",
"==",
"self",
".",
"id",
")",
")",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"Transmission",
".",
"status",
"==",
"status",
",",
"or_",
"(",
"Transmission",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"origin_id",
"==",
"self",
".",
"id",
")",
")",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"incoming\"",
":",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter_by",
"(",
"failed",
"=",
"False",
",",
"destination_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"Transmission",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"status",
"==",
"status",
")",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"outgoing\"",
":",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter_by",
"(",
"failed",
"=",
"False",
",",
"origin_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"Transmission",
".",
"origin_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"status",
"==",
"status",
")",
")",
".",
"all",
"(",
")"
] | 45.40678 | 0.002192 | [
"def transmissions(self, direction=\"outgoing\", status=\"all\", failed=False):\n",
" \"\"\"Get transmissions sent to or from this node.\n",
"\n",
" Direction can be \"all\", \"incoming\" or \"outgoing\" (default).\n",
" Status can be \"all\" (default), \"pending\", or \"received\".\n",
" failed can be True, False or \"all\"\n",
" \"\"\"\n",
" # check parameters\n",
" if direction not in [\"incoming\", \"outgoing\", \"all\"]:\n",
" raise(ValueError(\"You cannot get transmissions of direction {}.\"\n",
" .format(direction) +\n",
" \"Type can only be incoming, outgoing or all.\"))\n",
"\n",
" if status not in [\"all\", \"pending\", \"received\"]:\n",
" raise(ValueError(\"You cannot get transmission of status {}.\"\n",
" .format(status) +\n",
" \"Status can only be pending, received or all\"))\n",
"\n",
" if failed not in [\"all\", False, True]:\n",
" raise ValueError(\"{} is not a valid transmission failed\"\n",
" .format(failed))\n",
"\n",
" # get transmissions\n",
" if direction == \"all\":\n",
" if status == \"all\":\n",
" return Transmission.query\\\n",
" .filter(and_(Transmission.failed == False,\n",
" or_(Transmission.destination_id == self.id,\n",
" Transmission.origin_id == self.id)))\\\n",
" .all()\n",
" else:\n",
" return Transmission.query\\\n",
" .filter(and_(Transmission.failed == False,\n",
" Transmission.status == status,\n",
" or_(Transmission.destination_id == self.id,\n",
" Transmission.origin_id == self.id)))\\\n",
" .all()\n",
" if direction == \"incoming\":\n",
" if status == \"all\":\n",
" return Transmission.query\\\n",
" .filter_by(failed=False, destination_id=self.id)\\\n",
" .all()\n",
" else:\n",
" return Transmission.query\\\n",
" .filter(and_(Transmission.failed == False,\n",
" Transmission.destination_id == self.id,\n",
" Transmission.status == status))\\\n",
" .all()\n",
" if direction == \"outgoing\":\n",
" if status == \"all\":\n",
" return Transmission.query\\\n",
" .filter_by(failed=False, origin_id=self.id)\\\n",
" .all()\n",
" else:\n",
" return Transmission.query\\\n",
" .filter(and_(Transmission.failed == False,\n",
" Transmission.origin_id == self.id,\n",
" Transmission.status == status))\\\n",
" .all()"
] | [
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0.038461538461538464
] | 59 | 0.002031 |
def additional_files(self):
"""Get a list of absolute paths to the additional config files."""
return [os.path.join(f, self.filename) for f in self.additional_dirs] | [
"def",
"additional_files",
"(",
"self",
")",
":",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"f",
",",
"self",
".",
"filename",
")",
"for",
"f",
"in",
"self",
".",
"additional_dirs",
"]"
] | 59.333333 | 0.011111 | [
"def additional_files(self):\n",
" \"\"\"Get a list of absolute paths to the additional config files.\"\"\"\n",
" return [os.path.join(f, self.filename) for f in self.additional_dirs]"
] | [
0,
0.013333333333333334,
0.012987012987012988
] | 3 | 0.008773 |
def channels_create(self, name, **kwargs):
"""Creates a new public channel, optionally including users."""
return self.__call_api_post('channels.create', name=name, kwargs=kwargs) | [
"def",
"channels_create",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"__call_api_post",
"(",
"'channels.create'",
",",
"name",
"=",
"name",
",",
"kwargs",
"=",
"kwargs",
")"
] | 64.333333 | 0.015385 | [
"def channels_create(self, name, **kwargs):\n",
" \"\"\"Creates a new public channel, optionally including users.\"\"\"\n",
" return self.__call_api_post('channels.create', name=name, kwargs=kwargs)"
] | [
0,
0.013888888888888888,
0.025
] | 3 | 0.012963 |
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
arrays assuming that the same position in these arrays map the
same "physical" object.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
if type(v)==list:
		vboot=[]	# list of bootstrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot | [
"def",
"bootstrap",
"(",
"v",
")",
":",
"if",
"type",
"(",
"v",
")",
"==",
"list",
":",
"vboot",
"=",
"[",
"]",
"# list of boostrapped arrays",
"n",
"=",
"v",
"[",
"0",
"]",
".",
"size",
"iran",
"=",
"scipy",
".",
"random",
".",
"randint",
"(",
"0",
",",
"n",
",",
"n",
")",
"# Array of random indexes",
"for",
"x",
"in",
"v",
":",
"vboot",
".",
"append",
"(",
"x",
"[",
"iran",
"]",
")",
"else",
":",
"# if v is an array, not a list of arrays",
"n",
"=",
"v",
".",
"size",
"iran",
"=",
"scipy",
".",
"random",
".",
"randint",
"(",
"0",
",",
"n",
",",
"n",
")",
"# Array of random indexes",
"vboot",
"=",
"v",
"[",
"iran",
"]",
"return",
"vboot"
] | 27.888889 | 0.043646 | [
"def bootstrap(v):\n",
"\t\"\"\"\n",
"Constructs Monte Carlo simulated data set using the\n",
"Bootstrap algorithm. \n",
"\n",
"Usage:\n",
"\n",
">>> bootstrap(x)\n",
"\n",
"where x is either an array or a list of arrays. If it is a\n",
"list, the code returns the corresponding list of bootstrapped \n",
"arrays assuming that the same position in these arrays map the \n",
"same \"physical\" object.\n",
"\n",
"Rodrigo Nemmen, http://goo.gl/8S1Oo\n",
"\t\"\"\"\n",
"\tif type(v)==list:\n",
"\t\tvboot=[]\t# list of boostrapped arrays\n",
"\t\tn=v[0].size\n",
"\t\tiran=scipy.random.randint(0,n,n)\t# Array of random indexes\n",
"\t\tfor x in v:\tvboot.append(x[iran])\n",
"\telse:\t# if v is an array, not a list of arrays\n",
"\t\tn=v.size\n",
"\t\tiran=scipy.random.randint(0,n,n)\t# Array of random indexes\n",
"\t\tvboot=v[iran]\n",
"\t\n",
"\treturn vboot"
] | [
0,
0.2,
0,
0.009615384615384616,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0.015625,
0,
0,
0,
0.2,
0.10526315789473684,
0.075,
0.14285714285714285,
0.08196721311475409,
0.05555555555555555,
0.041666666666666664,
0.18181818181818182,
0.08196721311475409,
0.125,
1,
0.15384615384615385
] | 27 | 0.092076 |
def prune(self, dir):
"""Filter out files from 'dir/'."""
match = translate_pattern(os.path.join(dir, '**'))
return self._remove_files(match.match) | [
"def",
"prune",
"(",
"self",
",",
"dir",
")",
":",
"match",
"=",
"translate_pattern",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"'**'",
")",
")",
"return",
"self",
".",
"_remove_files",
"(",
"match",
".",
"match",
")"
] | 42 | 0.011696 | [
"def prune(self, dir):\n",
" \"\"\"Filter out files from 'dir/'.\"\"\"\n",
" match = translate_pattern(os.path.join(dir, '**'))\n",
" return self._remove_files(match.match)"
] | [
0,
0.022727272727272728,
0,
0.021739130434782608
] | 4 | 0.011117 |
def _evaluate_selection_mask(self, name="default", i1=None, i2=None, selection=None, cache=False):
"""Internal use, ignores the filter"""
i1 = i1 or 0
i2 = i2 or len(self)
scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache)
return scope.evaluate(name) | [
"def",
"_evaluate_selection_mask",
"(",
"self",
",",
"name",
"=",
"\"default\"",
",",
"i1",
"=",
"None",
",",
"i2",
"=",
"None",
",",
"selection",
"=",
"None",
",",
"cache",
"=",
"False",
")",
":",
"i1",
"=",
"i1",
"or",
"0",
"i2",
"=",
"i2",
"or",
"len",
"(",
"self",
")",
"scope",
"=",
"scopes",
".",
"_BlockScopeSelection",
"(",
"self",
",",
"i1",
",",
"i2",
",",
"selection",
",",
"cache",
"=",
"cache",
")",
"return",
"scope",
".",
"evaluate",
"(",
"name",
")"
] | 51.333333 | 0.01278 | [
"def _evaluate_selection_mask(self, name=\"default\", i1=None, i2=None, selection=None, cache=False):\n",
" \"\"\"Internal use, ignores the filter\"\"\"\n",
" i1 = i1 or 0\n",
" i2 = i2 or len(self)\n",
" scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache)\n",
" return scope.evaluate(name)"
] | [
0.010101010101010102,
0.02127659574468085,
0,
0,
0.012195121951219513,
0.02857142857142857
] | 6 | 0.012024 |
def _get_channels(self):
"""
获取channel列表
"""
self._channel_list = [
# {'name': '红心兆赫', 'channel_id': -3},
{'name': '我的私人兆赫', 'channel_id': 0},
{'name': '每日私人歌单', 'channel_id': -2},
{'name': '豆瓣精选兆赫', 'channel_id': -10},
# 心情 / 场景
{'name': '工作学习', 'channel_id': 153},
{'name': '户外', 'channel_id': 151},
{'name': '休息', 'channel_id': 152},
{'name': '亢奋', 'channel_id': 154},
{'name': '舒缓', 'channel_id': 155},
{'name': 'Easy', 'channel_id': 77},
{'name': '咖啡', 'channel_id': 32},
{'name': '运动', 'channel_id': 257},
# 语言 / 年代
{'name': '华语', 'channel_id': 1},
{'name': '欧美', 'channel_id': 2},
{'name': '七零', 'channel_id': 3},
{'name': '八零', 'channel_id': 4},
{'name': '九零', 'channel_id': 5},
{'name': '粤语', 'channel_id': 6},
{'name': '日语', 'channel_id': 17},
{'name': '韩语', 'channel_id': 18},
{'name': '法语', 'channel_id': 22},
{'name': '新歌', 'channel_id': 61},
# 风格 / 流派
{'name': '流行', 'channel_id': 194},
{'name': '摇滚', 'channel_id': 7},
{'name': '民谣', 'channel_id': 8},
{'name': '轻音乐', 'channel_id': 9},
{'name': '电影原声', 'channel_id': 10},
{'name': '爵士', 'channel_id': 13},
{'name': '电子', 'channel_id': 14},
{'name': '说唱', 'channel_id': 15},
{'name': 'R&B', 'channel_id': 16},
{'name': '古典', 'channel_id': 27},
{'name': '动漫', 'channel_id': 28},
{'name': '世界音乐', 'channel_id': 187},
{'name': '布鲁斯', 'channel_id': 188},
{'name': '拉丁', 'channel_id': 189},
{'name': '雷鬼', 'channel_id': 190},
{'name': '小清新', 'channel_id': 76}
] | [
"def",
"_get_channels",
"(",
"self",
")",
":",
"self",
".",
"_channel_list",
"=",
"[",
"# {'name': '红心兆赫', 'channel_id': -3},",
"{",
"'name'",
":",
"'我的私人兆赫', 'channel_i",
"d",
": 0},",
"",
"",
"",
"",
"{",
"'name'",
":",
"'每日私人歌单', 'channel_i",
"d",
": -2},",
"",
"",
"",
"",
"",
"{",
"'name'",
":",
"'豆瓣精选兆赫', 'channel_i",
"d",
": -10},",
"",
"",
"",
"",
"",
"# 心情 / 场景",
"{",
"'name'",
":",
"'工作学习', 'chann",
"e",
"_id': 153},",
"",
"",
"",
"",
"{",
"'name'",
":",
"'户外', 'c",
"h",
"nnel_id': 15",
"1",
",",
"",
"",
"{",
"'name'",
":",
"'休息', 'c",
"h",
"nnel_id': 15",
"2",
",",
"",
"",
"{",
"'name'",
":",
"'亢奋', 'c",
"h",
"nnel_id': 15",
"4",
",",
"",
"",
"{",
"'name'",
":",
"'舒缓', 'c",
"h",
"nnel_id': 15",
"5",
",",
"",
"",
"{",
"'name'",
":",
"'Easy'",
",",
"'channel_id'",
":",
"77",
"}",
",",
"{",
"'name'",
":",
"'咖啡', 'c",
"h",
"nnel_id': 32",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'运动', 'c",
"h",
"nnel_id': 25",
"7",
",",
"",
"",
"# 语言 / 年代",
"{",
"'name'",
":",
"'华语', 'c",
"h",
"nnel_id': 1}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'欧美', 'c",
"h",
"nnel_id': 2}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'七零', 'c",
"h",
"nnel_id': 3}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'八零', 'c",
"h",
"nnel_id': 4}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'九零', 'c",
"h",
"nnel_id': 5}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'粤语', 'c",
"h",
"nnel_id': 6}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'日语', 'c",
"h",
"nnel_id': 17",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'韩语', 'c",
"h",
"nnel_id': 18",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'法语', 'c",
"h",
"nnel_id': 22",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'新歌', 'c",
"h",
"nnel_id': 61",
"}",
"",
"",
"",
"# 风格 / 流派",
"{",
"'name'",
":",
"'流行', 'c",
"h",
"nnel_id': 19",
"4",
",",
"",
"",
"{",
"'name'",
":",
"'摇滚', 'c",
"h",
"nnel_id': 7}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'民谣', 'c",
"h",
"nnel_id': 8}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'轻音乐', 'cha",
"n",
"el_id': 9},",
"",
"",
"",
"",
"{",
"'name'",
":",
"'电影原声', 'chann",
"e",
"_id': 10},",
"",
"",
"",
"",
"{",
"'name'",
":",
"'爵士', 'c",
"h",
"nnel_id': 13",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'电子', 'c",
"h",
"nnel_id': 14",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'说唱', 'c",
"h",
"nnel_id': 15",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'R&B'",
",",
"'channel_id'",
":",
"16",
"}",
",",
"{",
"'name'",
":",
"'古典', 'c",
"h",
"nnel_id': 27",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'动漫', 'c",
"h",
"nnel_id': 28",
"}",
"",
"",
"",
"{",
"'name'",
":",
"'世界音乐', 'chann",
"e",
"_id': 187},",
"",
"",
"",
"",
"{",
"'name'",
":",
"'布鲁斯', 'cha",
"n",
"el_id': 188}",
",",
"",
"",
"",
"{",
"'name'",
":",
"'拉丁', 'c",
"h",
"nnel_id': 18",
"9",
",",
"",
"",
"{",
"'name'",
":",
"'雷鬼', 'c",
"h",
"nnel_id': 19",
"0",
",",
"",
"",
"{",
"'name'",
":",
"'小清新', 'cha",
"n",
"el_id': 76}",
"",
"",
"",
"]"
] | 40.553191 | 0.001025 | [
"def _get_channels(self):\n",
" \"\"\"\n",
" 获取channel列表\n",
" \"\"\"\n",
" self._channel_list = [\n",
" # {'name': '红心兆赫', 'channel_id': -3},\n",
" {'name': '我的私人兆赫', 'channel_id': 0},\n",
" {'name': '每日私人歌单', 'channel_id': -2},\n",
" {'name': '豆瓣精选兆赫', 'channel_id': -10},\n",
" # 心情 / 场景\n",
" {'name': '工作学习', 'channel_id': 153},\n",
" {'name': '户外', 'channel_id': 151},\n",
" {'name': '休息', 'channel_id': 152},\n",
" {'name': '亢奋', 'channel_id': 154},\n",
" {'name': '舒缓', 'channel_id': 155},\n",
" {'name': 'Easy', 'channel_id': 77},\n",
" {'name': '咖啡', 'channel_id': 32},\n",
" {'name': '运动', 'channel_id': 257},\n",
" # 语言 / 年代\n",
" {'name': '华语', 'channel_id': 1},\n",
" {'name': '欧美', 'channel_id': 2},\n",
" {'name': '七零', 'channel_id': 3},\n",
" {'name': '八零', 'channel_id': 4},\n",
" {'name': '九零', 'channel_id': 5},\n",
" {'name': '粤语', 'channel_id': 6},\n",
" {'name': '日语', 'channel_id': 17},\n",
" {'name': '韩语', 'channel_id': 18},\n",
" {'name': '法语', 'channel_id': 22},\n",
" {'name': '新歌', 'channel_id': 61},\n",
" # 风格 / 流派\n",
" {'name': '流行', 'channel_id': 194},\n",
" {'name': '摇滚', 'channel_id': 7},\n",
" {'name': '民谣', 'channel_id': 8},\n",
" {'name': '轻音乐', 'channel_id': 9},\n",
" {'name': '电影原声', 'channel_id': 10},\n",
" {'name': '爵士', 'channel_id': 13},\n",
" {'name': '电子', 'channel_id': 14},\n",
" {'name': '说唱', 'channel_id': 15},\n",
" {'name': 'R&B', 'channel_id': 16},\n",
" {'name': '古典', 'channel_id': 27},\n",
" {'name': '动漫', 'channel_id': 28},\n",
" {'name': '世界音乐', 'channel_id': 187},\n",
" {'name': '布鲁斯', 'channel_id': 188},\n",
" {'name': '拉丁', 'channel_id': 189},\n",
" {'name': '雷鬼', 'channel_id': 190},\n",
" {'name': '小清新', 'channel_id': 76}\n",
" ]"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111
] | 47 | 0.004137 |
def btc_make_p2sh_address( script_hex ):
"""
Make a P2SH address from a hex script
"""
h = hashing.bin_hash160(binascii.unhexlify(script_hex))
addr = bin_hash160_to_address(h, version_byte=multisig_version_byte)
return addr | [
"def",
"btc_make_p2sh_address",
"(",
"script_hex",
")",
":",
"h",
"=",
"hashing",
".",
"bin_hash160",
"(",
"binascii",
".",
"unhexlify",
"(",
"script_hex",
")",
")",
"addr",
"=",
"bin_hash160_to_address",
"(",
"h",
",",
"version_byte",
"=",
"multisig_version_byte",
")",
"return",
"addr"
] | 34.428571 | 0.012146 | [
"def btc_make_p2sh_address( script_hex ):\n",
" \"\"\"\n",
" Make a P2SH address from a hex script\n",
" \"\"\"\n",
" h = hashing.bin_hash160(binascii.unhexlify(script_hex))\n",
" addr = bin_hash160_to_address(h, version_byte=multisig_version_byte)\n",
" return addr"
] | [
0.04878048780487805,
0,
0,
0,
0,
0,
0.06666666666666667
] | 7 | 0.016492 |
def extend_node_list(acc, new):
"""Extend accumulator with Node(s) from new"""
if new is None:
new = []
elif not isinstance(new, list):
new = [new]
return acc + new | [
"def",
"extend_node_list",
"(",
"acc",
",",
"new",
")",
":",
"if",
"new",
"is",
"None",
":",
"new",
"=",
"[",
"]",
"elif",
"not",
"isinstance",
"(",
"new",
",",
"list",
")",
":",
"new",
"=",
"[",
"new",
"]",
"return",
"acc",
"+",
"new"
] | 30.571429 | 0.009091 | [
"def extend_node_list(acc, new):\n",
" \"\"\"Extend accumulator with Node(s) from new\"\"\"\n",
" if new is None:\n",
" new = []\n",
" elif not isinstance(new, list):\n",
" new = [new]\n",
" return acc + new"
] | [
0,
0.01818181818181818,
0,
0,
0,
0,
0.041666666666666664
] | 7 | 0.00855 |
def update(self, virtual_interfaces):
"""
Method to update Virtual Interfaces
    :param Virtual Interfaces: List containing Virtual Interfaces desired to be updated
:return: None
"""
data = {'virtual_interfaces': virtual_interfaces}
virtual_interfaces_ids = [str(env.get('id')) for env in virtual_interfaces]
return super(ApiV4VirtualInterface, self).put\
('api/v4/virtual-interface/%s/' % ';'.join(virtual_interfaces_ids), data) | [
"def",
"update",
"(",
"self",
",",
"virtual_interfaces",
")",
":",
"data",
"=",
"{",
"'virtual_interfaces'",
":",
"virtual_interfaces",
"}",
"virtual_interfaces_ids",
"=",
"[",
"str",
"(",
"env",
".",
"get",
"(",
"'id'",
")",
")",
"for",
"env",
"in",
"virtual_interfaces",
"]",
"return",
"super",
"(",
"ApiV4VirtualInterface",
",",
"self",
")",
".",
"put",
"(",
"'api/v4/virtual-interface/%s/'",
"%",
"';'",
".",
"join",
"(",
"virtual_interfaces_ids",
")",
",",
"data",
")"
] | 37.692308 | 0.011952 | [
"def update(self, virtual_interfaces):\n",
" \"\"\"\n",
" Method to update Virtual Interfaces\n",
"\n",
" :param Virtual Interfaces: List containing Virtual Interfaces desired to updated\n",
" :return: None\n",
" \"\"\"\n",
"\n",
" data = {'virtual_interfaces': virtual_interfaces}\n",
" virtual_interfaces_ids = [str(env.get('id')) for env in virtual_interfaces]\n",
"\n",
" return super(ApiV4VirtualInterface, self).put\\\n",
" ('api/v4/virtual-interface/%s/' % ';'.join(virtual_interfaces_ids), data)"
] | [
0,
0.08333333333333333,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0.011904761904761904,
0,
0.01818181818181818,
0.023529411764705882
] | 13 | 0.011399 |
def _compute(self):
"""
The main method of the class, which computes an MCS given its
over-approximation. The over-approximation is defined by a model
for the hard part of the formula obtained in :func:`compute`.
The method is essentially a simple loop going over all literals
unsatisfied by the previous model, i.e. the literals of
``self.setd`` and checking which literals can be satisfied. This
    process can be seen as a refinement of the over-approximation of the
MCS. The algorithm follows the pseudo-code of the LBX algorithm
presented in [1]_.
Additionally, if :class:`LBX` was constructed with the requirement
to make "clause :math:`D`" calls, the method calls
:func:`do_cld_check` at every iteration of the loop using the
literals of ``self.setd`` not yet checked, as the contents of
"clause :math:`D`".
"""
# unless clause D checks are used, test one literal at a time
    # and add it either to satisfied or backbone assumptions
i = 0
while i < len(self.setd):
if self.ucld:
self.do_cld_check(self.setd[i:])
i = 0
        if self.setd: # it may be empty after the clause D check
if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [self.setd[i]]):
# filtering satisfied clauses
self._filter_satisfied()
else:
# current literal is backbone
self.bb_assumps.append(-self.setd[i])
i += 1 | [
"def",
"_compute",
"(",
"self",
")",
":",
"# unless clause D checks are used, test one literal at a time",
"# and add it either to satisfied of backbone assumptions",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"self",
".",
"setd",
")",
":",
"if",
"self",
".",
"ucld",
":",
"self",
".",
"do_cld_check",
"(",
"self",
".",
"setd",
"[",
"i",
":",
"]",
")",
"i",
"=",
"0",
"if",
"self",
".",
"setd",
":",
"# if may be empty after the clause D check",
"if",
"self",
".",
"oracle",
".",
"solve",
"(",
"assumptions",
"=",
"self",
".",
"ss_assumps",
"+",
"self",
".",
"bb_assumps",
"+",
"[",
"self",
".",
"setd",
"[",
"i",
"]",
"]",
")",
":",
"# filtering satisfied clauses",
"self",
".",
"_filter_satisfied",
"(",
")",
"else",
":",
"# current literal is backbone",
"self",
".",
"bb_assumps",
".",
"append",
"(",
"-",
"self",
".",
"setd",
"[",
"i",
"]",
")",
"i",
"+=",
"1"
] | 44.891892 | 0.001768 | [
"def _compute(self):\n",
" \"\"\"\n",
" The main method of the class, which computes an MCS given its\n",
" over-approximation. The over-approximation is defined by a model\n",
" for the hard part of the formula obtained in :func:`compute`.\n",
"\n",
" The method is essentially a simple loop going over all literals\n",
" unsatisfied by the previous model, i.e. the literals of\n",
" ``self.setd`` and checking which literals can be satisfied. This\n",
" process can be seen a refinement of the over-approximation of the\n",
" MCS. The algorithm follows the pseudo-code of the LBX algorithm\n",
" presented in [1]_.\n",
"\n",
" Additionally, if :class:`LBX` was constructed with the requirement\n",
" to make \"clause :math:`D`\" calls, the method calls\n",
" :func:`do_cld_check` at every iteration of the loop using the\n",
" literals of ``self.setd`` not yet checked, as the contents of\n",
" \"clause :math:`D`\".\n",
" \"\"\"\n",
"\n",
" # unless clause D checks are used, test one literal at a time\n",
" # and add it either to satisfied of backbone assumptions\n",
" i = 0\n",
" while i < len(self.setd):\n",
" if self.ucld:\n",
" self.do_cld_check(self.setd[i:])\n",
" i = 0\n",
"\n",
" if self.setd: # if may be empty after the clause D check\n",
" if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [self.setd[i]]):\n",
" # filtering satisfied clauses\n",
" self._filter_satisfied()\n",
" else:\n",
" # current literal is backbone\n",
" self.bb_assumps.append(-self.setd[i])\n",
"\n",
" i += 1"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 37 | 0.004019 |
def next_update(self):
"""Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled.
"""
delta = None
if self.frequency == 'daily':
delta = timedelta(days=1)
elif self.frequency == 'weekly':
delta = timedelta(weeks=1)
elif self.frequency == 'fortnighly':
delta = timedelta(weeks=2)
elif self.frequency == 'monthly':
delta = timedelta(weeks=4)
elif self.frequency == 'bimonthly':
delta = timedelta(weeks=4 * 2)
elif self.frequency == 'quarterly':
delta = timedelta(weeks=52 / 4)
elif self.frequency == 'biannual':
delta = timedelta(weeks=52 / 2)
elif self.frequency == 'annual':
delta = timedelta(weeks=52)
elif self.frequency == 'biennial':
delta = timedelta(weeks=52 * 2)
elif self.frequency == 'triennial':
delta = timedelta(weeks=52 * 3)
elif self.frequency == 'quinquennial':
delta = timedelta(weeks=52 * 5)
if delta is None:
return
else:
return self.last_update + delta | [
"def",
"next_update",
"(",
"self",
")",
":",
"delta",
"=",
"None",
"if",
"self",
".",
"frequency",
"==",
"'daily'",
":",
"delta",
"=",
"timedelta",
"(",
"days",
"=",
"1",
")",
"elif",
"self",
".",
"frequency",
"==",
"'weekly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
"elif",
"self",
".",
"frequency",
"==",
"'fortnighly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'monthly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"4",
")",
"elif",
"self",
".",
"frequency",
"==",
"'bimonthly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"4",
"*",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'quarterly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"/",
"4",
")",
"elif",
"self",
".",
"frequency",
"==",
"'biannual'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"/",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'annual'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
")",
"elif",
"self",
".",
"frequency",
"==",
"'biennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'triennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"3",
")",
"elif",
"self",
".",
"frequency",
"==",
"'quinquennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"5",
")",
"if",
"delta",
"is",
"None",
":",
"return",
"else",
":",
"return",
"self",
".",
"last_update",
"+",
"delta"
] | 36.515152 | 0.001617 | [
"def next_update(self):\n",
" \"\"\"Compute the next expected update date,\n",
"\n",
" given the frequency and last_update.\n",
" Return None if the frequency is not handled.\n",
" \"\"\"\n",
" delta = None\n",
" if self.frequency == 'daily':\n",
" delta = timedelta(days=1)\n",
" elif self.frequency == 'weekly':\n",
" delta = timedelta(weeks=1)\n",
" elif self.frequency == 'fortnighly':\n",
" delta = timedelta(weeks=2)\n",
" elif self.frequency == 'monthly':\n",
" delta = timedelta(weeks=4)\n",
" elif self.frequency == 'bimonthly':\n",
" delta = timedelta(weeks=4 * 2)\n",
" elif self.frequency == 'quarterly':\n",
" delta = timedelta(weeks=52 / 4)\n",
" elif self.frequency == 'biannual':\n",
" delta = timedelta(weeks=52 / 2)\n",
" elif self.frequency == 'annual':\n",
" delta = timedelta(weeks=52)\n",
" elif self.frequency == 'biennial':\n",
" delta = timedelta(weeks=52 * 2)\n",
" elif self.frequency == 'triennial':\n",
" delta = timedelta(weeks=52 * 3)\n",
" elif self.frequency == 'quinquennial':\n",
" delta = timedelta(weeks=52 * 5)\n",
" if delta is None:\n",
" return\n",
" else:\n",
" return self.last_update + delta"
] | [
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372
] | 33 | 0.001311 |
def _check_pwm_list(pwm_list):
"""Check the input validity
"""
for pwm in pwm_list:
if not isinstance(pwm, PWM):
raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm))
return True | [
"def",
"_check_pwm_list",
"(",
"pwm_list",
")",
":",
"for",
"pwm",
"in",
"pwm_list",
":",
"if",
"not",
"isinstance",
"(",
"pwm",
",",
"PWM",
")",
":",
"raise",
"TypeError",
"(",
"\"element {0} of pwm_list is not of type PWM\"",
".",
"format",
"(",
"pwm",
")",
")",
"return",
"True"
] | 32.571429 | 0.008547 | [
"def _check_pwm_list(pwm_list):\n",
" \"\"\"Check the input validity\n",
" \"\"\"\n",
" for pwm in pwm_list:\n",
" if not isinstance(pwm, PWM):\n",
" raise TypeError(\"element {0} of pwm_list is not of type PWM\".format(pwm))\n",
" return True"
] | [
0,
0,
0,
0,
0,
0.011627906976744186,
0.06666666666666667
] | 7 | 0.011185 |
def mark_locations(h,section,locs,markspec='or',**kwargs):
"""
Marks one or more locations on along a section. Could be used to
mark the location of a recording or electrical stimulation.
Args:
h = hocObject to interface with neuron
section = reference to section
locs = float between 0 and 1, or array of floats
optional arguments specify details of marker
Returns:
line = reference to plotted markers
"""
# get list of cartesian coordinates specifying section path
xyz = get_section_path(h,section)
(r,theta,phi) = sequential_spherical(xyz)
rcum = np.append(0,np.cumsum(r))
# convert locs into lengths from the beginning of the path
if type(locs) is float or type(locs) is np.float64:
locs = np.array([locs])
if type(locs) is list:
locs = np.array(locs)
lengths = locs*rcum[-1]
# find cartesian coordinates for markers
xyz_marks = []
for targ_length in lengths:
xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))
xyz_marks = np.array(xyz_marks)
# plot markers
line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \
xyz_marks[:,2], markspec, **kwargs)
return line | [
"def",
"mark_locations",
"(",
"h",
",",
"section",
",",
"locs",
",",
"markspec",
"=",
"'or'",
",",
"*",
"*",
"kwargs",
")",
":",
"# get list of cartesian coordinates specifying section path",
"xyz",
"=",
"get_section_path",
"(",
"h",
",",
"section",
")",
"(",
"r",
",",
"theta",
",",
"phi",
")",
"=",
"sequential_spherical",
"(",
"xyz",
")",
"rcum",
"=",
"np",
".",
"append",
"(",
"0",
",",
"np",
".",
"cumsum",
"(",
"r",
")",
")",
"# convert locs into lengths from the beginning of the path",
"if",
"type",
"(",
"locs",
")",
"is",
"float",
"or",
"type",
"(",
"locs",
")",
"is",
"np",
".",
"float64",
":",
"locs",
"=",
"np",
".",
"array",
"(",
"[",
"locs",
"]",
")",
"if",
"type",
"(",
"locs",
")",
"is",
"list",
":",
"locs",
"=",
"np",
".",
"array",
"(",
"locs",
")",
"lengths",
"=",
"locs",
"*",
"rcum",
"[",
"-",
"1",
"]",
"# find cartesian coordinates for markers",
"xyz_marks",
"=",
"[",
"]",
"for",
"targ_length",
"in",
"lengths",
":",
"xyz_marks",
".",
"append",
"(",
"find_coord",
"(",
"targ_length",
",",
"xyz",
",",
"rcum",
",",
"theta",
",",
"phi",
")",
")",
"xyz_marks",
"=",
"np",
".",
"array",
"(",
"xyz_marks",
")",
"# plot markers",
"line",
",",
"=",
"plt",
".",
"plot",
"(",
"xyz_marks",
"[",
":",
",",
"0",
"]",
",",
"xyz_marks",
"[",
":",
",",
"1",
"]",
",",
"xyz_marks",
"[",
":",
",",
"2",
"]",
",",
"markspec",
",",
"*",
"*",
"kwargs",
")",
"return",
"line"
] | 32.675676 | 0.013655 | [
"def mark_locations(h,section,locs,markspec='or',**kwargs):\n",
" \"\"\"\n",
" Marks one or more locations on along a section. Could be used to\n",
" mark the location of a recording or electrical stimulation.\n",
"\n",
" Args:\n",
" h = hocObject to interface with neuron\n",
" section = reference to section\n",
" locs = float between 0 and 1, or array of floats\n",
" optional arguments specify details of marker\n",
"\n",
" Returns:\n",
" line = reference to plotted markers\n",
" \"\"\"\n",
"\n",
" # get list of cartesian coordinates specifying section path\n",
" xyz = get_section_path(h,section)\n",
" (r,theta,phi) = sequential_spherical(xyz)\n",
" rcum = np.append(0,np.cumsum(r))\n",
"\n",
" # convert locs into lengths from the beginning of the path\n",
" if type(locs) is float or type(locs) is np.float64:\n",
" locs = np.array([locs])\n",
" if type(locs) is list:\n",
" locs = np.array(locs)\n",
" lengths = locs*rcum[-1]\n",
"\n",
" # find cartesian coordinates for markers\n",
" xyz_marks = []\n",
" for targ_length in lengths:\n",
" xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))\n",
" xyz_marks = np.array(xyz_marks)\n",
"\n",
" # plot markers\n",
" line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \\\n",
" xyz_marks[:,2], markspec, **kwargs)\n",
" return line"
] | [
0.06779661016949153,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0.043478260869565216,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.057971014492753624,
0,
0,
0,
0.05454545454545454,
0.017543859649122806,
0.06666666666666667
] | 37 | 0.009766 |
def find_command(cmd, path=None, pathext=None):
"""
    Taken `from Django <http://bit.ly/1njB3Y9>`_.
"""
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, string_types):
path = [path]
# check if there are path extensions for Windows executables
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
pathext = pathext.split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None | [
"def",
"find_command",
"(",
"cmd",
",",
"path",
"=",
"None",
",",
"pathext",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATH'",
",",
"''",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"if",
"isinstance",
"(",
"path",
",",
"string_types",
")",
":",
"path",
"=",
"[",
"path",
"]",
"# check if there are path extensions for Windows executables",
"if",
"pathext",
"is",
"None",
":",
"pathext",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATHEXT'",
",",
"'.COM;.EXE;.BAT;.CMD'",
")",
"pathext",
"=",
"pathext",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"# don't use extensions if the command ends with one of them",
"for",
"ext",
"in",
"pathext",
":",
"if",
"cmd",
".",
"endswith",
"(",
"ext",
")",
":",
"pathext",
"=",
"[",
"''",
"]",
"break",
"# check if we find the command on PATH",
"for",
"p",
"in",
"path",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"cmd",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"return",
"f",
"for",
"ext",
"in",
"pathext",
":",
"fext",
"=",
"f",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fext",
")",
":",
"return",
"fext",
"return",
"None"
] | 29.1 | 0.001109 | [
"def find_command(cmd, path=None, pathext=None):\n",
" \"\"\"\n",
" Taken `from Django http://bit.ly/1njB3Y9>`_.\n",
" \"\"\"\n",
" if path is None:\n",
" path = os.environ.get('PATH', '').split(os.pathsep)\n",
" if isinstance(path, string_types):\n",
" path = [path]\n",
"\n",
" # check if there are path extensions for Windows executables\n",
" if pathext is None:\n",
" pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')\n",
" pathext = pathext.split(os.pathsep)\n",
"\n",
" # don't use extensions if the command ends with one of them\n",
" for ext in pathext:\n",
" if cmd.endswith(ext):\n",
" pathext = ['']\n",
" break\n",
"\n",
" # check if we find the command on PATH\n",
" for p in path:\n",
" f = os.path.join(p, cmd)\n",
" if os.path.isfile(f):\n",
" return f\n",
" for ext in pathext:\n",
" fext = f + ext\n",
" if os.path.isfile(fext):\n",
" return fext\n",
" return None"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 30 | 0.002222 |
def get_max_drawdown_underwater(underwater):
"""
Determines peak, valley, and recovery dates given an 'underwater'
DataFrame.
An underwater DataFrame is a DataFrame that has precomputed
rolling drawdown.
Parameters
----------
underwater : pd.Series
Underwater returns (rolling drawdown) of a strategy.
Returns
-------
peak : datetime
The maximum drawdown's peak.
valley : datetime
The maximum drawdown's valley.
recovery : datetime
The maximum drawdown's recovery.
"""
valley = np.argmin(underwater) # end of the period
# Find first 0
peak = underwater[:valley][underwater[:valley] == 0].index[-1]
# Find last 0
try:
recovery = underwater[valley:][underwater[valley:] == 0].index[0]
except IndexError:
recovery = np.nan # drawdown not recovered
return peak, valley, recovery | [
"def",
"get_max_drawdown_underwater",
"(",
"underwater",
")",
":",
"valley",
"=",
"np",
".",
"argmin",
"(",
"underwater",
")",
"# end of the period",
"# Find first 0",
"peak",
"=",
"underwater",
"[",
":",
"valley",
"]",
"[",
"underwater",
"[",
":",
"valley",
"]",
"==",
"0",
"]",
".",
"index",
"[",
"-",
"1",
"]",
"# Find last 0",
"try",
":",
"recovery",
"=",
"underwater",
"[",
"valley",
":",
"]",
"[",
"underwater",
"[",
"valley",
":",
"]",
"==",
"0",
"]",
".",
"index",
"[",
"0",
"]",
"except",
"IndexError",
":",
"recovery",
"=",
"np",
".",
"nan",
"# drawdown not recovered",
"return",
"peak",
",",
"valley",
",",
"recovery"
] | 27.5 | 0.001098 | [
"def get_max_drawdown_underwater(underwater):\n",
" \"\"\"\n",
" Determines peak, valley, and recovery dates given an 'underwater'\n",
" DataFrame.\n",
"\n",
" An underwater DataFrame is a DataFrame that has precomputed\n",
" rolling drawdown.\n",
"\n",
" Parameters\n",
" ----------\n",
" underwater : pd.Series\n",
" Underwater returns (rolling drawdown) of a strategy.\n",
"\n",
" Returns\n",
" -------\n",
" peak : datetime\n",
" The maximum drawdown's peak.\n",
" valley : datetime\n",
" The maximum drawdown's valley.\n",
" recovery : datetime\n",
" The maximum drawdown's recovery.\n",
" \"\"\"\n",
"\n",
" valley = np.argmin(underwater) # end of the period\n",
" # Find first 0\n",
" peak = underwater[:valley][underwater[:valley] == 0].index[-1]\n",
" # Find last 0\n",
" try:\n",
" recovery = underwater[valley:][underwater[valley:] == 0].index[0]\n",
" except IndexError:\n",
" recovery = np.nan # drawdown not recovered\n",
" return peak, valley, recovery"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304
] | 32 | 0.000947 |
def insert_column(self, name, data, colnum=None):
"""
Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used un-modified by ascii tables as well.
"""
if name in self._colnames:
raise ValueError("column '%s' already exists" % name)
if IS_PY3 and data.dtype.char == 'U':
# fast dtype conversion using an empty array
# we could hack at the actual text description, but using
# the numpy API is probably safer
# this also avoids doing a dtype conversion on every array
        # element which could be expensive
descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
else:
descr = data.dtype.descr
if len(descr) > 1:
raise ValueError("you can only insert a single column, "
"requested: %s" % descr)
this_descr = descr[0]
this_descr = [name, this_descr[1]]
if len(data.shape) > 1:
this_descr += [data.shape[1:]]
this_descr = tuple(this_descr)
name, fmt, dims = _npy2fits(
this_descr,
table_type=self._table_type_str)
if dims is not None:
dims = [dims]
if colnum is None:
new_colnum = len(self._info['colinfo']) + 1
else:
new_colnum = colnum+1
self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
self._update_info()
self.write_column(name, data) | [
"def",
"insert_column",
"(",
"self",
",",
"name",
",",
"data",
",",
"colnum",
"=",
"None",
")",
":",
"if",
"name",
"in",
"self",
".",
"_colnames",
":",
"raise",
"ValueError",
"(",
"\"column '%s' already exists\"",
"%",
"name",
")",
"if",
"IS_PY3",
"and",
"data",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# fast dtype conversion using an empty array",
"# we could hack at the actual text description, but using",
"# the numpy API is probably safer",
"# this also avoids doing a dtype conversion on every array",
"# element which could b expensive",
"descr",
"=",
"numpy",
".",
"empty",
"(",
"1",
")",
".",
"astype",
"(",
"data",
".",
"dtype",
")",
".",
"astype",
"(",
"'S'",
")",
".",
"dtype",
".",
"descr",
"else",
":",
"descr",
"=",
"data",
".",
"dtype",
".",
"descr",
"if",
"len",
"(",
"descr",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"you can only insert a single column, \"",
"\"requested: %s\"",
"%",
"descr",
")",
"this_descr",
"=",
"descr",
"[",
"0",
"]",
"this_descr",
"=",
"[",
"name",
",",
"this_descr",
"[",
"1",
"]",
"]",
"if",
"len",
"(",
"data",
".",
"shape",
")",
">",
"1",
":",
"this_descr",
"+=",
"[",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"]",
"this_descr",
"=",
"tuple",
"(",
"this_descr",
")",
"name",
",",
"fmt",
",",
"dims",
"=",
"_npy2fits",
"(",
"this_descr",
",",
"table_type",
"=",
"self",
".",
"_table_type_str",
")",
"if",
"dims",
"is",
"not",
"None",
":",
"dims",
"=",
"[",
"dims",
"]",
"if",
"colnum",
"is",
"None",
":",
"new_colnum",
"=",
"len",
"(",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
")",
"+",
"1",
"else",
":",
"new_colnum",
"=",
"colnum",
"+",
"1",
"self",
".",
"_FITS",
".",
"insert_col",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"new_colnum",
",",
"name",
",",
"fmt",
",",
"tdim",
"=",
"dims",
")",
"self",
".",
"_update_info",
"(",
")",
"self",
".",
"write_column",
"(",
"name",
",",
"data",
")"
] | 32.196429 | 0.001076 | [
"def insert_column(self, name, data, colnum=None):\n",
" \"\"\"\n",
" Insert a new column.\n",
"\n",
" parameters\n",
" ----------\n",
" name: string\n",
" The column name\n",
" data:\n",
" The data to write into the new column.\n",
" colnum: int, optional\n",
" The column number for the new column, zero-offset. Default\n",
" is to add the new column after the existing ones.\n",
"\n",
" Notes\n",
" -----\n",
" This method is used un-modified by ascii tables as well.\n",
" \"\"\"\n",
"\n",
" if name in self._colnames:\n",
" raise ValueError(\"column '%s' already exists\" % name)\n",
"\n",
" if IS_PY3 and data.dtype.char == 'U':\n",
" # fast dtype conversion using an empty array\n",
" # we could hack at the actual text description, but using\n",
" # the numpy API is probably safer\n",
" # this also avoids doing a dtype conversion on every array\n",
" # element which could b expensive\n",
" descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr\n",
" else:\n",
" descr = data.dtype.descr\n",
"\n",
" if len(descr) > 1:\n",
" raise ValueError(\"you can only insert a single column, \"\n",
" \"requested: %s\" % descr)\n",
"\n",
" this_descr = descr[0]\n",
" this_descr = [name, this_descr[1]]\n",
" if len(data.shape) > 1:\n",
" this_descr += [data.shape[1:]]\n",
" this_descr = tuple(this_descr)\n",
"\n",
" name, fmt, dims = _npy2fits(\n",
" this_descr,\n",
" table_type=self._table_type_str)\n",
" if dims is not None:\n",
" dims = [dims]\n",
"\n",
" if colnum is None:\n",
" new_colnum = len(self._info['colinfo']) + 1\n",
" else:\n",
" new_colnum = colnum+1\n",
" self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)\n",
" self._update_info()\n",
"\n",
" self.write_column(name, data)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703
] | 56 | 0.001971 |
def with_column(self, label, values, *rest):
"""Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``rest``: An alternating list of labels and values describing
additional columns. See with_columns for a full description.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- if ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# Ensure that if with_column is called instead of with_columns;
# no error is raised.
if rest:
return self.with_columns(label, values, *rest)
new_table = self.copy()
new_table.append_column(label, values)
return new_table | [
"def",
"with_column",
"(",
"self",
",",
"label",
",",
"values",
",",
"*",
"rest",
")",
":",
"# Ensure that if with_column is called instead of with_columns;",
"# no error is raised.",
"if",
"rest",
":",
"return",
"self",
".",
"with_columns",
"(",
"label",
",",
"values",
",",
"*",
"rest",
")",
"new_table",
"=",
"self",
".",
"copy",
"(",
")",
"new_table",
".",
"append_column",
"(",
"label",
",",
"values",
")",
"return",
"new_table"
] | 37.79661 | 0.001311 | [
"def with_column(self, label, values, *rest):\n",
" \"\"\"Return a new table with an additional or replaced column.\n",
"\n",
" Args:\n",
" ``label`` (str): The column label. If an existing label is used,\n",
" the existing column will be replaced in the new table.\n",
"\n",
" ``values`` (single value or sequence): If a single value, every\n",
" value in the new column is ``values``. If sequence of values,\n",
" new column takes on values in ``values``.\n",
"\n",
" ``rest``: An alternating list of labels and values describing\n",
" additional columns. See with_columns for a full description.\n",
"\n",
" Raises:\n",
" ``ValueError``: If\n",
" - ``label`` is not a valid column name\n",
" - if ``label`` is not of type (str)\n",
" - ``values`` is a list/array that does not have the same\n",
" length as the number of rows in the table.\n",
"\n",
" Returns:\n",
" copy of original table with new or replaced column\n",
"\n",
" >>> alphabet = Table().with_column('letter', make_array('c','d'))\n",
" >>> alphabet = alphabet.with_column('count', make_array(2, 4))\n",
" >>> alphabet\n",
" letter | count\n",
" c | 2\n",
" d | 4\n",
" >>> alphabet.with_column('permutes', make_array('a', 'g'))\n",
" letter | count | permutes\n",
" c | 2 | a\n",
" d | 4 | g\n",
" >>> alphabet\n",
" letter | count\n",
" c | 2\n",
" d | 4\n",
" >>> alphabet.with_column('count', 1)\n",
" letter | count\n",
" c | 1\n",
" d | 1\n",
" >>> alphabet.with_column(1, make_array(1, 2))\n",
" Traceback (most recent call last):\n",
" ...\n",
" ValueError: The column label must be a string, but a int was given\n",
" >>> alphabet.with_column('bad_col', make_array(1))\n",
" Traceback (most recent call last):\n",
" ...\n",
" ValueError: Column length mismatch. New column does not have the same number of rows as table.\n",
" \"\"\"\n",
" # Ensure that if with_column is called instead of with_columns;\n",
" # no error is raised.\n",
" if rest:\n",
" return self.with_columns(label, values, *rest)\n",
"\n",
" new_table = self.copy()\n",
" new_table.append_column(label, values)\n",
" return new_table"
] | [
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664
] | 59 | 0.001116 |
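A short usage sketch, assuming the Table and make_array names from the datascience package that the docstring examples already use.

from datascience import Table, make_array

t = Table().with_column('letter', make_array('a', 'b', 'c'))
t2 = t.with_column('count', make_array(1, 2, 3),
                   'vowel', make_array(True, False, False))
# t is unchanged; t2 carries both extra columns via the with_columns path.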
def get_tokenizer(fhp # type: Optional[field_formats.FieldHashingProperties]
):
# type: (...) -> Callable[[Text, Optional[Text]], Iterable[Text]]
""" Get tokeniser function from the hash settings.
This function takes a FieldHashingProperties object. It returns a
function that takes a string and tokenises based on those properties.
"""
def dummy(word, ignore=None):
# type: (Text, Optional[Text]) -> Iterable[Text]
"""
Null tokenizer returns empty Iterable.
FieldSpec Ignore has hashing_properties = None
and get_tokenizer has to return something for this case,
even though it's never called. An alternative would be to
use an Optional[Callable]].
:param word: not used
:param ignore: not used
:return: empty Iterable
"""
return ('' for i in range(0))
if not fhp:
return dummy
n = fhp.ngram
if n < 0:
raise ValueError('`n` in `n`-gram must be non-negative.')
positional = fhp.positional
def tok(word, ignore=None):
# type: (Text, Optional[Text]) -> Iterable[Text]
""" Produce `n`-grams of `word`.
:param word: The string to tokenize.
:param ignore: The substring whose occurrences we remove from
`word` before tokenization.
:return: Tuple of n-gram strings.
"""
if ignore is not None:
word = word.replace(ignore, '')
if n > 1:
word = ' {} '.format(word)
if positional:
# These are 1-indexed.
return ('{} {}'.format(i + 1, word[i:i + n])
for i in range(len(word) - n + 1))
else:
return (word[i:i + n] for i in range(len(word) - n + 1))
return tok | [
"def",
"get_tokenizer",
"(",
"fhp",
"# type: Optional[field_formats.FieldHashingProperties]",
")",
":",
"# type: (...) -> Callable[[Text, Optional[Text]], Iterable[Text]]",
"def",
"dummy",
"(",
"word",
",",
"ignore",
"=",
"None",
")",
":",
"# type: (Text, Optional[Text]) -> Iterable[Text]",
"\"\"\"\n Null tokenizer returns empty Iterable.\n FieldSpec Ignore has hashing_properties = None\n and get_tokenizer has to return something for this case,\n even though it's never called. An alternative would be to\n use an Optional[Callable]].\n :param word: not used\n :param ignore: not used\n :return: empty Iterable\n \"\"\"",
"return",
"(",
"''",
"for",
"i",
"in",
"range",
"(",
"0",
")",
")",
"if",
"not",
"fhp",
":",
"return",
"dummy",
"n",
"=",
"fhp",
".",
"ngram",
"if",
"n",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'`n` in `n`-gram must be non-negative.'",
")",
"positional",
"=",
"fhp",
".",
"positional",
"def",
"tok",
"(",
"word",
",",
"ignore",
"=",
"None",
")",
":",
"# type: (Text, Optional[Text]) -> Iterable[Text]",
"\"\"\" Produce `n`-grams of `word`.\n\n :param word: The string to tokenize.\n :param ignore: The substring whose occurrences we remove from\n `word` before tokenization.\n :return: Tuple of n-gram strings.\n \"\"\"",
"if",
"ignore",
"is",
"not",
"None",
":",
"word",
"=",
"word",
".",
"replace",
"(",
"ignore",
",",
"''",
")",
"if",
"n",
">",
"1",
":",
"word",
"=",
"' {} '",
".",
"format",
"(",
"word",
")",
"if",
"positional",
":",
"# These are 1-indexed.",
"return",
"(",
"'{} {}'",
".",
"format",
"(",
"i",
"+",
"1",
",",
"word",
"[",
"i",
":",
"i",
"+",
"n",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"word",
")",
"-",
"n",
"+",
"1",
")",
")",
"else",
":",
"return",
"(",
"word",
"[",
"i",
":",
"i",
"+",
"n",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"word",
")",
"-",
"n",
"+",
"1",
")",
")",
"return",
"tok"
] | 32.309091 | 0.000546 | [
"def get_tokenizer(fhp # type: Optional[field_formats.FieldHashingProperties]\n",
" ):\n",
" # type: (...) -> Callable[[Text, Optional[Text]], Iterable[Text]]\n",
" \"\"\" Get tokeniser function from the hash settings.\n",
"\n",
" This function takes a FieldHashingProperties object. It returns a\n",
" function that takes a string and tokenises based on those properties.\n",
" \"\"\"\n",
"\n",
" def dummy(word, ignore=None):\n",
" # type: (Text, Optional[Text]) -> Iterable[Text]\n",
" \"\"\"\n",
" Null tokenizer returns empty Iterable.\n",
" FieldSpec Ignore has hashing_properties = None\n",
" and get_tokenizer has to return something for this case,\n",
" even though it's never called. An alternative would be to\n",
" use an Optional[Callable]].\n",
" :param word: not used\n",
" :param ignore: not used\n",
" :return: empty Iterable\n",
" \"\"\"\n",
" return ('' for i in range(0))\n",
"\n",
" if not fhp:\n",
" return dummy\n",
"\n",
" n = fhp.ngram\n",
" if n < 0:\n",
" raise ValueError('`n` in `n`-gram must be non-negative.')\n",
"\n",
" positional = fhp.positional\n",
"\n",
" def tok(word, ignore=None):\n",
" # type: (Text, Optional[Text]) -> Iterable[Text]\n",
" \"\"\" Produce `n`-grams of `word`.\n",
"\n",
" :param word: The string to tokenize.\n",
" :param ignore: The substring whose occurrences we remove from\n",
" `word` before tokenization.\n",
" :return: Tuple of n-gram strings.\n",
" \"\"\"\n",
" if ignore is not None:\n",
" word = word.replace(ignore, '')\n",
"\n",
" if n > 1:\n",
" word = ' {} '.format(word)\n",
"\n",
" if positional:\n",
" # These are 1-indexed.\n",
" return ('{} {}'.format(i + 1, word[i:i + n])\n",
" for i in range(len(word) - n + 1))\n",
" else:\n",
" return (word[i:i + n] for i in range(len(word) - n + 1))\n",
"\n",
" return tok"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 55 | 0.001299 |
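A minimal sketch of the tokenizer in use. FieldHashingProperties is stood in by a SimpleNamespace here so the snippet runs without the surrounding package; only the ngram and positional attributes are read.

from types import SimpleNamespace

tok = get_tokenizer(SimpleNamespace(ngram=2, positional=False))
print(list(tok('ann')))      # [' a', 'an', 'nn', 'n ']

tok_pos = get_tokenizer(SimpleNamespace(ngram=2, positional=True))
print(list(tok_pos('ann')))  # ['1  a', '2 an', '3 nn', '4 n ']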
async def get_codec(self):
"""Get codec settings."""
act = self.service.action("X_GetCodec")
res = await act.async_call()
return res | [
"async",
"def",
"get_codec",
"(",
"self",
")",
":",
"act",
"=",
"self",
".",
"service",
".",
"action",
"(",
"\"X_GetCodec\"",
")",
"res",
"=",
"await",
"act",
".",
"async_call",
"(",
")",
"return",
"res"
] | 32 | 0.012195 | [
"async def get_codec(self):\n",
" \"\"\"Get codec settings.\"\"\"\n",
" act = self.service.action(\"X_GetCodec\")\n",
" res = await act.async_call()\n",
" return res"
] | [
0,
0.029411764705882353,
0,
0,
0.05555555555555555
] | 5 | 0.016993 |
def _pastorestr(ins):
''' Stores a string value into a memory address.
    It copies the content of the 2nd operand (string) into the 1st,
    reallocating dynamic memory for the 1st str. This instruction DOES ALLOW
    immediate strings for the 2nd parameter, starting with '#'.
'''
output = _paddr(ins.quad[1])
temporal = False
value = ins.quad[2]
indirect = value[0] == '*'
if indirect:
value = value[1:]
    immediate = value[0] == '#'
if immediate:
value = value[1:]
if value[0] == '_':
if indirect:
if immediate:
output.append('ld de, (%s)' % value)
else:
output.append('ld de, (%s)' % value)
output.append('call __LOAD_DE_DE')
REQUIRES.add('lddede.asm')
else:
if immediate:
output.append('ld de, %s' % value)
else:
output.append('ld de, (%s)' % value)
else:
output.append('pop de')
temporal = True
if indirect:
output.append('call __LOAD_DE_DE')
REQUIRES.add('lddede.asm')
if not temporal:
output.append('call __STORE_STR')
REQUIRES.add('storestr.asm')
else: # A value already on dynamic memory
output.append('call __STORE_STR2')
REQUIRES.add('storestr2.asm')
return output | [
"def",
"_pastorestr",
"(",
"ins",
")",
":",
"output",
"=",
"_paddr",
"(",
"ins",
".",
"quad",
"[",
"1",
"]",
")",
"temporal",
"=",
"False",
"value",
"=",
"ins",
".",
"quad",
"[",
"2",
"]",
"indirect",
"=",
"value",
"[",
"0",
"]",
"==",
"'*'",
"if",
"indirect",
":",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"immediate",
"=",
"value",
"[",
"0",
"]",
"if",
"immediate",
":",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"if",
"value",
"[",
"0",
"]",
"==",
"'_'",
":",
"if",
"indirect",
":",
"if",
"immediate",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"value",
")",
"else",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"value",
")",
"output",
".",
"append",
"(",
"'call __LOAD_DE_DE'",
")",
"REQUIRES",
".",
"add",
"(",
"'lddede.asm'",
")",
"else",
":",
"if",
"immediate",
":",
"output",
".",
"append",
"(",
"'ld de, %s'",
"%",
"value",
")",
"else",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"value",
")",
"else",
":",
"output",
".",
"append",
"(",
"'pop de'",
")",
"temporal",
"=",
"True",
"if",
"indirect",
":",
"output",
".",
"append",
"(",
"'call __LOAD_DE_DE'",
")",
"REQUIRES",
".",
"add",
"(",
"'lddede.asm'",
")",
"if",
"not",
"temporal",
":",
"output",
".",
"append",
"(",
"'call __STORE_STR'",
")",
"REQUIRES",
".",
"add",
"(",
"'storestr.asm'",
")",
"else",
":",
"# A value already on dynamic memory",
"output",
".",
"append",
"(",
"'call __STORE_STR2'",
")",
"REQUIRES",
".",
"add",
"(",
"'storestr2.asm'",
")",
"return",
"output"
] | 28.340426 | 0.000726 | [
"def _pastorestr(ins):\n",
" ''' Stores a string value into a memory address.\n",
" It copies content of 2nd operand (string), into 1st, reallocating\n",
" dynamic memory for the 1st str. These instruction DOES ALLOW\n",
" inmediate strings for the 2nd parameter, starting with '#'.\n",
" '''\n",
" output = _paddr(ins.quad[1])\n",
" temporal = False\n",
" value = ins.quad[2]\n",
"\n",
" indirect = value[0] == '*'\n",
" if indirect:\n",
" value = value[1:]\n",
"\n",
" immediate = value[0]\n",
" if immediate:\n",
" value = value[1:]\n",
"\n",
" if value[0] == '_':\n",
" if indirect:\n",
" if immediate:\n",
" output.append('ld de, (%s)' % value)\n",
" else:\n",
" output.append('ld de, (%s)' % value)\n",
" output.append('call __LOAD_DE_DE')\n",
" REQUIRES.add('lddede.asm')\n",
" else:\n",
" if immediate:\n",
" output.append('ld de, %s' % value)\n",
" else:\n",
" output.append('ld de, (%s)' % value)\n",
" else:\n",
" output.append('pop de')\n",
" temporal = True\n",
"\n",
" if indirect:\n",
" output.append('call __LOAD_DE_DE')\n",
" REQUIRES.add('lddede.asm')\n",
"\n",
" if not temporal:\n",
" output.append('call __STORE_STR')\n",
" REQUIRES.add('storestr.asm')\n",
" else: # A value already on dynamic memory\n",
" output.append('call __STORE_STR2')\n",
" REQUIRES.add('storestr2.asm')\n",
"\n",
" return output"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 47 | 0.001252 |
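A hedged sketch of how the code generator might call this, inside the surrounding backend module where _paddr and REQUIRES are defined; the quad tuple and labels are hypothetical.

from types import SimpleNamespace

# store the immediate string at label _msg into the string variable _s
ins = SimpleNamespace(quad=('pastorestr', '_s', '#_msg'))
asm = _pastorestr(ins)
# asm ends with: 'ld de, _msg', 'call __STORE_STR'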
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data["properties"]
raw_content = properties.get("addressSpace", None)
if raw_content is not None:
address_space = AddressSpace.from_raw_data(raw_content)
properties["addressSpace"] = address_space
raw_content = properties.get("dhcpOptions")
if raw_content is not None:
dhcp_options = DHCPOptions.from_raw_data(raw_content)
properties["dhcpOptions"] = dhcp_options
raw_content = properties.get("logicalNetwork", None)
if raw_content is not None:
properties["logicalNetwork"] = Resource.from_raw_data(raw_content)
subnetworks = []
for raw_subnet in properties.get("subnets", []):
raw_subnet["parentResourceID"] = raw_data["resourceId"]
subnetworks.append(SubNetworks.from_raw_data(raw_subnet))
properties["subnets"] = subnetworks
return super(VirtualNetworks, cls).process_raw_data(raw_data) | [
"def",
"process_raw_data",
"(",
"cls",
",",
"raw_data",
")",
":",
"properties",
"=",
"raw_data",
"[",
"\"properties\"",
"]",
"raw_content",
"=",
"properties",
".",
"get",
"(",
"\"addressSpace\"",
",",
"None",
")",
"if",
"raw_content",
"is",
"not",
"None",
":",
"address_space",
"=",
"AddressSpace",
".",
"from_raw_data",
"(",
"raw_content",
")",
"properties",
"[",
"\"addressSpace\"",
"]",
"=",
"address_space",
"raw_content",
"=",
"properties",
".",
"get",
"(",
"\"dhcpOptions\"",
")",
"if",
"raw_content",
"is",
"not",
"None",
":",
"dhcp_options",
"=",
"DHCPOptions",
".",
"from_raw_data",
"(",
"raw_content",
")",
"properties",
"[",
"\"dhcpOptions\"",
"]",
"=",
"dhcp_options",
"raw_content",
"=",
"properties",
".",
"get",
"(",
"\"logicalNetwork\"",
",",
"None",
")",
"if",
"raw_content",
"is",
"not",
"None",
":",
"properties",
"[",
"\"logicalNetwork\"",
"]",
"=",
"Resource",
".",
"from_raw_data",
"(",
"raw_content",
")",
"subnetworks",
"=",
"[",
"]",
"for",
"raw_subnet",
"in",
"properties",
".",
"get",
"(",
"\"subnets\"",
",",
"[",
"]",
")",
":",
"raw_subnet",
"[",
"\"parentResourceID\"",
"]",
"=",
"raw_data",
"[",
"\"resourceId\"",
"]",
"subnetworks",
".",
"append",
"(",
"SubNetworks",
".",
"from_raw_data",
"(",
"raw_subnet",
")",
")",
"properties",
"[",
"\"subnets\"",
"]",
"=",
"subnetworks",
"return",
"super",
"(",
"VirtualNetworks",
",",
"cls",
")",
".",
"process_raw_data",
"(",
"raw_data",
")"
] | 42.12 | 0.001857 | [
"def process_raw_data(cls, raw_data):\n",
" \"\"\"Create a new model using raw API response.\"\"\"\n",
" properties = raw_data[\"properties\"]\n",
"\n",
" raw_content = properties.get(\"addressSpace\", None)\n",
" if raw_content is not None:\n",
" address_space = AddressSpace.from_raw_data(raw_content)\n",
" properties[\"addressSpace\"] = address_space\n",
"\n",
" raw_content = properties.get(\"dhcpOptions\")\n",
" if raw_content is not None:\n",
" dhcp_options = DHCPOptions.from_raw_data(raw_content)\n",
" properties[\"dhcpOptions\"] = dhcp_options\n",
"\n",
" raw_content = properties.get(\"logicalNetwork\", None)\n",
" if raw_content is not None:\n",
" properties[\"logicalNetwork\"] = Resource.from_raw_data(raw_content)\n",
"\n",
" subnetworks = []\n",
" for raw_subnet in properties.get(\"subnets\", []):\n",
" raw_subnet[\"parentResourceID\"] = raw_data[\"resourceId\"]\n",
" subnetworks.append(SubNetworks.from_raw_data(raw_subnet))\n",
" properties[\"subnets\"] = subnetworks\n",
"\n",
" return super(VirtualNetworks, cls).process_raw_data(raw_data)"
] | [
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406
] | 25 | 0.001281 |
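A hedged sketch of the raw payload shape this parser consumes; the resource names and property values are hypothetical and only illustrate which keys are read.

raw_data = {
    "resourceId": "vnet-01",
    "properties": {
        "addressSpace": {"addressPrefixes": ["10.0.0.0/16"]},
        "subnets": [{"resourceId": "subnet-01", "properties": {}}],
    },
}
vnet = VirtualNetworks.process_raw_data(raw_data)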
def invoke(stage, async, dry_run, config_file, args):
"""Invoke the lambda function."""
config = _load_config(config_file)
if stage is None:
stage = config['devstage']
cfn = boto3.client('cloudformation')
lmb = boto3.client('lambda')
try:
stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
raise RuntimeError('This project has not been deployed yet.')
function = _get_from_stack(stack, 'Output', 'FunctionArn')
if dry_run:
invocation_type = 'DryRun'
elif async:
invocation_type = 'Event'
else:
invocation_type = 'RequestResponse'
# parse input arguments
data = {}
for arg in args:
s = arg.split('=', 1)
if len(s) != 2:
raise ValueError('Invalid argument ' + arg)
if s[0][-1] == ':':
# JSON argument
data[s[0][:-1]] = json.loads(s[1])
else:
# string argument
data[s[0]] = s[1]
rv = lmb.invoke(FunctionName=function, InvocationType=invocation_type,
Qualifier=stage,
Payload=json.dumps({'kwargs': data}, sort_keys=True))
if rv['StatusCode'] != 200 and rv['StatusCode'] != 202:
raise RuntimeError('Unexpected error. Status code = {}.'.format(
rv['StatusCode']))
if invocation_type == 'RequestResponse':
payload = json.loads(rv['Payload'].read().decode('utf-8'))
if 'FunctionError' in rv:
if 'stackTrace' in payload:
print('Traceback (most recent call last):')
for frame in payload['stackTrace']:
print(' File "{}", line {}, in {}'.format(
frame[0], frame[1], frame[2]))
print(' ' + frame[3])
print('{}: {}'.format(payload['errorType'],
payload['errorMessage']))
else:
raise RuntimeError('Unknown error')
else:
print(str(payload)) | [
"def",
"invoke",
"(",
"stage",
",",
"async",
",",
"dry_run",
",",
"config_file",
",",
"args",
")",
":",
"config",
"=",
"_load_config",
"(",
"config_file",
")",
"if",
"stage",
"is",
"None",
":",
"stage",
"=",
"config",
"[",
"'devstage'",
"]",
"cfn",
"=",
"boto3",
".",
"client",
"(",
"'cloudformation'",
")",
"lmb",
"=",
"boto3",
".",
"client",
"(",
"'lambda'",
")",
"try",
":",
"stack",
"=",
"cfn",
".",
"describe_stacks",
"(",
"StackName",
"=",
"config",
"[",
"'name'",
"]",
")",
"[",
"'Stacks'",
"]",
"[",
"0",
"]",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
":",
"raise",
"RuntimeError",
"(",
"'This project has not been deployed yet.'",
")",
"function",
"=",
"_get_from_stack",
"(",
"stack",
",",
"'Output'",
",",
"'FunctionArn'",
")",
"if",
"dry_run",
":",
"invocation_type",
"=",
"'DryRun'",
"elif",
"async",
":",
"invocation_type",
"=",
"'Event'",
"else",
":",
"invocation_type",
"=",
"'RequestResponse'",
"# parse input arguments",
"data",
"=",
"{",
"}",
"for",
"arg",
"in",
"args",
":",
"s",
"=",
"arg",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"len",
"(",
"s",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Invalid argument '",
"+",
"arg",
")",
"if",
"s",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"==",
"':'",
":",
"# JSON argument",
"data",
"[",
"s",
"[",
"0",
"]",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"json",
".",
"loads",
"(",
"s",
"[",
"1",
"]",
")",
"else",
":",
"# string argument",
"data",
"[",
"s",
"[",
"0",
"]",
"]",
"=",
"s",
"[",
"1",
"]",
"rv",
"=",
"lmb",
".",
"invoke",
"(",
"FunctionName",
"=",
"function",
",",
"InvocationType",
"=",
"invocation_type",
",",
"Qualifier",
"=",
"stage",
",",
"Payload",
"=",
"json",
".",
"dumps",
"(",
"{",
"'kwargs'",
":",
"data",
"}",
",",
"sort_keys",
"=",
"True",
")",
")",
"if",
"rv",
"[",
"'StatusCode'",
"]",
"!=",
"200",
"and",
"rv",
"[",
"'StatusCode'",
"]",
"!=",
"202",
":",
"raise",
"RuntimeError",
"(",
"'Unexpected error. Status code = {}.'",
".",
"format",
"(",
"rv",
"[",
"'StatusCode'",
"]",
")",
")",
"if",
"invocation_type",
"==",
"'RequestResponse'",
":",
"payload",
"=",
"json",
".",
"loads",
"(",
"rv",
"[",
"'Payload'",
"]",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"if",
"'FunctionError'",
"in",
"rv",
":",
"if",
"'stackTrace'",
"in",
"payload",
":",
"print",
"(",
"'Traceback (most recent call last):'",
")",
"for",
"frame",
"in",
"payload",
"[",
"'stackTrace'",
"]",
":",
"print",
"(",
"' File \"{}\", line {}, in {}'",
".",
"format",
"(",
"frame",
"[",
"0",
"]",
",",
"frame",
"[",
"1",
"]",
",",
"frame",
"[",
"2",
"]",
")",
")",
"print",
"(",
"' '",
"+",
"frame",
"[",
"3",
"]",
")",
"print",
"(",
"'{}: {}'",
".",
"format",
"(",
"payload",
"[",
"'errorType'",
"]",
",",
"payload",
"[",
"'errorMessage'",
"]",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Unknown error'",
")",
"else",
":",
"print",
"(",
"str",
"(",
"payload",
")",
")"
] | 36.232143 | 0.00144 | [
"def invoke(stage, async, dry_run, config_file, args):\n",
" \"\"\"Invoke the lambda function.\"\"\"\n",
" config = _load_config(config_file)\n",
" if stage is None:\n",
" stage = config['devstage']\n",
"\n",
" cfn = boto3.client('cloudformation')\n",
" lmb = boto3.client('lambda')\n",
"\n",
" try:\n",
" stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]\n",
" except botocore.exceptions.ClientError:\n",
" raise RuntimeError('This project has not been deployed yet.')\n",
" function = _get_from_stack(stack, 'Output', 'FunctionArn')\n",
"\n",
" if dry_run:\n",
" invocation_type = 'DryRun'\n",
" elif async:\n",
" invocation_type = 'Event'\n",
" else:\n",
" invocation_type = 'RequestResponse'\n",
"\n",
" # parse input arguments\n",
" data = {}\n",
" for arg in args:\n",
" s = arg.split('=', 1)\n",
" if len(s) != 2:\n",
" raise ValueError('Invalid argument ' + arg)\n",
" if s[0][-1] == ':':\n",
" # JSON argument\n",
" data[s[0][:-1]] = json.loads(s[1])\n",
" else:\n",
" # string argument\n",
" data[s[0]] = s[1]\n",
"\n",
" rv = lmb.invoke(FunctionName=function, InvocationType=invocation_type,\n",
" Qualifier=stage,\n",
" Payload=json.dumps({'kwargs': data}, sort_keys=True))\n",
" if rv['StatusCode'] != 200 and rv['StatusCode'] != 202:\n",
" raise RuntimeError('Unexpected error. Status code = {}.'.format(\n",
" rv['StatusCode']))\n",
" if invocation_type == 'RequestResponse':\n",
" payload = json.loads(rv['Payload'].read().decode('utf-8'))\n",
" if 'FunctionError' in rv:\n",
" if 'stackTrace' in payload:\n",
" print('Traceback (most recent call last):')\n",
" for frame in payload['stackTrace']:\n",
" print(' File \"{}\", line {}, in {}'.format(\n",
" frame[0], frame[1], frame[2]))\n",
" print(' ' + frame[3])\n",
" print('{}: {}'.format(payload['errorType'],\n",
" payload['errorMessage']))\n",
" else:\n",
" raise RuntimeError('Unknown error')\n",
" else:\n",
" print(str(payload))"
] | [
0.018518518518518517,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903
] | 56 | 0.002023 |
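The key=value convention the loop above parses, shown standalone: a plain key=value pair stays a string, while key:=<json> is decoded as JSON before being sent as the invocation kwargs.

import json

args = ['name=world', 'retries:=3', 'opts:={"verbose": true}']
data = {}
for arg in args:
    key, val = arg.split('=', 1)
    if key.endswith(':'):
        data[key[:-1]] = json.loads(val)  # JSON argument
    else:
        data[key] = val                   # string argument
print(data)  # {'name': 'world', 'retries': 3, 'opts': {'verbose': True}}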
def iter_open(cls, name=None, interface_class=None, interface_subclass=None,
interface_protocol=None, serial_number=None, port_path=None,
default_timeout_ms=None):
"""Find and yield locally connected devices that match.
    Note that devices are opened (and interfaces claimed) as they are yielded.
Any devices yielded must be Close()'d.
Args:
name: Name to give *all* returned handles, used for logging only.
interface_class: USB interface_class to match.
interface_subclass: USB interface_subclass to match.
interface_protocol: USB interface_protocol to match.
serial_number: USB serial_number to match.
port_path: USB Port path to match, like X-X.X.X
default_timeout_ms: Default timeout in milliseconds of reads/writes on
the handles yielded.
Yields:
UsbHandle instances that match any non-None args given.
Raises:
LibusbWrappingError: When a libusb call errors during open.
"""
ctx = usb1.USBContext()
try:
devices = ctx.getDeviceList(skip_on_error=True)
except libusb1.USBError as exception:
raise usb_exceptions.LibusbWrappingError(
exception, 'Open(name=%s, class=%s, subclass=%s, protocol=%s, '
'serial=%s, port=%s) failed', name, interface_class,
interface_subclass, interface_protocol, serial_number, port_path)
for device in devices:
try:
if (serial_number is not None and
device.getSerialNumber() != serial_number):
continue
if (port_path is not None and
cls._device_to_sysfs_path(device) != port_path):
continue
for setting in device.iterSettings():
if (interface_class is not None and
setting.getClass() != interface_class):
continue
if (interface_subclass is not None and
setting.getSubClass() != interface_subclass):
continue
if (interface_protocol is not None and
setting.getProtocol() != interface_protocol):
continue
yield cls(device, setting, name=name,
default_timeout_ms=default_timeout_ms)
except libusb1.USBError as exception:
if (exception.value !=
libusb1.libusb_error.forward_dict['LIBUSB_ERROR_ACCESS']):
raise | [
"def",
"iter_open",
"(",
"cls",
",",
"name",
"=",
"None",
",",
"interface_class",
"=",
"None",
",",
"interface_subclass",
"=",
"None",
",",
"interface_protocol",
"=",
"None",
",",
"serial_number",
"=",
"None",
",",
"port_path",
"=",
"None",
",",
"default_timeout_ms",
"=",
"None",
")",
":",
"ctx",
"=",
"usb1",
".",
"USBContext",
"(",
")",
"try",
":",
"devices",
"=",
"ctx",
".",
"getDeviceList",
"(",
"skip_on_error",
"=",
"True",
")",
"except",
"libusb1",
".",
"USBError",
"as",
"exception",
":",
"raise",
"usb_exceptions",
".",
"LibusbWrappingError",
"(",
"exception",
",",
"'Open(name=%s, class=%s, subclass=%s, protocol=%s, '",
"'serial=%s, port=%s) failed'",
",",
"name",
",",
"interface_class",
",",
"interface_subclass",
",",
"interface_protocol",
",",
"serial_number",
",",
"port_path",
")",
"for",
"device",
"in",
"devices",
":",
"try",
":",
"if",
"(",
"serial_number",
"is",
"not",
"None",
"and",
"device",
".",
"getSerialNumber",
"(",
")",
"!=",
"serial_number",
")",
":",
"continue",
"if",
"(",
"port_path",
"is",
"not",
"None",
"and",
"cls",
".",
"_device_to_sysfs_path",
"(",
"device",
")",
"!=",
"port_path",
")",
":",
"continue",
"for",
"setting",
"in",
"device",
".",
"iterSettings",
"(",
")",
":",
"if",
"(",
"interface_class",
"is",
"not",
"None",
"and",
"setting",
".",
"getClass",
"(",
")",
"!=",
"interface_class",
")",
":",
"continue",
"if",
"(",
"interface_subclass",
"is",
"not",
"None",
"and",
"setting",
".",
"getSubClass",
"(",
")",
"!=",
"interface_subclass",
")",
":",
"continue",
"if",
"(",
"interface_protocol",
"is",
"not",
"None",
"and",
"setting",
".",
"getProtocol",
"(",
")",
"!=",
"interface_protocol",
")",
":",
"continue",
"yield",
"cls",
"(",
"device",
",",
"setting",
",",
"name",
"=",
"name",
",",
"default_timeout_ms",
"=",
"default_timeout_ms",
")",
"except",
"libusb1",
".",
"USBError",
"as",
"exception",
":",
"if",
"(",
"exception",
".",
"value",
"!=",
"libusb1",
".",
"libusb_error",
".",
"forward_dict",
"[",
"'LIBUSB_ERROR_ACCESS'",
"]",
")",
":",
"raise"
] | 38.5 | 0.008442 | [
"def iter_open(cls, name=None, interface_class=None, interface_subclass=None,\n",
" interface_protocol=None, serial_number=None, port_path=None,\n",
" default_timeout_ms=None):\n",
" \"\"\"Find and yield locally connected devices that match.\n",
"\n",
" Note that devices are opened (and interfaces claimd) as they are yielded.\n",
" Any devices yielded must be Close()'d.\n",
"\n",
" Args:\n",
" name: Name to give *all* returned handles, used for logging only.\n",
" interface_class: USB interface_class to match.\n",
" interface_subclass: USB interface_subclass to match.\n",
" interface_protocol: USB interface_protocol to match.\n",
" serial_number: USB serial_number to match.\n",
" port_path: USB Port path to match, like X-X.X.X\n",
" default_timeout_ms: Default timeout in milliseconds of reads/writes on\n",
" the handles yielded.\n",
"\n",
" Yields:\n",
" UsbHandle instances that match any non-None args given.\n",
"\n",
" Raises:\n",
" LibusbWrappingError: When a libusb call errors during open.\n",
" \"\"\"\n",
" ctx = usb1.USBContext()\n",
" try:\n",
" devices = ctx.getDeviceList(skip_on_error=True)\n",
" except libusb1.USBError as exception:\n",
" raise usb_exceptions.LibusbWrappingError(\n",
" exception, 'Open(name=%s, class=%s, subclass=%s, protocol=%s, '\n",
" 'serial=%s, port=%s) failed', name, interface_class,\n",
" interface_subclass, interface_protocol, serial_number, port_path)\n",
"\n",
" for device in devices:\n",
" try:\n",
" if (serial_number is not None and\n",
" device.getSerialNumber() != serial_number):\n",
" continue\n",
"\n",
" if (port_path is not None and\n",
" cls._device_to_sysfs_path(device) != port_path):\n",
" continue\n",
"\n",
" for setting in device.iterSettings():\n",
" if (interface_class is not None and\n",
" setting.getClass() != interface_class):\n",
" continue\n",
" if (interface_subclass is not None and\n",
" setting.getSubClass() != interface_subclass):\n",
" continue\n",
" if (interface_protocol is not None and\n",
" setting.getProtocol() != interface_protocol):\n",
" continue\n",
"\n",
" yield cls(device, setting, name=name,\n",
" default_timeout_ms=default_timeout_ms)\n",
" except libusb1.USBError as exception:\n",
" if (exception.value !=\n",
" libusb1.libusb_error.forward_dict['LIBUSB_ERROR_ACCESS']):\n",
" raise"
] | [
0,
0.012987012987012988,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018518518518518517,
0,
0.020833333333333332,
0,
0,
0,
0,
0,
0.09090909090909091,
0,
0.017857142857142856,
0.05263157894736842,
0,
0,
0.01639344262295082,
0.05263157894736842,
0,
0,
0.021739130434782608,
0.018518518518518517,
0,
0.02040816326530612,
0.016666666666666666,
0,
0.02040816326530612,
0.016666666666666666,
0,
0,
0.020833333333333332,
0,
0.022727272727272728,
0,
0.014084507042253521,
0.13333333333333333
] | 60 | 0.010199 |
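A hedged usage sketch; UsbHandle stands for the class that defines iter_open, and the interface class/subclass/protocol values are illustrative (ADB-style). Every yielded handle must be closed, as the docstring notes.

handles = list(UsbHandle.iter_open(name='adb',
                                   interface_class=0xFF,
                                   interface_subclass=0x42,
                                   interface_protocol=0x01))
try:
    for handle in handles:
        pass  # talk to the device here
finally:
    for handle in handles:
        handle.Close()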
def camel2snake(name:str)->str:
"Change `name` from camel to snake style."
s1 = re.sub(_camel_re1, r'\1_\2', name)
return re.sub(_camel_re2, r'\1_\2', s1).lower() | [
"def",
"camel2snake",
"(",
"name",
":",
"str",
")",
"->",
"str",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"_camel_re1",
",",
"r'\\1_\\2'",
",",
"name",
")",
"return",
"re",
".",
"sub",
"(",
"_camel_re2",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] | 42.75 | 0.017241 | [
"def camel2snake(name:str)->str:\n",
" \"Change `name` from camel to snake style.\"\n",
" s1 = re.sub(_camel_re1, r'\\1_\\2', name)\n",
" return re.sub(_camel_re2, r'\\1_\\2', s1).lower()"
] | [
0.0625,
0,
0,
0.0196078431372549
] | 4 | 0.020527 |
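The two regexes are module globals not shown in this record; the patterns below are the conventional ones for this idiom and are an assumption, not taken from the source.

import re
_camel_re1 = re.compile('(.)([A-Z][a-z]+)')   # assumed pattern
_camel_re2 = re.compile('([a-z0-9])([A-Z])')  # assumed pattern

print(camel2snake('MyHTTPServer'))     # my_http_server
print(camel2snake('AdaptiveAvgPool'))  # adaptive_avg_pool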
def get_ratings(self):
"""get_ratings()
Returns a Vote QuerySet for this rating field."""
return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key) | [
"def",
"get_ratings",
"(",
"self",
")",
":",
"return",
"Vote",
".",
"objects",
".",
"filter",
"(",
"content_type",
"=",
"self",
".",
"get_content_type",
"(",
")",
",",
"object_id",
"=",
"self",
".",
"instance",
".",
"pk",
",",
"key",
"=",
"self",
".",
"field",
".",
"key",
")"
] | 46.2 | 0.017021 | [
"def get_ratings(self):\n",
" \"\"\"get_ratings()\n",
" \n",
" Returns a Vote QuerySet for this rating field.\"\"\"\n",
" return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)"
] | [
0,
0.04,
0.1111111111111111,
0,
0.016666666666666666
] | 5 | 0.033556 |
def get_default_recipients(self):
''' Overrides EmailRecipientMixin '''
if self.email:
return [self.email,]
if self.finalRegistration:
return [self.finalRegistration.customer.email,]
elif self.temporaryRegistration:
return [self.temporaryRegistration.email,]
return [] | [
"def",
"get_default_recipients",
"(",
"self",
")",
":",
"if",
"self",
".",
"email",
":",
"return",
"[",
"self",
".",
"email",
",",
"]",
"if",
"self",
".",
"finalRegistration",
":",
"return",
"[",
"self",
".",
"finalRegistration",
".",
"customer",
".",
"email",
",",
"]",
"elif",
"self",
".",
"temporaryRegistration",
":",
"return",
"[",
"self",
".",
"temporaryRegistration",
".",
"email",
",",
"]",
"return",
"[",
"]"
] | 37.333333 | 0.014535 | [
"def get_default_recipients(self):\n",
" ''' Overrides EmailRecipientMixin '''\n",
" if self.email:\n",
" return [self.email,]\n",
" if self.finalRegistration:\n",
" return [self.finalRegistration.customer.email,]\n",
" elif self.temporaryRegistration:\n",
" return [self.temporaryRegistration.email,]\n",
" return []"
] | [
0,
0.021739130434782608,
0,
0.030303030303030304,
0,
0.016666666666666666,
0,
0.01818181818181818,
0.058823529411764705
] | 9 | 0.01619 |
def _get_valid_formats():
    ''' Calls SoX help for a list of audio formats available with the current
install of SoX.
Returns:
--------
formats : list
List of audio file extensions that SoX can process.
'''
if NO_SOX:
return []
so = subprocess.check_output(['sox', '-h'])
if type(so) is not str:
so = str(so, encoding='UTF-8')
so = so.split('\n')
idx = [i for i in range(len(so)) if 'AUDIO FILE FORMATS:' in so[i]][0]
formats = so[idx].split(' ')[3:]
return formats | [
"def",
"_get_valid_formats",
"(",
")",
":",
"if",
"NO_SOX",
":",
"return",
"[",
"]",
"so",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'sox'",
",",
"'-h'",
"]",
")",
"if",
"type",
"(",
"so",
")",
"is",
"not",
"str",
":",
"so",
"=",
"str",
"(",
"so",
",",
"encoding",
"=",
"'UTF-8'",
")",
"so",
"=",
"so",
".",
"split",
"(",
"'\\n'",
")",
"idx",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"so",
")",
")",
"if",
"'AUDIO FILE FORMATS:'",
"in",
"so",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"formats",
"=",
"so",
"[",
"idx",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"3",
":",
"]",
"return",
"formats"
] | 24.952381 | 0.001838 | [
"def _get_valid_formats():\n",
" ''' Calls SoX help for a lists of audio formats available with the current\n",
" install of SoX.\n",
"\n",
" Returns:\n",
" --------\n",
" formats : list\n",
" List of audio file extensions that SoX can process.\n",
"\n",
" '''\n",
" if NO_SOX:\n",
" return []\n",
"\n",
" so = subprocess.check_output(['sox', '-h'])\n",
" if type(so) is not str:\n",
" so = str(so, encoding='UTF-8')\n",
" so = so.split('\\n')\n",
" idx = [i for i in range(len(so)) if 'AUDIO FILE FORMATS:' in so[i]][0]\n",
" formats = so[idx].split(' ')[3:]\n",
"\n",
" return formats"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 21 | 0.002646 |
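The parsing step, shown standalone on canned `sox -h` output, since the function itself shells out to SoX:

so = 'Usage: sox ...\nAUDIO FILE FORMATS: 8svx aiff flac mp3 ogg wav\n'
lines = so.split('\n')
idx = [i for i in range(len(lines)) if 'AUDIO FILE FORMATS:' in lines[i]][0]
print(lines[idx].split(' ')[3:])  # ['8svx', 'aiff', 'flac', 'mp3', 'ogg', 'wav']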
def func_info(task, n_runs, metadata, img, config):
"""
Generate a paragraph describing T2*-weighted functional scans.
Parameters
----------
task : :obj:`str`
The name of the task.
n_runs : :obj:`int`
The number of runs acquired for this task.
metadata : :obj:`dict`
The metadata for the scan from the json associated with the scan.
img : :obj:`nibabel.Nifti1Image`
Image corresponding to one of the runs.
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
Returns
-------
desc : :obj:`str`
A description of the scan's acquisition information.
"""
if metadata.get('MultibandAccelerationFactor', 1) > 1:
mb_str = '; MB factor={}'.format(metadata['MultibandAccelerationFactor'])
else:
mb_str = ''
if metadata.get('ParallelReductionFactorInPlane', 1) > 1:
pr_str = ('; in-plane acceleration factor='
'{}'.format(metadata['ParallelReductionFactorInPlane']))
else:
pr_str = ''
if 'SliceTiming' in metadata.keys():
so_str = ' in {0} order'.format(get_slice_info(metadata['SliceTiming']))
else:
so_str = ''
if 'EchoTime' in metadata.keys():
if isinstance(metadata['EchoTime'], list):
te = [num_to_str(t*1000) for t in metadata['EchoTime']]
te_temp = ', '.join(te[:-1])
te_temp += ', and {}'.format(te[-1])
te = te_temp
me_str = 'multi-echo '
else:
te = num_to_str(metadata['EchoTime']*1000)
me_str = 'single-echo '
else:
te = 'UNKNOWN'
me_str = 'UNKNOWN-echo'
task_name = metadata.get('TaskName', task+' task')
seqs, variants = get_seqstr(config, metadata)
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
tr = metadata['RepetitionTime']
n_tps = img.shape[3]
run_secs = math.ceil(n_tps * tr)
mins, secs = divmod(run_secs, 60)
length = '{0}:{1:02.0f}'.format(int(mins), int(secs))
if n_runs == 1:
run_str = '{0} run'.format(num2words(n_runs).title())
else:
run_str = '{0} runs'.format(num2words(n_runs).title())
desc = '''
{run_str} of {task} {variants} {seqs} {me_str} fMRI data were
collected ({n_slices} slices{so_str}; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms};
voxel size={vs}mm{mb_str}{pr_str}).
Each run was {length} minutes in length, during which
{n_vols} functional volumes were acquired.
'''.format(run_str=run_str,
task=task_name,
variants=variants,
seqs=seqs,
me_str=me_str,
n_slices=n_slices,
so_str=so_str,
tr=num_to_str(tr*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str,
length=length,
n_vols=n_tps,
mb_str=mb_str,
pr_str=pr_str
)
desc = desc.replace('\n', ' ').lstrip()
while ' ' in desc:
desc = desc.replace(' ', ' ')
return desc | [
"def",
"func_info",
"(",
"task",
",",
"n_runs",
",",
"metadata",
",",
"img",
",",
"config",
")",
":",
"if",
"metadata",
".",
"get",
"(",
"'MultibandAccelerationFactor'",
",",
"1",
")",
">",
"1",
":",
"mb_str",
"=",
"'; MB factor={}'",
".",
"format",
"(",
"metadata",
"[",
"'MultibandAccelerationFactor'",
"]",
")",
"else",
":",
"mb_str",
"=",
"''",
"if",
"metadata",
".",
"get",
"(",
"'ParallelReductionFactorInPlane'",
",",
"1",
")",
">",
"1",
":",
"pr_str",
"=",
"(",
"'; in-plane acceleration factor='",
"'{}'",
".",
"format",
"(",
"metadata",
"[",
"'ParallelReductionFactorInPlane'",
"]",
")",
")",
"else",
":",
"pr_str",
"=",
"''",
"if",
"'SliceTiming'",
"in",
"metadata",
".",
"keys",
"(",
")",
":",
"so_str",
"=",
"' in {0} order'",
".",
"format",
"(",
"get_slice_info",
"(",
"metadata",
"[",
"'SliceTiming'",
"]",
")",
")",
"else",
":",
"so_str",
"=",
"''",
"if",
"'EchoTime'",
"in",
"metadata",
".",
"keys",
"(",
")",
":",
"if",
"isinstance",
"(",
"metadata",
"[",
"'EchoTime'",
"]",
",",
"list",
")",
":",
"te",
"=",
"[",
"num_to_str",
"(",
"t",
"*",
"1000",
")",
"for",
"t",
"in",
"metadata",
"[",
"'EchoTime'",
"]",
"]",
"te_temp",
"=",
"', '",
".",
"join",
"(",
"te",
"[",
":",
"-",
"1",
"]",
")",
"te_temp",
"+=",
"', and {}'",
".",
"format",
"(",
"te",
"[",
"-",
"1",
"]",
")",
"te",
"=",
"te_temp",
"me_str",
"=",
"'multi-echo '",
"else",
":",
"te",
"=",
"num_to_str",
"(",
"metadata",
"[",
"'EchoTime'",
"]",
"*",
"1000",
")",
"me_str",
"=",
"'single-echo '",
"else",
":",
"te",
"=",
"'UNKNOWN'",
"me_str",
"=",
"'UNKNOWN-echo'",
"task_name",
"=",
"metadata",
".",
"get",
"(",
"'TaskName'",
",",
"task",
"+",
"' task'",
")",
"seqs",
",",
"variants",
"=",
"get_seqstr",
"(",
"config",
",",
"metadata",
")",
"n_slices",
",",
"vs_str",
",",
"ms_str",
",",
"fov_str",
"=",
"get_sizestr",
"(",
"img",
")",
"tr",
"=",
"metadata",
"[",
"'RepetitionTime'",
"]",
"n_tps",
"=",
"img",
".",
"shape",
"[",
"3",
"]",
"run_secs",
"=",
"math",
".",
"ceil",
"(",
"n_tps",
"*",
"tr",
")",
"mins",
",",
"secs",
"=",
"divmod",
"(",
"run_secs",
",",
"60",
")",
"length",
"=",
"'{0}:{1:02.0f}'",
".",
"format",
"(",
"int",
"(",
"mins",
")",
",",
"int",
"(",
"secs",
")",
")",
"if",
"n_runs",
"==",
"1",
":",
"run_str",
"=",
"'{0} run'",
".",
"format",
"(",
"num2words",
"(",
"n_runs",
")",
".",
"title",
"(",
")",
")",
"else",
":",
"run_str",
"=",
"'{0} runs'",
".",
"format",
"(",
"num2words",
"(",
"n_runs",
")",
".",
"title",
"(",
")",
")",
"desc",
"=",
"'''\n {run_str} of {task} {variants} {seqs} {me_str} fMRI data were\n collected ({n_slices} slices{so_str}; repetition time, TR={tr}ms;\n echo time, TE={te}ms; flip angle, FA={fa}<deg>;\n field of view, FOV={fov}mm; matrix size={ms};\n voxel size={vs}mm{mb_str}{pr_str}).\n Each run was {length} minutes in length, during which\n {n_vols} functional volumes were acquired.\n '''",
".",
"format",
"(",
"run_str",
"=",
"run_str",
",",
"task",
"=",
"task_name",
",",
"variants",
"=",
"variants",
",",
"seqs",
"=",
"seqs",
",",
"me_str",
"=",
"me_str",
",",
"n_slices",
"=",
"n_slices",
",",
"so_str",
"=",
"so_str",
",",
"tr",
"=",
"num_to_str",
"(",
"tr",
"*",
"1000",
")",
",",
"te",
"=",
"te",
",",
"fa",
"=",
"metadata",
".",
"get",
"(",
"'FlipAngle'",
",",
"'UNKNOWN'",
")",
",",
"vs",
"=",
"vs_str",
",",
"fov",
"=",
"fov_str",
",",
"ms",
"=",
"ms_str",
",",
"length",
"=",
"length",
",",
"n_vols",
"=",
"n_tps",
",",
"mb_str",
"=",
"mb_str",
",",
"pr_str",
"=",
"pr_str",
")",
"desc",
"=",
"desc",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
".",
"lstrip",
"(",
")",
"while",
"' '",
"in",
"desc",
":",
"desc",
"=",
"desc",
".",
"replace",
"(",
"' '",
",",
"' '",
")",
"return",
"desc"
] | 34.676768 | 0.001133 | [
"def func_info(task, n_runs, metadata, img, config):\n",
" \"\"\"\n",
" Generate a paragraph describing T2*-weighted functional scans.\n",
"\n",
" Parameters\n",
" ----------\n",
" task : :obj:`str`\n",
" The name of the task.\n",
" n_runs : :obj:`int`\n",
" The number of runs acquired for this task.\n",
" metadata : :obj:`dict`\n",
" The metadata for the scan from the json associated with the scan.\n",
" img : :obj:`nibabel.Nifti1Image`\n",
" Image corresponding to one of the runs.\n",
" config : :obj:`dict`\n",
" A dictionary with relevant information regarding sequences, sequence\n",
" variants, phase encoding directions, and task names.\n",
"\n",
" Returns\n",
" -------\n",
" desc : :obj:`str`\n",
" A description of the scan's acquisition information.\n",
" \"\"\"\n",
" if metadata.get('MultibandAccelerationFactor', 1) > 1:\n",
" mb_str = '; MB factor={}'.format(metadata['MultibandAccelerationFactor'])\n",
" else:\n",
" mb_str = ''\n",
"\n",
" if metadata.get('ParallelReductionFactorInPlane', 1) > 1:\n",
" pr_str = ('; in-plane acceleration factor='\n",
" '{}'.format(metadata['ParallelReductionFactorInPlane']))\n",
" else:\n",
" pr_str = ''\n",
"\n",
" if 'SliceTiming' in metadata.keys():\n",
" so_str = ' in {0} order'.format(get_slice_info(metadata['SliceTiming']))\n",
" else:\n",
" so_str = ''\n",
"\n",
" if 'EchoTime' in metadata.keys():\n",
" if isinstance(metadata['EchoTime'], list):\n",
" te = [num_to_str(t*1000) for t in metadata['EchoTime']]\n",
" te_temp = ', '.join(te[:-1])\n",
" te_temp += ', and {}'.format(te[-1])\n",
" te = te_temp\n",
" me_str = 'multi-echo '\n",
" else:\n",
" te = num_to_str(metadata['EchoTime']*1000)\n",
" me_str = 'single-echo '\n",
" else:\n",
" te = 'UNKNOWN'\n",
" me_str = 'UNKNOWN-echo'\n",
"\n",
" task_name = metadata.get('TaskName', task+' task')\n",
" seqs, variants = get_seqstr(config, metadata)\n",
" n_slices, vs_str, ms_str, fov_str = get_sizestr(img)\n",
"\n",
" tr = metadata['RepetitionTime']\n",
" n_tps = img.shape[3]\n",
" run_secs = math.ceil(n_tps * tr)\n",
" mins, secs = divmod(run_secs, 60)\n",
" length = '{0}:{1:02.0f}'.format(int(mins), int(secs))\n",
"\n",
" if n_runs == 1:\n",
" run_str = '{0} run'.format(num2words(n_runs).title())\n",
" else:\n",
" run_str = '{0} runs'.format(num2words(n_runs).title())\n",
"\n",
" desc = '''\n",
" {run_str} of {task} {variants} {seqs} {me_str} fMRI data were\n",
" collected ({n_slices} slices{so_str}; repetition time, TR={tr}ms;\n",
" echo time, TE={te}ms; flip angle, FA={fa}<deg>;\n",
" field of view, FOV={fov}mm; matrix size={ms};\n",
" voxel size={vs}mm{mb_str}{pr_str}).\n",
" Each run was {length} minutes in length, during which\n",
" {n_vols} functional volumes were acquired.\n",
" '''.format(run_str=run_str,\n",
" task=task_name,\n",
" variants=variants,\n",
" seqs=seqs,\n",
" me_str=me_str,\n",
" n_slices=n_slices,\n",
" so_str=so_str,\n",
" tr=num_to_str(tr*1000),\n",
" te=te,\n",
" fa=metadata.get('FlipAngle', 'UNKNOWN'),\n",
" vs=vs_str,\n",
" fov=fov_str,\n",
" ms=ms_str,\n",
" length=length,\n",
" n_vols=n_tps,\n",
" mb_str=mb_str,\n",
" pr_str=pr_str\n",
" )\n",
" desc = desc.replace('\\n', ' ').lstrip()\n",
" while ' ' in desc:\n",
" desc = desc.replace(' ', ' ')\n",
"\n",
" return desc"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0.06666666666666667
] | 99 | 0.00136 |
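A hedged sketch of the inputs: metadata mirrors a BIDS sidecar JSON, img is a 4D NIfTI, and config must be the sequence/variant lookup dictionary shipped with the surrounding package (elided here).

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((64, 64, 30, 200)), affine=np.eye(4))
metadata = {'RepetitionTime': 2.0, 'EchoTime': 0.03, 'FlipAngle': 90,
            'TaskName': 'rest'}
config = {}  # real use: the package's sequence/variant lookup tables
desc = func_info('rest', n_runs=2, metadata=metadata, img=img, config=config)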
def call_actions(self, event, *args, **kwargs):
"""Call each function in self._actions after setting self._event."""
self._event = event
for func in self._actions:
func(event, *args, **kwargs) | [
"def",
"call_actions",
"(",
"self",
",",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_event",
"=",
"event",
"for",
"func",
"in",
"self",
".",
"_actions",
":",
"func",
"(",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 37.333333 | 0.008734 | [
"def call_actions(self, event, *args, **kwargs):\n",
" \"\"\"Call each function in self._actions after setting self._event.\"\"\"\n",
" self._event = event\n",
"\n",
" for func in self._actions:\n",
" func(event, *args, **kwargs)"
] | [
0,
0.012987012987012988,
0,
0,
0,
0.025
] | 6 | 0.006331 |
def _get_uncertainties(self, star_group_size):
"""
Retrieve uncertainties on fitted parameters from the fitter
object.
Parameters
----------
star_group_size : int
Number of stars in the given group.
Returns
-------
unc_tab : `~astropy.table.Table`
Table which contains uncertainties on the fitted parameters.
The uncertainties are reported as one standard deviation.
"""
unc_tab = Table()
for param_name in self.psf_model.param_names:
if not self.psf_model.fixed[param_name]:
unc_tab.add_column(Column(name=param_name + "_unc",
data=np.empty(star_group_size)))
if 'param_cov' in self.fitter.fit_info.keys():
if self.fitter.fit_info['param_cov'] is not None:
k = 0
n_fit_params = len(unc_tab.colnames)
for i in range(star_group_size):
unc_tab[i] = np.sqrt(np.diag(
self.fitter.fit_info['param_cov'])
)[k: k + n_fit_params]
k = k + n_fit_params
return unc_tab | [
"def",
"_get_uncertainties",
"(",
"self",
",",
"star_group_size",
")",
":",
"unc_tab",
"=",
"Table",
"(",
")",
"for",
"param_name",
"in",
"self",
".",
"psf_model",
".",
"param_names",
":",
"if",
"not",
"self",
".",
"psf_model",
".",
"fixed",
"[",
"param_name",
"]",
":",
"unc_tab",
".",
"add_column",
"(",
"Column",
"(",
"name",
"=",
"param_name",
"+",
"\"_unc\"",
",",
"data",
"=",
"np",
".",
"empty",
"(",
"star_group_size",
")",
")",
")",
"if",
"'param_cov'",
"in",
"self",
".",
"fitter",
".",
"fit_info",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"fitter",
".",
"fit_info",
"[",
"'param_cov'",
"]",
"is",
"not",
"None",
":",
"k",
"=",
"0",
"n_fit_params",
"=",
"len",
"(",
"unc_tab",
".",
"colnames",
")",
"for",
"i",
"in",
"range",
"(",
"star_group_size",
")",
":",
"unc_tab",
"[",
"i",
"]",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"diag",
"(",
"self",
".",
"fitter",
".",
"fit_info",
"[",
"'param_cov'",
"]",
")",
")",
"[",
"k",
":",
"k",
"+",
"n_fit_params",
"]",
"k",
"=",
"k",
"+",
"n_fit_params",
"return",
"unc_tab"
] | 37.272727 | 0.001585 | [
"def _get_uncertainties(self, star_group_size):\n",
" \"\"\"\n",
" Retrieve uncertainties on fitted parameters from the fitter\n",
" object.\n",
"\n",
" Parameters\n",
" ----------\n",
" star_group_size : int\n",
" Number of stars in the given group.\n",
"\n",
" Returns\n",
" -------\n",
" unc_tab : `~astropy.table.Table`\n",
" Table which contains uncertainties on the fitted parameters.\n",
" The uncertainties are reported as one standard deviation.\n",
" \"\"\"\n",
"\n",
" unc_tab = Table()\n",
" for param_name in self.psf_model.param_names:\n",
" if not self.psf_model.fixed[param_name]:\n",
" unc_tab.add_column(Column(name=param_name + \"_unc\",\n",
" data=np.empty(star_group_size)))\n",
"\n",
" if 'param_cov' in self.fitter.fit_info.keys():\n",
" if self.fitter.fit_info['param_cov'] is not None:\n",
" k = 0\n",
" n_fit_params = len(unc_tab.colnames)\n",
" for i in range(star_group_size):\n",
" unc_tab[i] = np.sqrt(np.diag(\n",
" self.fitter.fit_info['param_cov'])\n",
" )[k: k + n_fit_params]\n",
" k = k + n_fit_params\n",
" return unc_tab"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 33 | 0.003903 |
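The core computation, standalone: one-sigma uncertainties are the square roots of the covariance diagonal, consumed n_fit_params values at a time, one slice per star.

import numpy as np

param_cov = np.diag([0.04, 0.01, 0.25, 0.09])  # 2 stars x 2 fitted params
sigmas = np.sqrt(np.diag(param_cov))
n_fit_params = 2
per_star = [sigmas[k:k + n_fit_params]
            for k in range(0, sigmas.size, n_fit_params)]
# [array([0.2, 0.1]), array([0.5, 0.3])]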
def disconnect(self):
"""
disconnect a client.
"""
logger.info("disconnecting snap7 client")
result = self.library.Cli_Disconnect(self.pointer)
check_error(result, context="client")
return result | [
"def",
"disconnect",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"disconnecting snap7 client\"",
")",
"result",
"=",
"self",
".",
"library",
".",
"Cli_Disconnect",
"(",
"self",
".",
"pointer",
")",
"check_error",
"(",
"result",
",",
"context",
"=",
"\"client\"",
")",
"return",
"result"
] | 30.625 | 0.011905 | [
"def disconnect(self):\n",
" \"\"\"\n",
" disconnect a client.\n",
" \"\"\"\n",
" logger.info(\"disconnecting snap7 client\")\n",
" result = self.library.Cli_Disconnect(self.pointer)\n",
" check_error(result, context=\"client\") \n",
" return result"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0.02127659574468085,
0.047619047619047616
] | 8 | 0.019029 |
def fit(self, X, y, num_training_samples=None):
"""Use correlation data to train a model.
First compute the correlation of the input data,
and then normalize within subject
if more than one sample in one subject,
and then fit to a model defined by self.clf.
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
They contain the activity data filtered by ROIs
and prepared for correlation computation.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
y: 1D numpy array
labels, len(X) equals len(y)
num_training_samples: Optional[int]
The number of samples used in the training.
Set it to construct the kernel matrix
portion by portion so the similarity vectors of the
test data have to be computed here.
Only set num_training_samples when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
Classifier:
self.
"""
time1 = time.time()
assert len(X) == len(y), \
'the number of samples must be equal to the number of labels'
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
if num_training_samples is not None:
num_training_samples = None
logger.warn(
'num_training_samples should not be set for classifiers '
'other than SVM with precomputed kernels'
)
num_samples = len(X1)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
self.num_voxels_ = num_voxels1
self.num_features_ = num_voxels1 * num_voxels2
self.num_samples_ = num_samples
data = self._generate_training_data(X1, X2, num_training_samples)
if num_training_samples is not None:
self.test_raw_data_ = None
self.test_data_ = data[num_training_samples:,
0:num_training_samples]
# limit training to the data specified by num_training_samples
data = data[0:num_training_samples, 0:num_training_samples]
# training
self.clf = self.clf.fit(data, y[0:num_training_samples])
# set the test data
if num_training_samples is None:
self.test_raw_data_ = None
self.test_data_ = None
time2 = time.time()
logger.info(
'training done, takes %.2f s' %
(time2 - time1)
)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"num_training_samples",
"=",
"None",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"assert",
"len",
"(",
"X",
")",
"==",
"len",
"(",
"y",
")",
",",
"'the number of samples must be equal to the number of labels'",
"for",
"x",
"in",
"X",
":",
"assert",
"len",
"(",
"x",
")",
"==",
"2",
",",
"'there must be two parts for each correlation computation'",
"X1",
",",
"X2",
"=",
"zip",
"(",
"*",
"X",
")",
"if",
"not",
"(",
"isinstance",
"(",
"self",
".",
"clf",
",",
"sklearn",
".",
"svm",
".",
"SVC",
")",
"and",
"self",
".",
"clf",
".",
"kernel",
"==",
"'precomputed'",
")",
":",
"if",
"num_training_samples",
"is",
"not",
"None",
":",
"num_training_samples",
"=",
"None",
"logger",
".",
"warn",
"(",
"'num_training_samples should not be set for classifiers '",
"'other than SVM with precomputed kernels'",
")",
"num_samples",
"=",
"len",
"(",
"X1",
")",
"num_voxels1",
"=",
"X1",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"num_voxels2",
"=",
"X2",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"# make sure X1 always has more voxels",
"if",
"num_voxels1",
"<",
"num_voxels2",
":",
"X1",
",",
"X2",
"=",
"X2",
",",
"X1",
"num_voxels1",
",",
"num_voxels2",
"=",
"num_voxels2",
",",
"num_voxels1",
"self",
".",
"num_voxels_",
"=",
"num_voxels1",
"self",
".",
"num_features_",
"=",
"num_voxels1",
"*",
"num_voxels2",
"self",
".",
"num_samples_",
"=",
"num_samples",
"data",
"=",
"self",
".",
"_generate_training_data",
"(",
"X1",
",",
"X2",
",",
"num_training_samples",
")",
"if",
"num_training_samples",
"is",
"not",
"None",
":",
"self",
".",
"test_raw_data_",
"=",
"None",
"self",
".",
"test_data_",
"=",
"data",
"[",
"num_training_samples",
":",
",",
"0",
":",
"num_training_samples",
"]",
"# limit training to the data specified by num_training_samples",
"data",
"=",
"data",
"[",
"0",
":",
"num_training_samples",
",",
"0",
":",
"num_training_samples",
"]",
"# training",
"self",
".",
"clf",
"=",
"self",
".",
"clf",
".",
"fit",
"(",
"data",
",",
"y",
"[",
"0",
":",
"num_training_samples",
"]",
")",
"# set the test data",
"if",
"num_training_samples",
"is",
"None",
":",
"self",
".",
"test_raw_data_",
"=",
"None",
"self",
".",
"test_data_",
"=",
"None",
"time2",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"'training done, takes %.2f s'",
"%",
"(",
"time2",
"-",
"time1",
")",
")",
"return",
"self"
] | 39.962025 | 0.000618 | [
"def fit(self, X, y, num_training_samples=None):\n",
" \"\"\"Use correlation data to train a model.\n",
"\n",
" First compute the correlation of the input data,\n",
" and then normalize within subject\n",
" if more than one sample in one subject,\n",
" and then fit to a model defined by self.clf.\n",
"\n",
" Parameters\n",
" ----------\n",
" X: list of tuple (data1, data2)\n",
" data1 and data2 are numpy array in shape [num_TRs, num_voxels]\n",
" to be computed for correlation.\n",
" They contain the activity data filtered by ROIs\n",
" and prepared for correlation computation.\n",
" Within list, all data1s must have the same num_voxels value,\n",
" all data2s must have the same num_voxels value.\n",
" y: 1D numpy array\n",
" labels, len(X) equals len(y)\n",
" num_training_samples: Optional[int]\n",
" The number of samples used in the training.\n",
" Set it to construct the kernel matrix\n",
" portion by portion so the similarity vectors of the\n",
" test data have to be computed here.\n",
" Only set num_training_samples when sklearn.svm.SVC with\n",
" precomputed kernel is used.\n",
" If it is set, only those samples will be used to fit the model.\n",
"\n",
" Returns\n",
" -------\n",
" Classifier:\n",
" self.\n",
" \"\"\"\n",
" time1 = time.time()\n",
" assert len(X) == len(y), \\\n",
" 'the number of samples must be equal to the number of labels'\n",
" for x in X:\n",
" assert len(x) == 2, \\\n",
" 'there must be two parts for each correlation computation'\n",
" X1, X2 = zip(*X)\n",
" if not (isinstance(self.clf, sklearn.svm.SVC)\n",
" and self.clf.kernel == 'precomputed'):\n",
" if num_training_samples is not None:\n",
" num_training_samples = None\n",
" logger.warn(\n",
" 'num_training_samples should not be set for classifiers '\n",
" 'other than SVM with precomputed kernels'\n",
" )\n",
" num_samples = len(X1)\n",
" num_voxels1 = X1[0].shape[1]\n",
" num_voxels2 = X2[0].shape[1]\n",
" # make sure X1 always has more voxels\n",
" if num_voxels1 < num_voxels2:\n",
" X1, X2 = X2, X1\n",
" num_voxels1, num_voxels2 = num_voxels2, num_voxels1\n",
" self.num_voxels_ = num_voxels1\n",
" self.num_features_ = num_voxels1 * num_voxels2\n",
" self.num_samples_ = num_samples\n",
"\n",
" data = self._generate_training_data(X1, X2, num_training_samples)\n",
"\n",
" if num_training_samples is not None:\n",
" self.test_raw_data_ = None\n",
" self.test_data_ = data[num_training_samples:,\n",
" 0:num_training_samples]\n",
" # limit training to the data specified by num_training_samples\n",
" data = data[0:num_training_samples, 0:num_training_samples]\n",
" # training\n",
" self.clf = self.clf.fit(data, y[0:num_training_samples])\n",
" # set the test data\n",
" if num_training_samples is None:\n",
" self.test_raw_data_ = None\n",
" self.test_data_ = None\n",
" time2 = time.time()\n",
" logger.info(\n",
" 'training done, takes %.2f s' %\n",
" (time2 - time1)\n",
" )\n",
" return self"
] | [
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 79 | 0.000919 |
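
A minimal usage sketch for the fit method in the record above. Only the (data1, data2) sample format and the self.clf attribute are visible here, so the Classifier construction is left as a hypothetical comment; the array shapes follow the docstring.

import numpy as np

# Shapes follow the docstring: each sample is a (data1, data2) tuple of
# [num_TRs, num_voxels] numpy arrays; all data1s share one voxel count and
# all data2s share another.
rng = np.random.RandomState(0)
X = [(rng.randn(100, 30), rng.randn(100, 20)) for _ in range(8)]
y = np.array([0, 1, 0, 1, 0, 1, 0, 1])  # len(X) == len(y)

# Hypothetical wrapper: the record only shows that self.clf is an sklearn
# estimator, e.g. sklearn.svm.SVC(kernel='precomputed').
# model = Classifier(clf=SVC(kernel='precomputed'))
# model.fit(X, y, num_training_samples=6)
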
def execute_command(command):
"""Execute a command and return its output"""
command = shlex.split(command)
try:
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except FileNotFoundError:
raise RuntimeError("Command not found: {}".format(repr(command)))
process.wait()
# TODO: may use another codec to decode
if process.returncode > 0:
stderr = process.stderr.read().decode("utf-8")
raise ValueError("Error executing command: {}".format(repr(stderr)))
return process.stdout.read().decode("utf-8") | [
"def",
"execute_command",
"(",
"command",
")",
":",
"command",
"=",
"shlex",
".",
"split",
"(",
"command",
")",
"try",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"except",
"FileNotFoundError",
":",
"raise",
"RuntimeError",
"(",
"\"Command not found: {}\"",
".",
"format",
"(",
"repr",
"(",
"command",
")",
")",
")",
"process",
".",
"wait",
"(",
")",
"# TODO: may use another codec to decode",
"if",
"process",
".",
"returncode",
">",
"0",
":",
"stderr",
"=",
"process",
".",
"stderr",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"raise",
"ValueError",
"(",
"\"Error executing command: {}\"",
".",
"format",
"(",
"repr",
"(",
"stderr",
")",
")",
")",
"return",
"process",
".",
"stdout",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")"
] | 34.684211 | 0.001477 | [
"def execute_command(command):\n",
" \"\"\"Execute a command and return its output\"\"\"\n",
"\n",
" command = shlex.split(command)\n",
" try:\n",
" process = subprocess.Popen(\n",
" command,\n",
" stdin=subprocess.PIPE,\n",
" stdout=subprocess.PIPE,\n",
" stderr=subprocess.PIPE,\n",
" )\n",
" except FileNotFoundError:\n",
" raise RuntimeError(\"Command not found: {}\".format(repr(command)))\n",
" process.wait()\n",
" # TODO: may use another codec to decode\n",
" if process.returncode > 0:\n",
" stderr = process.stderr.read().decode(\"utf-8\")\n",
" raise ValueError(\"Error executing command: {}\".format(repr(stderr)))\n",
" return process.stdout.read().decode(\"utf-8\")"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332
] | 19 | 0.001096 |
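
execute_command above needs only shlex and subprocess from the standard library. Below is a self-contained sketch of the same contract built on subprocess.run; it is a modern equivalent under that assumption, not the record's exact code (a missing binary raises FileNotFoundError here instead of RuntimeError).

import shlex
import subprocess

def run_command(command):
    # Same contract as execute_command: return stdout, raise on failure.
    result = subprocess.run(shlex.split(command),
                            capture_output=True, text=True)
    if result.returncode > 0:
        raise ValueError("Error executing command: {!r}".format(result.stderr))
    return result.stdout

print(run_command("echo hello"))  # prints: hello
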
def _extract_phot_from_exposure(
expIdIndex,
log,
cachePath,
settings):
"""* extract phot from exposure*
**Key Arguments:**
- ``expIdIndex`` -- index of the exposure to extract the dophot photometry from. A tuple of expId and integer MJD
- ``cachePath`` -- path to the cache of ATLAS data
**Return:**
- ``dophotRows`` -- the list of matched dophot rows
"""
log.info('starting the ``_extract_phot_from_exposure`` method')
global exposureIds
expId = exposureIds[expIdIndex]
# SETUP A DATABASE CONNECTION FOR THE remote database
host = settings["database settings"]["atlasMovers"]["host"]
user = settings["database settings"]["atlasMovers"]["user"]
passwd = settings["database settings"]["atlasMovers"]["password"]
dbName = settings["database settings"]["atlasMovers"]["db"]
try:
sshPort = settings["database settings"][
"atlasMovers"]["tunnel"]["port"]
    except Exception:
sshPort = False
thisConn = ms.connect(
host=host,
user=user,
passwd=passwd,
db=dbName,
port=sshPort,
use_unicode=True,
charset='utf8',
client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
connect_timeout=3600
)
thisConn.autocommit(True)
matchRadius = float(settings["dophot"]["search radius"])
dophotFilePath = cachePath + "/" + \
expId[0][:3] + "/" + str(expId[1]) + "/" + expId[0] + ".dph"
# TEST THE FILE EXISTS
exists = os.path.exists(dophotFilePath)
expId = expId[0]
if not exists:
sqlQuery = """update atlas_exposures set dophot_match = 99 where expname = "%(expId)s" """ % locals(
)
writequery(
log=log,
sqlQuery=sqlQuery,
dbConn=thisConn,
)
log.info(
'the dophot file %(expId)s.dph is missing from the local ATLAS data cache' % locals())
return []
try:
log.debug("attempting to open the file %s" %
(dophotFilePath,))
dophotFile = codecs.open(
dophotFilePath, encoding='utf-8', mode='r')
dophotData = dophotFile.read()
dophotFile.close()
    except IOError as e:
message = 'could not open the file %s' % (dophotFilePath,)
log.critical(message)
raise IOError(message)
ra = []
dec = []
dophotLines = dophotData.split("\n")[1:]
# FREE MEMORY
dophotData = None
for r in dophotLines:
r = r.split()
if len(r):
ra.append(float(r[0]))
dec.append(float(r[1]))
ra = np.array(ra)
dec = np.array(dec)
sqlQuery = u"""
select * from orbfit_positions where expname = "%(expId)s"
""" % locals()
try:
orbFitRows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=thisConn,
)
    except Exception:
thisConn = ms.connect(
host=host,
user=user,
passwd=passwd,
db=dbName,
port=sshPort,
use_unicode=True,
charset='utf8',
client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
connect_timeout=3600
)
thisConn.autocommit(True)
orbFitRows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=thisConn,
)
potSources = len(orbFitRows)
raOrb = []
raOrb[:] = [r["ra_deg"] for r in orbFitRows]
decOrb = []
decOrb[:] = [r["dec_deg"] for r in orbFitRows]
raOrb = np.array(raOrb)
decOrb = np.array(decOrb)
mesh = HTM(
depth=12,
log=log
)
matchIndices1, matchIndices2, seps = mesh.match(
ra1=ra,
dec1=dec,
ra2=raOrb,
dec2=decOrb,
radius=matchRadius / 3600.,
convertToArray=False,
maxmatch=0 # 1 = match closest 1, 0 = match all
)
# FREE MEMORY
raOrb = None
decOrb = None
dophotRows = []
for m1, m2, s in zip(matchIndices1, matchIndices2, seps):
# print ra[m1], dec[m1], " -> ", s * 3600., " arcsec -> ",
# raOrb[m2], decOrb[m2]
dList = dophotLines[m1].split()
dDict = {
"ra_deg": dList[0],
"dec_deg": dList[1],
"m": dList[2],
"idx": dList[3],
"type": dList[4],
"xtsk": dList[5],
"ytsk": dList[6],
"fitmag": dList[7],
"dfitmag": dList[8],
"sky": dList[9],
"major": dList[10],
"minor": dList[11],
"phi": dList[12],
"probgal": dList[13],
"apmag": dList[14],
"dapmag": dList[15],
"apsky": dList[16],
"ap_fit": dList[17],
"orbfit_separation_arcsec": s * 3600.,
"orbfit_postions_id": orbFitRows[m2]["primaryId"],
"expname": expId
}
dophotRows.append(dDict)
# FREE MEMORY
dophotLines = None
orbFitRows = None
log.info('completed the ``_extract_phot_from_exposure`` method')
return dophotRows | [
"def",
"_extract_phot_from_exposure",
"(",
"expIdIndex",
",",
"log",
",",
"cachePath",
",",
"settings",
")",
":",
"log",
".",
"info",
"(",
"'starting the ``_extract_phot_from_exposure`` method'",
")",
"global",
"exposureIds",
"expId",
"=",
"exposureIds",
"[",
"expIdIndex",
"]",
"# SETUP A DATABASE CONNECTION FOR THE remote database",
"host",
"=",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"atlasMovers\"",
"]",
"[",
"\"host\"",
"]",
"user",
"=",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"atlasMovers\"",
"]",
"[",
"\"user\"",
"]",
"passwd",
"=",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"atlasMovers\"",
"]",
"[",
"\"password\"",
"]",
"dbName",
"=",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"atlasMovers\"",
"]",
"[",
"\"db\"",
"]",
"try",
":",
"sshPort",
"=",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"atlasMovers\"",
"]",
"[",
"\"tunnel\"",
"]",
"[",
"\"port\"",
"]",
"except",
":",
"sshPort",
"=",
"False",
"thisConn",
"=",
"ms",
".",
"connect",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"passwd",
"=",
"passwd",
",",
"db",
"=",
"dbName",
",",
"port",
"=",
"sshPort",
",",
"use_unicode",
"=",
"True",
",",
"charset",
"=",
"'utf8'",
",",
"client_flag",
"=",
"ms",
".",
"constants",
".",
"CLIENT",
".",
"MULTI_STATEMENTS",
",",
"connect_timeout",
"=",
"3600",
")",
"thisConn",
".",
"autocommit",
"(",
"True",
")",
"matchRadius",
"=",
"float",
"(",
"settings",
"[",
"\"dophot\"",
"]",
"[",
"\"search radius\"",
"]",
")",
"dophotFilePath",
"=",
"cachePath",
"+",
"\"/\"",
"+",
"expId",
"[",
"0",
"]",
"[",
":",
"3",
"]",
"+",
"\"/\"",
"+",
"str",
"(",
"expId",
"[",
"1",
"]",
")",
"+",
"\"/\"",
"+",
"expId",
"[",
"0",
"]",
"+",
"\".dph\"",
"# TEST THE FILE EXISTS",
"exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"dophotFilePath",
")",
"expId",
"=",
"expId",
"[",
"0",
"]",
"if",
"not",
"exists",
":",
"sqlQuery",
"=",
"\"\"\"update atlas_exposures set dophot_match = 99 where expname = \"%(expId)s\" \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"thisConn",
",",
")",
"log",
".",
"info",
"(",
"'the dophot file %(expId)s.dph is missing from the local ATLAS data cache'",
"%",
"locals",
"(",
")",
")",
"return",
"[",
"]",
"try",
":",
"log",
".",
"debug",
"(",
"\"attempting to open the file %s\"",
"%",
"(",
"dophotFilePath",
",",
")",
")",
"dophotFile",
"=",
"codecs",
".",
"open",
"(",
"dophotFilePath",
",",
"encoding",
"=",
"'utf-8'",
",",
"mode",
"=",
"'r'",
")",
"dophotData",
"=",
"dophotFile",
".",
"read",
"(",
")",
"dophotFile",
".",
"close",
"(",
")",
"except",
"IOError",
",",
"e",
":",
"message",
"=",
"'could not open the file %s'",
"%",
"(",
"dophotFilePath",
",",
")",
"log",
".",
"critical",
"(",
"message",
")",
"raise",
"IOError",
"(",
"message",
")",
"ra",
"=",
"[",
"]",
"dec",
"=",
"[",
"]",
"dophotLines",
"=",
"dophotData",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
":",
"]",
"# FREE MEMORY",
"dophotData",
"=",
"None",
"for",
"r",
"in",
"dophotLines",
":",
"r",
"=",
"r",
".",
"split",
"(",
")",
"if",
"len",
"(",
"r",
")",
":",
"ra",
".",
"append",
"(",
"float",
"(",
"r",
"[",
"0",
"]",
")",
")",
"dec",
".",
"append",
"(",
"float",
"(",
"r",
"[",
"1",
"]",
")",
")",
"ra",
"=",
"np",
".",
"array",
"(",
"ra",
")",
"dec",
"=",
"np",
".",
"array",
"(",
"dec",
")",
"sqlQuery",
"=",
"u\"\"\"\n select * from orbfit_positions where expname = \"%(expId)s\"\n \"\"\"",
"%",
"locals",
"(",
")",
"try",
":",
"orbFitRows",
"=",
"readquery",
"(",
"log",
"=",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"thisConn",
",",
")",
"except",
":",
"thisConn",
"=",
"ms",
".",
"connect",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"passwd",
"=",
"passwd",
",",
"db",
"=",
"dbName",
",",
"port",
"=",
"sshPort",
",",
"use_unicode",
"=",
"True",
",",
"charset",
"=",
"'utf8'",
",",
"client_flag",
"=",
"ms",
".",
"constants",
".",
"CLIENT",
".",
"MULTI_STATEMENTS",
",",
"connect_timeout",
"=",
"3600",
")",
"thisConn",
".",
"autocommit",
"(",
"True",
")",
"orbFitRows",
"=",
"readquery",
"(",
"log",
"=",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"thisConn",
",",
")",
"potSources",
"=",
"len",
"(",
"orbFitRows",
")",
"raOrb",
"=",
"[",
"]",
"raOrb",
"[",
":",
"]",
"=",
"[",
"r",
"[",
"\"ra_deg\"",
"]",
"for",
"r",
"in",
"orbFitRows",
"]",
"decOrb",
"=",
"[",
"]",
"decOrb",
"[",
":",
"]",
"=",
"[",
"r",
"[",
"\"dec_deg\"",
"]",
"for",
"r",
"in",
"orbFitRows",
"]",
"raOrb",
"=",
"np",
".",
"array",
"(",
"raOrb",
")",
"decOrb",
"=",
"np",
".",
"array",
"(",
"decOrb",
")",
"mesh",
"=",
"HTM",
"(",
"depth",
"=",
"12",
",",
"log",
"=",
"log",
")",
"matchIndices1",
",",
"matchIndices2",
",",
"seps",
"=",
"mesh",
".",
"match",
"(",
"ra1",
"=",
"ra",
",",
"dec1",
"=",
"dec",
",",
"ra2",
"=",
"raOrb",
",",
"dec2",
"=",
"decOrb",
",",
"radius",
"=",
"matchRadius",
"/",
"3600.",
",",
"convertToArray",
"=",
"False",
",",
"maxmatch",
"=",
"0",
"# 1 = match closest 1, 0 = match all",
")",
"# FREE MEMORY",
"raOrb",
"=",
"None",
"decOrb",
"=",
"None",
"dophotRows",
"=",
"[",
"]",
"for",
"m1",
",",
"m2",
",",
"s",
"in",
"zip",
"(",
"matchIndices1",
",",
"matchIndices2",
",",
"seps",
")",
":",
"# print ra[m1], dec[m1], \" -> \", s * 3600., \" arcsec -> \",",
"# raOrb[m2], decOrb[m2]",
"dList",
"=",
"dophotLines",
"[",
"m1",
"]",
".",
"split",
"(",
")",
"dDict",
"=",
"{",
"\"ra_deg\"",
":",
"dList",
"[",
"0",
"]",
",",
"\"dec_deg\"",
":",
"dList",
"[",
"1",
"]",
",",
"\"m\"",
":",
"dList",
"[",
"2",
"]",
",",
"\"idx\"",
":",
"dList",
"[",
"3",
"]",
",",
"\"type\"",
":",
"dList",
"[",
"4",
"]",
",",
"\"xtsk\"",
":",
"dList",
"[",
"5",
"]",
",",
"\"ytsk\"",
":",
"dList",
"[",
"6",
"]",
",",
"\"fitmag\"",
":",
"dList",
"[",
"7",
"]",
",",
"\"dfitmag\"",
":",
"dList",
"[",
"8",
"]",
",",
"\"sky\"",
":",
"dList",
"[",
"9",
"]",
",",
"\"major\"",
":",
"dList",
"[",
"10",
"]",
",",
"\"minor\"",
":",
"dList",
"[",
"11",
"]",
",",
"\"phi\"",
":",
"dList",
"[",
"12",
"]",
",",
"\"probgal\"",
":",
"dList",
"[",
"13",
"]",
",",
"\"apmag\"",
":",
"dList",
"[",
"14",
"]",
",",
"\"dapmag\"",
":",
"dList",
"[",
"15",
"]",
",",
"\"apsky\"",
":",
"dList",
"[",
"16",
"]",
",",
"\"ap_fit\"",
":",
"dList",
"[",
"17",
"]",
",",
"\"orbfit_separation_arcsec\"",
":",
"s",
"*",
"3600.",
",",
"\"orbfit_postions_id\"",
":",
"orbFitRows",
"[",
"m2",
"]",
"[",
"\"primaryId\"",
"]",
",",
"\"expname\"",
":",
"expId",
"}",
"dophotRows",
".",
"append",
"(",
"dDict",
")",
"# FREE MEMORY",
"dophotLines",
"=",
"None",
"orbFitRows",
"=",
"None",
"log",
".",
"info",
"(",
"'completed the ``_extract_phot_from_exposure`` method'",
")",
"return",
"dophotRows"
] | 27.076503 | 0.001168 | [
"def _extract_phot_from_exposure(\n",
" expIdIndex,\n",
" log,\n",
" cachePath,\n",
" settings):\n",
" \"\"\"* extract phot from exposure*\n",
"\n",
" **Key Arguments:**\n",
" - ``expIdIndex`` -- index of the exposure to extract the dophot photometry from. A tuple of expId and integer MJD\n",
" - ``cachePath`` -- path to the cache of ATLAS data\n",
"\n",
" **Return:**\n",
" - ``dophotRows`` -- the list of matched dophot rows\n",
" \"\"\"\n",
" log.info('starting the ``_extract_phot_from_exposure`` method')\n",
"\n",
" global exposureIds\n",
"\n",
" expId = exposureIds[expIdIndex]\n",
"\n",
" # SETUP A DATABASE CONNECTION FOR THE remote database\n",
" host = settings[\"database settings\"][\"atlasMovers\"][\"host\"]\n",
" user = settings[\"database settings\"][\"atlasMovers\"][\"user\"]\n",
" passwd = settings[\"database settings\"][\"atlasMovers\"][\"password\"]\n",
" dbName = settings[\"database settings\"][\"atlasMovers\"][\"db\"]\n",
" try:\n",
" sshPort = settings[\"database settings\"][\n",
" \"atlasMovers\"][\"tunnel\"][\"port\"]\n",
" except:\n",
" sshPort = False\n",
" thisConn = ms.connect(\n",
" host=host,\n",
" user=user,\n",
" passwd=passwd,\n",
" db=dbName,\n",
" port=sshPort,\n",
" use_unicode=True,\n",
" charset='utf8',\n",
" client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,\n",
" connect_timeout=3600\n",
" )\n",
" thisConn.autocommit(True)\n",
"\n",
" matchRadius = float(settings[\"dophot\"][\"search radius\"])\n",
"\n",
" dophotFilePath = cachePath + \"/\" + \\\n",
" expId[0][:3] + \"/\" + str(expId[1]) + \"/\" + expId[0] + \".dph\"\n",
"\n",
" # TEST THE FILE EXISTS\n",
" exists = os.path.exists(dophotFilePath)\n",
" expId = expId[0]\n",
" if not exists:\n",
"\n",
" sqlQuery = \"\"\"update atlas_exposures set dophot_match = 99 where expname = \"%(expId)s\" \"\"\" % locals(\n",
" )\n",
" writequery(\n",
" log=log,\n",
" sqlQuery=sqlQuery,\n",
" dbConn=thisConn,\n",
" )\n",
" log.info(\n",
" 'the dophot file %(expId)s.dph is missing from the local ATLAS data cache' % locals())\n",
" return []\n",
"\n",
" try:\n",
" log.debug(\"attempting to open the file %s\" %\n",
" (dophotFilePath,))\n",
" dophotFile = codecs.open(\n",
" dophotFilePath, encoding='utf-8', mode='r')\n",
" dophotData = dophotFile.read()\n",
" dophotFile.close()\n",
" except IOError, e:\n",
" message = 'could not open the file %s' % (dophotFilePath,)\n",
" log.critical(message)\n",
" raise IOError(message)\n",
"\n",
" ra = []\n",
" dec = []\n",
" dophotLines = dophotData.split(\"\\n\")[1:]\n",
"\n",
" # FREE MEMORY\n",
" dophotData = None\n",
" for r in dophotLines:\n",
" r = r.split()\n",
" if len(r):\n",
" ra.append(float(r[0]))\n",
" dec.append(float(r[1]))\n",
"\n",
" ra = np.array(ra)\n",
" dec = np.array(dec)\n",
"\n",
" sqlQuery = u\"\"\"\n",
" select * from orbfit_positions where expname = \"%(expId)s\"\n",
" \"\"\" % locals()\n",
" try:\n",
" orbFitRows = readquery(\n",
" log=log,\n",
" sqlQuery=sqlQuery,\n",
" dbConn=thisConn,\n",
" )\n",
" except:\n",
" thisConn = ms.connect(\n",
" host=host,\n",
" user=user,\n",
" passwd=passwd,\n",
" db=dbName,\n",
" port=sshPort,\n",
" use_unicode=True,\n",
" charset='utf8',\n",
" client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,\n",
" connect_timeout=3600\n",
" )\n",
" thisConn.autocommit(True)\n",
" orbFitRows = readquery(\n",
" log=log,\n",
" sqlQuery=sqlQuery,\n",
" dbConn=thisConn,\n",
" )\n",
"\n",
" potSources = len(orbFitRows)\n",
"\n",
" raOrb = []\n",
" raOrb[:] = [r[\"ra_deg\"] for r in orbFitRows]\n",
" decOrb = []\n",
" decOrb[:] = [r[\"dec_deg\"] for r in orbFitRows]\n",
"\n",
" raOrb = np.array(raOrb)\n",
" decOrb = np.array(decOrb)\n",
"\n",
" mesh = HTM(\n",
" depth=12,\n",
" log=log\n",
" )\n",
" matchIndices1, matchIndices2, seps = mesh.match(\n",
" ra1=ra,\n",
" dec1=dec,\n",
" ra2=raOrb,\n",
" dec2=decOrb,\n",
" radius=matchRadius / 3600.,\n",
" convertToArray=False,\n",
" maxmatch=0 # 1 = match closest 1, 0 = match all\n",
" )\n",
"\n",
" # FREE MEMORY\n",
" raOrb = None\n",
" decOrb = None\n",
"\n",
" dophotRows = []\n",
" for m1, m2, s in zip(matchIndices1, matchIndices2, seps):\n",
" # print ra[m1], dec[m1], \" -> \", s * 3600., \" arcsec -> \",\n",
" # raOrb[m2], decOrb[m2]\n",
" dList = dophotLines[m1].split()\n",
" dDict = {\n",
" \"ra_deg\": dList[0],\n",
" \"dec_deg\": dList[1],\n",
" \"m\": dList[2],\n",
" \"idx\": dList[3],\n",
" \"type\": dList[4],\n",
" \"xtsk\": dList[5],\n",
" \"ytsk\": dList[6],\n",
" \"fitmag\": dList[7],\n",
" \"dfitmag\": dList[8],\n",
" \"sky\": dList[9],\n",
" \"major\": dList[10],\n",
" \"minor\": dList[11],\n",
" \"phi\": dList[12],\n",
" \"probgal\": dList[13],\n",
" \"apmag\": dList[14],\n",
" \"dapmag\": dList[15],\n",
" \"apsky\": dList[16],\n",
" \"ap_fit\": dList[17],\n",
" \"orbfit_separation_arcsec\": s * 3600.,\n",
" \"orbfit_postions_id\": orbFitRows[m2][\"primaryId\"],\n",
" \"expname\": expId\n",
" }\n",
" dophotRows.append(dDict)\n",
"\n",
" # FREE MEMORY\n",
" dophotLines = None\n",
" orbFitRows = None\n",
"\n",
" log.info('completed the ``_extract_phot_from_exposure`` method')\n",
" return dophotRows"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.00819672131147541,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 183 | 0.001321 |
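
Every key _extract_phot_from_exposure reads from its settings argument appears in the record above. The sketch below spells out that mapping with placeholder values, plus the (expname, integer MJD) tuple format the global exposureIds list must hold; all concrete values are made up.

# All keys below are taken from the lookups in the record; the values are
# placeholders, not real credentials.
settings = {
    "database settings": {
        "atlasMovers": {
            "host": "127.0.0.1",
            "user": "atlas",
            "password": "placeholder",
            "db": "atlas_movers",
            "tunnel": {"port": 9002},  # optional; sshPort falls back to False
        }
    },
    "dophot": {"search radius": 1.0},  # arcsec; becomes matchRadius
}

# exposureIds entries pair an exposure name with an integer MJD; the name's
# first three characters select the cache subdirectory:
#   <cachePath>/<expname[:3]>/<mjd>/<expname>.dph
exposureIds = [("02a57916o0051c", 57916)]  # placeholder values
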
async def github_request(session, api_token,
query=None, mutation=None, variables=None):
"""Send a request to the GitHub v4 (GraphQL) API.
The request is asynchronous, with asyncio.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
query : `str` or `GitHubQuery`
GraphQL query string. If provided, then the ``mutation`` parameter
should not be set. For examples, see the `GitHub guide to query and
mutation operations`_.
mutation : `str` or `GitHubQuery`
GraphQL mutation string. If provided, then the ``query`` parameter
should not be set. For examples, see the `GitHub guide to query and
mutation operations`_.
variables : `dict`
GraphQL variables, as a JSON-compatible dictionary. This is only
required if the ``query`` or ``mutation`` uses GraphQL variables.
Returns
-------
data : `dict`
Parsed JSON as a `dict` object.
    .. _`GitHub personal access token guide`: https://ls.st/41d
    .. _`GitHub guide to query and mutation operations`: https://ls.st/9s7
"""
payload = {}
if query is not None:
payload['query'] = str(query) # converts a GitHubQuery
if mutation is not None:
payload['mutation'] = str(mutation) # converts a GitHubQuery
if variables is not None:
payload['variables'] = variables
headers = {'Authorization': 'token {}'.format(api_token)}
url = 'https://api.github.com/graphql'
async with session.post(url, json=payload, headers=headers) as response:
data = await response.json()
return data | [
"async",
"def",
"github_request",
"(",
"session",
",",
"api_token",
",",
"query",
"=",
"None",
",",
"mutation",
"=",
"None",
",",
"variables",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"}",
"if",
"query",
"is",
"not",
"None",
":",
"payload",
"[",
"'query'",
"]",
"=",
"str",
"(",
"query",
")",
"# converts a GitHubQuery",
"if",
"mutation",
"is",
"not",
"None",
":",
"payload",
"[",
"'mutation'",
"]",
"=",
"str",
"(",
"mutation",
")",
"# converts a GitHubQuery",
"if",
"variables",
"is",
"not",
"None",
":",
"payload",
"[",
"'variables'",
"]",
"=",
"variables",
"headers",
"=",
"{",
"'Authorization'",
":",
"'token {}'",
".",
"format",
"(",
"api_token",
")",
"}",
"url",
"=",
"'https://api.github.com/graphql'",
"async",
"with",
"session",
".",
"post",
"(",
"url",
",",
"json",
"=",
"payload",
",",
"headers",
"=",
"headers",
")",
"as",
"response",
":",
"data",
"=",
"await",
"response",
".",
"json",
"(",
")",
"return",
"data"
] | 36.520833 | 0.000556 | [
"async def github_request(session, api_token,\n",
" query=None, mutation=None, variables=None):\n",
" \"\"\"Send a request to the GitHub v4 (GraphQL) API.\n",
"\n",
" The request is asynchronous, with asyncio.\n",
"\n",
" Parameters\n",
" ----------\n",
" session : `aiohttp.ClientSession`\n",
" Your application's aiohttp client session.\n",
" api_token : `str`\n",
" A GitHub personal API token. See the `GitHub personal access token\n",
" guide`_.\n",
" query : `str` or `GitHubQuery`\n",
" GraphQL query string. If provided, then the ``mutation`` parameter\n",
" should not be set. For examples, see the `GitHub guide to query and\n",
" mutation operations`_.\n",
" mutation : `str` or `GitHubQuery`\n",
" GraphQL mutation string. If provided, then the ``query`` parameter\n",
" should not be set. For examples, see the `GitHub guide to query and\n",
" mutation operations`_.\n",
" variables : `dict`\n",
" GraphQL variables, as a JSON-compatible dictionary. This is only\n",
" required if the ``query`` or ``mutation`` uses GraphQL variables.\n",
"\n",
" Returns\n",
" -------\n",
" data : `dict`\n",
" Parsed JSON as a `dict` object.\n",
"\n",
" .. `GitHub personal access token guide`: https://ls.st/41d\n",
" .. `GitHub guide to query and mutation operations`: https://ls.st/9s7\n",
" \"\"\"\n",
" payload = {}\n",
" if query is not None:\n",
" payload['query'] = str(query) # converts a GitHubQuery\n",
" if mutation is not None:\n",
" payload['mutation'] = str(mutation) # converts a GitHubQuery\n",
" if variables is not None:\n",
" payload['variables'] = variables\n",
"\n",
" headers = {'Authorization': 'token {}'.format(api_token)}\n",
"\n",
" url = 'https://api.github.com/graphql'\n",
" async with session.post(url, json=payload, headers=headers) as response:\n",
" data = await response.json()\n",
"\n",
" return data"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 48 | 0.001389 |
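
A usage sketch for github_request; the token is a placeholder and the query is a minimal GraphQL v4 viewer lookup.

import asyncio
import aiohttp

async def main():
    query = "query { viewer { login } }"
    async with aiohttp.ClientSession() as session:
        # github_request is the coroutine defined in the record above.
        data = await github_request(session, "placeholder-token", query=query)
        print(data)

asyncio.run(main())
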
def ProcessNewBlock(self, block):
"""
Processes a block on the blockchain. This should be done in a sequential order, ie block 4 should be
only processed after block 3.
Args:
block: (neo.Core.Block) a block on the blockchain.
"""
added = set()
changed = set()
deleted = set()
try:
# go through the list of transactions in the block and enumerate
# over their outputs
for tx in block.FullTransactions:
for index, output in enumerate(tx.outputs):
# check to see if the outputs in the tx are in this wallet
state = self.CheckAddressState(output.ScriptHash)
if state & AddressState.InWallet > 0:
# if it's in the wallet, check to see if the coin exists yet
key = CoinReference(tx.Hash, index)
# if it exists, update it, otherwise create a new one
if key in self._coins.keys():
coin = self._coins[key]
coin.State |= CoinState.Confirmed
changed.add(coin)
else:
newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Confirmed, transaction=tx)
self._coins[key] = newcoin
added.add(newcoin)
if state & AddressState.WatchOnly > 0:
self._coins[key].State |= CoinState.WatchOnly
changed.add(self._coins[key])
# now iterate over the inputs of the tx and do the same
for tx in block.FullTransactions:
for input in tx.inputs:
if input in self._coins.keys():
if self._coins[input].Output.AssetId == Blockchain.SystemShare().Hash:
coin = self._coins[input]
coin.State |= CoinState.Spent | CoinState.Confirmed
changed.add(coin)
else:
deleted.add(self._coins[input])
del self._coins[input]
for claimTx in [tx for tx in block.Transactions if tx.Type == TransactionType.ClaimTransaction]:
for ref in claimTx.Claims:
if ref in self._coins.keys():
deleted.add(self._coins[ref])
del self._coins[ref]
# update the current height of the wallet
self._current_height += 1
# in the case that another wallet implementation needs to do something
# with the coins that have been changed ( ie persist to db ) this
# method is called
self.OnProcessNewBlock(block, added, changed, deleted)
# this is not necessary at the moment, but any outside process
# that wants to subscribe to the balance changed event could do
# so from the BalanceChanged method
if len(added) + len(deleted) + len(changed) > 0:
self.BalanceChanged()
except Exception as e:
traceback.print_stack()
traceback.print_exc()
logger.error("could not process %s " % e) | [
"def",
"ProcessNewBlock",
"(",
"self",
",",
"block",
")",
":",
"added",
"=",
"set",
"(",
")",
"changed",
"=",
"set",
"(",
")",
"deleted",
"=",
"set",
"(",
")",
"try",
":",
"# go through the list of transactions in the block and enumerate",
"# over their outputs",
"for",
"tx",
"in",
"block",
".",
"FullTransactions",
":",
"for",
"index",
",",
"output",
"in",
"enumerate",
"(",
"tx",
".",
"outputs",
")",
":",
"# check to see if the outputs in the tx are in this wallet",
"state",
"=",
"self",
".",
"CheckAddressState",
"(",
"output",
".",
"ScriptHash",
")",
"if",
"state",
"&",
"AddressState",
".",
"InWallet",
">",
"0",
":",
"# if it's in the wallet, check to see if the coin exists yet",
"key",
"=",
"CoinReference",
"(",
"tx",
".",
"Hash",
",",
"index",
")",
"# if it exists, update it, otherwise create a new one",
"if",
"key",
"in",
"self",
".",
"_coins",
".",
"keys",
"(",
")",
":",
"coin",
"=",
"self",
".",
"_coins",
"[",
"key",
"]",
"coin",
".",
"State",
"|=",
"CoinState",
".",
"Confirmed",
"changed",
".",
"add",
"(",
"coin",
")",
"else",
":",
"newcoin",
"=",
"Coin",
".",
"CoinFromRef",
"(",
"coin_ref",
"=",
"key",
",",
"tx_output",
"=",
"output",
",",
"state",
"=",
"CoinState",
".",
"Confirmed",
",",
"transaction",
"=",
"tx",
")",
"self",
".",
"_coins",
"[",
"key",
"]",
"=",
"newcoin",
"added",
".",
"add",
"(",
"newcoin",
")",
"if",
"state",
"&",
"AddressState",
".",
"WatchOnly",
">",
"0",
":",
"self",
".",
"_coins",
"[",
"key",
"]",
".",
"State",
"|=",
"CoinState",
".",
"WatchOnly",
"changed",
".",
"add",
"(",
"self",
".",
"_coins",
"[",
"key",
"]",
")",
"# now iterate over the inputs of the tx and do the same",
"for",
"tx",
"in",
"block",
".",
"FullTransactions",
":",
"for",
"input",
"in",
"tx",
".",
"inputs",
":",
"if",
"input",
"in",
"self",
".",
"_coins",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"_coins",
"[",
"input",
"]",
".",
"Output",
".",
"AssetId",
"==",
"Blockchain",
".",
"SystemShare",
"(",
")",
".",
"Hash",
":",
"coin",
"=",
"self",
".",
"_coins",
"[",
"input",
"]",
"coin",
".",
"State",
"|=",
"CoinState",
".",
"Spent",
"|",
"CoinState",
".",
"Confirmed",
"changed",
".",
"add",
"(",
"coin",
")",
"else",
":",
"deleted",
".",
"add",
"(",
"self",
".",
"_coins",
"[",
"input",
"]",
")",
"del",
"self",
".",
"_coins",
"[",
"input",
"]",
"for",
"claimTx",
"in",
"[",
"tx",
"for",
"tx",
"in",
"block",
".",
"Transactions",
"if",
"tx",
".",
"Type",
"==",
"TransactionType",
".",
"ClaimTransaction",
"]",
":",
"for",
"ref",
"in",
"claimTx",
".",
"Claims",
":",
"if",
"ref",
"in",
"self",
".",
"_coins",
".",
"keys",
"(",
")",
":",
"deleted",
".",
"add",
"(",
"self",
".",
"_coins",
"[",
"ref",
"]",
")",
"del",
"self",
".",
"_coins",
"[",
"ref",
"]",
"# update the current height of the wallet",
"self",
".",
"_current_height",
"+=",
"1",
"# in the case that another wallet implementation needs to do something",
"# with the coins that have been changed ( ie persist to db ) this",
"# method is called",
"self",
".",
"OnProcessNewBlock",
"(",
"block",
",",
"added",
",",
"changed",
",",
"deleted",
")",
"# this is not necessary at the moment, but any outside process",
"# that wants to subscribe to the balance changed event could do",
"# so from the BalanceChanged method",
"if",
"len",
"(",
"added",
")",
"+",
"len",
"(",
"deleted",
")",
"+",
"len",
"(",
"changed",
")",
">",
"0",
":",
"self",
".",
"BalanceChanged",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"traceback",
".",
"print_stack",
"(",
")",
"traceback",
".",
"print_exc",
"(",
")",
"logger",
".",
"error",
"(",
"\"could not process %s \"",
"%",
"e",
")"
] | 41.320988 | 0.002334 | [
"def ProcessNewBlock(self, block):\n",
" \"\"\"\n",
" Processes a block on the blockchain. This should be done in a sequential order, ie block 4 should be\n",
" only processed after block 3.\n",
"\n",
" Args:\n",
" block: (neo.Core.Block) a block on the blockchain.\n",
" \"\"\"\n",
" added = set()\n",
" changed = set()\n",
" deleted = set()\n",
"\n",
" try:\n",
" # go through the list of transactions in the block and enumerate\n",
" # over their outputs\n",
" for tx in block.FullTransactions:\n",
"\n",
" for index, output in enumerate(tx.outputs):\n",
"\n",
" # check to see if the outputs in the tx are in this wallet\n",
" state = self.CheckAddressState(output.ScriptHash)\n",
"\n",
" if state & AddressState.InWallet > 0:\n",
"\n",
" # if it's in the wallet, check to see if the coin exists yet\n",
" key = CoinReference(tx.Hash, index)\n",
"\n",
" # if it exists, update it, otherwise create a new one\n",
" if key in self._coins.keys():\n",
" coin = self._coins[key]\n",
" coin.State |= CoinState.Confirmed\n",
" changed.add(coin)\n",
" else:\n",
" newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Confirmed, transaction=tx)\n",
" self._coins[key] = newcoin\n",
" added.add(newcoin)\n",
"\n",
" if state & AddressState.WatchOnly > 0:\n",
" self._coins[key].State |= CoinState.WatchOnly\n",
" changed.add(self._coins[key])\n",
"\n",
" # now iterate over the inputs of the tx and do the same\n",
" for tx in block.FullTransactions:\n",
"\n",
" for input in tx.inputs:\n",
"\n",
" if input in self._coins.keys():\n",
" if self._coins[input].Output.AssetId == Blockchain.SystemShare().Hash:\n",
" coin = self._coins[input]\n",
" coin.State |= CoinState.Spent | CoinState.Confirmed\n",
" changed.add(coin)\n",
"\n",
" else:\n",
" deleted.add(self._coins[input])\n",
" del self._coins[input]\n",
"\n",
" for claimTx in [tx for tx in block.Transactions if tx.Type == TransactionType.ClaimTransaction]:\n",
"\n",
" for ref in claimTx.Claims:\n",
" if ref in self._coins.keys():\n",
" deleted.add(self._coins[ref])\n",
" del self._coins[ref]\n",
"\n",
" # update the current height of the wallet\n",
" self._current_height += 1\n",
"\n",
" # in the case that another wallet implementation needs to do something\n",
" # with the coins that have been changed ( ie persist to db ) this\n",
" # method is called\n",
" self.OnProcessNewBlock(block, added, changed, deleted)\n",
"\n",
" # this is not necessary at the moment, but any outside process\n",
" # that wants to subscribe to the balance changed event could do\n",
" # so from the BalanceChanged method\n",
" if len(added) + len(deleted) + len(changed) > 0:\n",
" self.BalanceChanged()\n",
"\n",
" except Exception as e:\n",
" traceback.print_stack()\n",
" traceback.print_exc()\n",
" logger.error(\"could not process %s \" % e)"
] | [
0,
0.08333333333333333,
0.00909090909090909,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0.007692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886
] | 81 | 0.002006 |
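
Because ProcessNewBlock must see blocks in sequential order, a wallet typically catches up with a loop like the sketch below. The chain accessor names (Height, GetBlockByHeight) are assumptions about the surrounding neo-python style API and are not confirmed by the record.

def sync_wallet(wallet, chain):
    # chain is expected to expose Height and GetBlockByHeight(h); these
    # accessor names are assumptions, not confirmed by the record.
    while wallet._current_height <= chain.Height:
        block = chain.GetBlockByHeight(wallet._current_height)
        wallet.ProcessNewBlock(block)  # advances _current_height by one
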
def open_file(default_dir='~', extensions=None,
title='Choose a file', multiple_files=False, directory=False):
'''Start the native file dialog for opening file(s).
Starts the system native file dialog in order to open a file (or multiple files).
The toolkit used for each platform:
+-------------------------------------+------------------------------+
| Windows | Windows API (Win32) |
+-------------------------------------+------------------------------+
| Mac OS X | Cocoa |
+-------------------------------------+------------------------------+
| GNOME, Unity, Cinnamon, Pantheon | GTK+ 3 |
+-------------------------------------+------------------------------+
| KDE, LXQt | Qt 5 (fallback: Qt 4/GTK+ 3) |
+-------------------------------------+------------------------------+
| Other desktops (Xfce, WMs etc) | GTK+ 2 (fallback: GTK+ 3) |
+-------------------------------------+------------------------------+
**Note on Dependencies**
It depends on pywin32 for Windows (installed by default in Python for Windows)
It depends on `PyQt <https://riverbankcomputing.com/software/pyqt>`_ for KDE and LxQt (usually installed by default on these).
It depends on `PyGObject <https://wiki.gnome.org/Projects/PyGObject>`_ for GNOME etc. (virtually every Linux desktop has this).
It depends on `PyGTK <https://pygtk.org>`_ for other desktops (not usually installed, so has a GTK+ 3 fallback).
Args:
default_dir (str) : The directory to start the dialog in. Default: User home directory.
extensions (dict) : The extensions to filter by. Format:
.. code-block:: python
{
'Filter Name (example: Image Files)': ['*.png', '*.whatever', '*']
}
title (str) : The title of the dialog. Default: `Choose a file`
multiple_files (bool): Whether to choose multiple files or single files only. Default: `False`
directory (bool): Whether to choose directories. Default: `False`
Returns:
list: `list` of `str` s (each `str` being a selected file). If nothing is selected/dialog is cancelled, it is `None`.
'''
default_dir = os.path.expanduser(default_dir)
if not extensions:
extensions = {}
if system.get_name() == 'windows':
pass # TODO: Implement Win32 file dialog
elif system.get_name() == 'mac':
pass # TODO: Implement Cocoa file dialog
else:
def gtk3_dialog():
# GTK+ 3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class FileChooserWindow(Gtk.Window):
def __init__(self):
self.path = ''
Gtk.Window.__init__(self, title='')
dialog = Gtk.FileChooserDialog(title, None,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.OK)
)
if extensions:
for entry in extensions:
file_filter = Gtk.FileFilter()
file_filter.set_name(entry)
for pattern in extensions[entry]:
file_filter.add_pattern(pattern)
dialog.add_filter(file_filter)
dialog.set_select_multiple(multiple_files)
dialog.set_current_folder(default_dir)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.path = dialog.get_filenames()
dialog.destroy()
elif response == Gtk.ResponseType.CANCEL:
self.path = None
dialog.destroy()
win = FileChooserWindow()
win.connect('destroy', Gtk.main_quit)
win.connect('delete-event', Gtk.main_quit)
win.show_all()
win.destroy()
win.close()
return win.path
def qt5_dialog():
# Qt 5
try:
from PyQt5 import Qt
except ImportError:
# The API is the same for what this uses
from PyQt4 import Qt
class FileChooserWindow(Qt.QWidget):
def __init__(self):
super().__init__()
extensions_string = ''
if extensions:
for entry in extensions:
# entry → Filter name (i.e. 'Image Files' etc)
# value → Filter expression (i.e. '*.png, *.jpg'
# etc)
extensions_string += '%s (%s);;' % (entry,
' '.join(extensions[entry]))
else:
extensions_string = 'All Files (*)'
dialog = Qt.QFileDialog()
if multiple_files:
dialog.setFileMode(Qt.QFileDialog.ExistingFiles)
if directory:
dialog.setFileMode(Qt.QFileDialog.Directory)
dialog.setWindowTitle(title)
dialog.setDirectory(default_dir)
dialog.setNameFilter(extensions_string)
if dialog.exec_():
self.path = dialog.selectedFiles()
else:
self.path = None
app = Qt.QApplication(sys.argv)
win = FileChooserWindow()
win.close()
if win.path:
return win.path
else:
return None
def gtk2_dialog():
# GTK+ 2
			import pygtk
			pygtk.require('2.0')
			import gtk
dialog = gtk.FileChooserDialog(title, None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
if extensions:
for entry in extensions:
file_filter = gtk.FileFilter()
file_filter.set_name(entry)
for pattern in extensions[entry]:
file_filter.add_pattern(pattern)
dialog.add_filter(file_filter)
dialog.set_select_multiple(multiple_files)
			response = dialog.run()
			filenames = None
			if response == gtk.RESPONSE_OK:
				filenames = dialog.get_filenames()
			# destroy the dialog before returning so the window is cleaned up
			dialog.destroy()
			return filenames
if system.get_name() in ['gnome', 'unity', 'cinnamon', 'pantheon']:
return gtk3_dialog()
elif system.get_name() in ['kde', 'lxqt']:
try:
return qt5_dialog()
except ImportError:
return gtk3_dialog()
else:
try:
return gtk2_dialog()
except ImportError:
return gtk3_dialog() | [
"def",
"open_file",
"(",
"default_dir",
"=",
"'~'",
",",
"extensions",
"=",
"None",
",",
"title",
"=",
"'Choose a file'",
",",
"multiple_files",
"=",
"False",
",",
"directory",
"=",
"False",
")",
":",
"default_dir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"default_dir",
")",
"if",
"not",
"extensions",
":",
"extensions",
"=",
"{",
"}",
"if",
"system",
".",
"get_name",
"(",
")",
"==",
"'windows'",
":",
"pass",
"# TODO: Implement Win32 file dialog",
"elif",
"system",
".",
"get_name",
"(",
")",
"==",
"'mac'",
":",
"pass",
"# TODO: Implement Cocoa file dialog",
"else",
":",
"def",
"gtk3_dialog",
"(",
")",
":",
"# GTK+ 3",
"import",
"gi",
"gi",
".",
"require_version",
"(",
"'Gtk'",
",",
"'3.0'",
")",
"from",
"gi",
".",
"repository",
"import",
"Gtk",
"class",
"FileChooserWindow",
"(",
"Gtk",
".",
"Window",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"path",
"=",
"''",
"Gtk",
".",
"Window",
".",
"__init__",
"(",
"self",
",",
"title",
"=",
"''",
")",
"dialog",
"=",
"Gtk",
".",
"FileChooserDialog",
"(",
"title",
",",
"None",
",",
"Gtk",
".",
"FileChooserAction",
".",
"OPEN",
",",
"(",
"Gtk",
".",
"STOCK_CANCEL",
",",
"Gtk",
".",
"ResponseType",
".",
"CANCEL",
",",
"Gtk",
".",
"STOCK_OPEN",
",",
"Gtk",
".",
"ResponseType",
".",
"OK",
")",
")",
"if",
"extensions",
":",
"for",
"entry",
"in",
"extensions",
":",
"file_filter",
"=",
"Gtk",
".",
"FileFilter",
"(",
")",
"file_filter",
".",
"set_name",
"(",
"entry",
")",
"for",
"pattern",
"in",
"extensions",
"[",
"entry",
"]",
":",
"file_filter",
".",
"add_pattern",
"(",
"pattern",
")",
"dialog",
".",
"add_filter",
"(",
"file_filter",
")",
"dialog",
".",
"set_select_multiple",
"(",
"multiple_files",
")",
"dialog",
".",
"set_current_folder",
"(",
"default_dir",
")",
"response",
"=",
"dialog",
".",
"run",
"(",
")",
"if",
"response",
"==",
"Gtk",
".",
"ResponseType",
".",
"OK",
":",
"self",
".",
"path",
"=",
"dialog",
".",
"get_filenames",
"(",
")",
"dialog",
".",
"destroy",
"(",
")",
"elif",
"response",
"==",
"Gtk",
".",
"ResponseType",
".",
"CANCEL",
":",
"self",
".",
"path",
"=",
"None",
"dialog",
".",
"destroy",
"(",
")",
"win",
"=",
"FileChooserWindow",
"(",
")",
"win",
".",
"connect",
"(",
"'destroy'",
",",
"Gtk",
".",
"main_quit",
")",
"win",
".",
"connect",
"(",
"'delete-event'",
",",
"Gtk",
".",
"main_quit",
")",
"win",
".",
"show_all",
"(",
")",
"win",
".",
"destroy",
"(",
")",
"win",
".",
"close",
"(",
")",
"return",
"win",
".",
"path",
"def",
"qt5_dialog",
"(",
")",
":",
"# Qt 5",
"try",
":",
"from",
"PyQt5",
"import",
"Qt",
"except",
"ImportError",
":",
"# The API is the same for what this uses",
"from",
"PyQt4",
"import",
"Qt",
"class",
"FileChooserWindow",
"(",
"Qt",
".",
"QWidget",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"extensions_string",
"=",
"''",
"if",
"extensions",
":",
"for",
"entry",
"in",
"extensions",
":",
"# entry → Filter name (i.e. 'Image Files' etc)",
"# value → Filter expression (i.e. '*.png, *.jpg'",
"# etc)",
"extensions_string",
"+=",
"'%s (%s);;'",
"%",
"(",
"entry",
",",
"' '",
".",
"join",
"(",
"extensions",
"[",
"entry",
"]",
")",
")",
"else",
":",
"extensions_string",
"=",
"'All Files (*)'",
"dialog",
"=",
"Qt",
".",
"QFileDialog",
"(",
")",
"if",
"multiple_files",
":",
"dialog",
".",
"setFileMode",
"(",
"Qt",
".",
"QFileDialog",
".",
"ExistingFiles",
")",
"if",
"directory",
":",
"dialog",
".",
"setFileMode",
"(",
"Qt",
".",
"QFileDialog",
".",
"Directory",
")",
"dialog",
".",
"setWindowTitle",
"(",
"title",
")",
"dialog",
".",
"setDirectory",
"(",
"default_dir",
")",
"dialog",
".",
"setNameFilter",
"(",
"extensions_string",
")",
"if",
"dialog",
".",
"exec_",
"(",
")",
":",
"self",
".",
"path",
"=",
"dialog",
".",
"selectedFiles",
"(",
")",
"else",
":",
"self",
".",
"path",
"=",
"None",
"app",
"=",
"Qt",
".",
"QApplication",
"(",
"sys",
".",
"argv",
")",
"win",
"=",
"FileChooserWindow",
"(",
")",
"win",
".",
"close",
"(",
")",
"if",
"win",
".",
"path",
":",
"return",
"win",
".",
"path",
"else",
":",
"return",
"None",
"app",
".",
"exec_",
"(",
")",
"def",
"gtk2_dialog",
"(",
")",
":",
"# GTK+ 2",
"import",
"pygtk",
"pygtk",
".",
"require",
"(",
"'2.0'",
")",
"dialog",
"=",
"gtk",
".",
"FileChooserDialog",
"(",
"title",
",",
"None",
",",
"gtk",
".",
"FILE_CHOOSER_ACTION_OPEN",
",",
"(",
"gtk",
".",
"STOCK_CANCEL",
",",
"gtk",
".",
"RESPONSE_CANCEL",
",",
"gtk",
".",
"STOCK_OPEN",
",",
"gtk",
".",
"RESPONSE_OK",
")",
")",
"dialog",
".",
"set_default_response",
"(",
"gtk",
".",
"RESPONSE_OK",
")",
"if",
"extensions",
":",
"for",
"entry",
"in",
"extensions",
":",
"file_filter",
"=",
"gtk",
".",
"FileFilter",
"(",
")",
"file_filter",
".",
"set_name",
"(",
"entry",
")",
"for",
"pattern",
"in",
"extensions",
"[",
"entry",
"]",
":",
"file_filter",
".",
"add_pattern",
"(",
"pattern",
")",
"dialog",
".",
"add_filter",
"(",
"file_filter",
")",
"dialog",
".",
"set_select_multiple",
"(",
"multiple_files",
")",
"response",
"=",
"dialog",
".",
"run",
"(",
")",
"if",
"response",
"==",
"gtk",
".",
"RESPONSE_OK",
":",
"return",
"dialog",
".",
"get_filenames",
"(",
")",
"elif",
"response",
"==",
"gtk",
".",
"RESPONSE_CANCEL",
":",
"return",
"None",
"dialog",
".",
"destroy",
"(",
")",
"if",
"system",
".",
"get_name",
"(",
")",
"in",
"[",
"'gnome'",
",",
"'unity'",
",",
"'cinnamon'",
",",
"'pantheon'",
"]",
":",
"return",
"gtk3_dialog",
"(",
")",
"elif",
"system",
".",
"get_name",
"(",
")",
"in",
"[",
"'kde'",
",",
"'lxqt'",
"]",
":",
"try",
":",
"return",
"qt5_dialog",
"(",
")",
"except",
"ImportError",
":",
"return",
"gtk3_dialog",
"(",
")",
"else",
":",
"try",
":",
"return",
"gtk2_dialog",
"(",
")",
"except",
"ImportError",
":",
"return",
"gtk3_dialog",
"(",
")"
] | 24.803493 | 0.030636 | [
"def open_file(default_dir='~', extensions=None,\n",
"\t\t\t title='Choose a file', multiple_files=False, directory=False):\n",
"\t'''Start the native file dialog for opening file(s).\n",
"\n",
"\tStarts the system native file dialog in order to open a file (or multiple files).\n",
"\n",
"\tThe toolkit used for each platform:\n",
"\n",
"\t+-------------------------------------+------------------------------+\n",
"\t| Windows\t\t\t\t\t\t\t | Windows API (Win32)\t\t |\n",
"\t+-------------------------------------+------------------------------+\n",
"\t| Mac OS X\t\t\t\t\t\t\t| Cocoa\t\t\t\t\t\t|\n",
"\t+-------------------------------------+------------------------------+\n",
"\t| GNOME, Unity, Cinnamon, Pantheon\t| GTK+ 3\t\t\t\t\t |\n",
"\t+-------------------------------------+------------------------------+\n",
"\t| KDE, LXQt\t\t\t\t\t\t | Qt 5 (fallback: Qt 4/GTK+ 3) |\n",
"\t+-------------------------------------+------------------------------+\n",
"\t| Other desktops (Xfce, WMs etc)\t | GTK+ 2 (fallback: GTK+ 3)\t|\n",
"\t+-------------------------------------+------------------------------+\n",
"\n",
"\t**Note on Dependencies**\n",
"\n",
"\tIt depends on pywin32 for Windows (installed by default in Python for Windows)\n",
"\tIt depends on `PyQt <https://riverbankcomputing.com/software/pyqt>`_ for KDE and LxQt (usually installed by default on these).\n",
"\tIt depends on `PyGObject <https://wiki.gnome.org/Projects/PyGObject>`_ for GNOME etc. (virtually every Linux desktop has this).\n",
"\tIt depends on `PyGTK <https://pygtk.org>`_ for other desktops (not usually installed, so has a GTK+ 3 fallback).\n",
"\n",
"\tArgs:\n",
"\t\t\tdefault_dir (str) : The directory to start the dialog in. Default: User home directory.\n",
"\t\t\textensions (dict) : The extensions to filter by. Format:\n",
"\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\t.. code-block:: python\n",
"\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Filter Name (example: Image Files)': ['*.png', '*.whatever', '*']\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n",
"\n",
"\t\t\ttitle\t\t (str) : The title of the dialog. Default: `Choose a file`\n",
"\t\t\tmultiple_files (bool): Whether to choose multiple files or single files only. Default: `False`\n",
"\t\t\tdirectory\t (bool): Whether to choose directories. Default: `False`\n",
"\n",
"\tReturns:\n",
"\t\t\tlist: `list` of `str` s (each `str` being a selected file). If nothing is selected/dialog is cancelled, it is `None`.\n",
"\t'''\n",
"\n",
"\tdefault_dir = os.path.expanduser(default_dir)\n",
"\n",
"\tif not extensions:\n",
"\t\textensions = {}\n",
"\n",
"\tif system.get_name() == 'windows':\n",
"\t\tpass # TODO: Implement Win32 file dialog\n",
"\n",
"\telif system.get_name() == 'mac':\n",
"\t\tpass # TODO: Implement Cocoa file dialog\n",
"\n",
"\telse:\n",
"\n",
"\t\tdef gtk3_dialog():\n",
"\n",
"\t\t\t# GTK+ 3\n",
"\n",
"\t\t\timport gi\n",
"\t\t\tgi.require_version('Gtk', '3.0')\n",
"\n",
"\t\t\tfrom gi.repository import Gtk\n",
"\n",
"\t\t\tclass FileChooserWindow(Gtk.Window):\n",
"\n",
"\t\t\t\tdef __init__(self):\n",
"\n",
"\t\t\t\t\tself.path = ''\n",
"\n",
"\t\t\t\t\tGtk.Window.__init__(self, title='')\n",
"\n",
"\t\t\t\t\tdialog = Gtk.FileChooserDialog(title, None,\n",
"\t\t\t\t\t\t\t\t\t\t\t\t Gtk.FileChooserAction.OPEN,\n",
"\t\t\t\t\t\t\t\t\t\t\t\t (Gtk.STOCK_CANCEL,\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\tGtk.ResponseType.CANCEL,\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\tGtk.STOCK_OPEN,\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\tGtk.ResponseType.OK)\n",
"\t\t\t\t\t\t\t\t\t\t\t\t )\n",
"\n",
"\t\t\t\t\tif extensions:\n",
"\t\t\t\t\t\tfor entry in extensions:\n",
"\t\t\t\t\t\t\tfile_filter = Gtk.FileFilter()\n",
"\t\t\t\t\t\t\tfile_filter.set_name(entry)\n",
"\n",
"\t\t\t\t\t\t\tfor pattern in extensions[entry]:\n",
"\t\t\t\t\t\t\t\tfile_filter.add_pattern(pattern)\n",
"\n",
"\t\t\t\t\t\t\tdialog.add_filter(file_filter)\n",
"\n",
"\t\t\t\t\tdialog.set_select_multiple(multiple_files)\n",
"\n",
"\t\t\t\t\tdialog.set_current_folder(default_dir)\n",
"\n",
"\t\t\t\t\tresponse = dialog.run()\n",
"\n",
"\t\t\t\t\tif response == Gtk.ResponseType.OK:\n",
"\t\t\t\t\t\tself.path = dialog.get_filenames()\n",
"\t\t\t\t\t\tdialog.destroy()\n",
"\n",
"\t\t\t\t\telif response == Gtk.ResponseType.CANCEL:\n",
"\t\t\t\t\t\tself.path = None\n",
"\t\t\t\t\t\tdialog.destroy()\n",
"\n",
"\t\t\twin = FileChooserWindow()\n",
"\t\t\twin.connect('destroy', Gtk.main_quit)\n",
"\t\t\twin.connect('delete-event', Gtk.main_quit)\n",
"\t\t\twin.show_all()\n",
"\t\t\twin.destroy()\n",
"\t\t\twin.close()\n",
"\n",
"\t\t\treturn win.path\n",
"\n",
"\t\tdef qt5_dialog():\n",
"\n",
"\t\t\t# Qt 5\n",
"\n",
"\t\t\ttry:\n",
"\t\t\t\tfrom PyQt5 import Qt\n",
"\n",
"\t\t\texcept ImportError:\n",
"\t\t\t\t# The API is the same for what this uses\n",
"\t\t\t\tfrom PyQt4 import Qt\n",
"\n",
"\t\t\tclass FileChooserWindow(Qt.QWidget):\n",
"\n",
"\t\t\t\tdef __init__(self):\n",
"\t\t\t\t\tsuper().__init__()\n",
"\n",
"\t\t\t\t\textensions_string = ''\n",
"\n",
"\t\t\t\t\tif extensions:\n",
"\t\t\t\t\t\tfor entry in extensions:\n",
"\t\t\t\t\t\t\t# entry → Filter name (i.e. 'Image Files' etc)\n",
"\t\t\t\t\t\t\t# value → Filter expression (i.e. '*.png, *.jpg'\n",
"\t\t\t\t\t\t\t# etc)\n",
"\t\t\t\t\t\t\textensions_string += '%s (%s);;' % (entry,\n",
"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t' '.join(extensions[entry]))\n",
"\n",
"\t\t\t\t\telse:\n",
"\t\t\t\t\t\textensions_string = 'All Files (*)'\n",
"\n",
"\t\t\t\t\tdialog = Qt.QFileDialog()\n",
"\n",
"\t\t\t\t\tif multiple_files:\n",
"\t\t\t\t\t\tdialog.setFileMode(Qt.QFileDialog.ExistingFiles)\n",
"\n",
"\t\t\t\t\tif directory:\n",
"\t\t\t\t\t\tdialog.setFileMode(Qt.QFileDialog.Directory)\n",
"\n",
"\t\t\t\t\tdialog.setWindowTitle(title)\n",
"\t\t\t\t\tdialog.setDirectory(default_dir)\n",
"\n",
"\t\t\t\t\tdialog.setNameFilter(extensions_string)\n",
"\n",
"\t\t\t\t\tif dialog.exec_():\n",
"\t\t\t\t\t\tself.path = dialog.selectedFiles()\n",
"\n",
"\t\t\t\t\telse:\n",
"\t\t\t\t\t\tself.path = None\n",
"\n",
"\t\t\tapp = Qt.QApplication(sys.argv)\n",
"\n",
"\t\t\twin = FileChooserWindow()\n",
"\t\t\twin.close()\n",
"\n",
"\t\t\tif win.path:\n",
"\t\t\t\treturn win.path\n",
"\n",
"\t\t\telse:\n",
"\t\t\t\treturn None\n",
"\n",
"\t\t\tapp.exec_()\n",
"\n",
"\t\tdef gtk2_dialog():\n",
"\n",
"\t\t\t# GTK+ 2\n",
"\n",
"\t\t\timport pygtk\n",
"\t\t\tpygtk.require('2.0')\n",
"\n",
"\t\t\tdialog = gtk.FileChooserDialog(title, None,\n",
"\t\t\t\t\t\t\t\t\t\t gtk.FILE_CHOOSER_ACTION_OPEN,\n",
"\t\t\t\t\t\t\t\t\t\t (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n",
"\t\t\t\t\t\t\t\t\t\t\tgtk.STOCK_OPEN, gtk.RESPONSE_OK))\n",
"\n",
"\t\t\tdialog.set_default_response(gtk.RESPONSE_OK)\n",
"\n",
"\t\t\tif extensions:\n",
"\t\t\t\tfor entry in extensions:\n",
"\t\t\t\t\tfile_filter = gtk.FileFilter()\n",
"\t\t\t\t\tfile_filter.set_name(entry)\n",
"\n",
"\t\t\t\t\tfor pattern in extensions[entry]:\n",
"\t\t\t\t\t\tfile_filter.add_pattern(pattern)\n",
"\n",
"\t\t\t\t\tdialog.add_filter(file_filter)\n",
"\n",
"\t\t\tdialog.set_select_multiple(multiple_files)\n",
"\n",
"\t\t\tresponse = dialog.run()\n",
"\n",
"\t\t\tif response == gtk.RESPONSE_OK:\n",
"\t\t\t\treturn dialog.get_filenames()\n",
"\n",
"\t\t\telif response == gtk.RESPONSE_CANCEL:\n",
"\t\t\t\treturn None\n",
"\n",
"\t\t\tdialog.destroy()\n",
"\n",
"\t\tif system.get_name() in ['gnome', 'unity', 'cinnamon', 'pantheon']:\n",
"\t\t\treturn gtk3_dialog()\n",
"\n",
"\t\telif system.get_name() in ['kde', 'lxqt']:\n",
"\t\t\ttry:\n",
"\t\t\t\treturn qt5_dialog()\n",
"\n",
"\t\t\texcept ImportError:\n",
"\t\t\t\treturn gtk3_dialog()\n",
"\n",
"\t\telse:\n",
"\t\t\ttry:\n",
"\t\t\t\treturn gtk2_dialog()\n",
"\n",
"\t\t\texcept ImportError:\n",
"\t\t\t\treturn gtk3_dialog()"
] | [
0,
0.04411764705882353,
0.018518518518518517,
0,
0.024096385542168676,
0,
0.02702702702702703,
0,
0.013888888888888888,
0.022222222222222223,
0.013888888888888888,
0.030303030303030304,
0.013888888888888888,
0.018518518518518517,
0.013888888888888888,
0.018518518518518517,
0.013888888888888888,
0.015151515151515152,
0.013888888888888888,
0,
0.038461538461538464,
0,
0.0125,
0.015625,
0.015503875968992248,
0.017543859649122806,
0,
0.14285714285714285,
0.021505376344086023,
0.016129032258064516,
0,
0.027777777777777776,
0,
0.058823529411764705,
0.03571428571428571,
0.058823529411764705,
0,
0.014285714285714285,
0.02040816326530612,
0.014084507042253521,
0,
0.1,
0.01652892561983471,
0.2,
0,
0.02127659574468085,
0,
0.05,
0.05555555555555555,
0,
0.027777777777777776,
0.022727272727272728,
0,
0.029411764705882353,
0.022727272727272728,
0,
0.14285714285714285,
0,
0.047619047619047616,
0,
0.08333333333333333,
0,
0.07692307692307693,
0.027777777777777776,
0,
0.030303030303030304,
0,
0.025,
0,
0.041666666666666664,
0,
0.05,
0,
0.024390243902439025,
0,
0.02040816326530612,
0.06976744186046512,
0.08823529411764706,
0.05263157894736842,
0.06896551724137931,
0.058823529411764705,
0.11764705882352941,
0,
0.05,
0.03225806451612903,
0.02631578947368421,
0.02857142857142857,
0,
0.024390243902439025,
0.024390243902439025,
0,
0.02631578947368421,
0,
0.020833333333333332,
0,
0.022727272727272728,
0,
0.034482758620689655,
0,
0.024390243902439025,
0.024390243902439025,
0.043478260869565216,
0,
0.02127659574468085,
0.043478260869565216,
0.043478260869565216,
0,
0.034482758620689655,
0.024390243902439025,
0.021739130434782608,
0.05555555555555555,
0.058823529411764705,
0.06666666666666667,
0,
0.05263157894736842,
0,
0.05,
0,
0.1,
0,
0.125,
0.04,
0,
0.043478260869565216,
0.022222222222222223,
0.04,
0,
0.025,
0,
0.041666666666666664,
0.041666666666666664,
0,
0.03571428571428571,
0,
0.05,
0.03225806451612903,
0.018518518518518517,
0.017857142857142856,
0.07142857142857142,
0.02,
0.044444444444444446,
0,
0.09090909090909091,
0.023809523809523808,
0,
0.03225806451612903,
0,
0.041666666666666664,
0.01818181818181818,
0,
0.05263157894736842,
0.0196078431372549,
0,
0.029411764705882353,
0.02631578947368421,
0,
0.022222222222222223,
0,
0.041666666666666664,
0.024390243902439025,
0,
0.09090909090909091,
0.043478260869565216,
0,
0.02857142857142857,
0,
0.034482758620689655,
0.06666666666666667,
0,
0.0625,
0.05,
0,
0.1111111111111111,
0.0625,
0,
0.06666666666666667,
0,
0.047619047619047616,
0,
0.08333333333333333,
0,
0.0625,
0.041666666666666664,
0,
0.02127659574468085,
0.06976744186046512,
0.05660377358490566,
0.044444444444444446,
0,
0.020833333333333332,
0,
0.05555555555555555,
0.034482758620689655,
0.027777777777777776,
0.030303030303030304,
0,
0.02564102564102564,
0.02564102564102564,
0,
0.027777777777777776,
0,
0.021739130434782608,
0,
0.037037037037037035,
0,
0.02857142857142857,
0.029411764705882353,
0,
0.024390243902439025,
0.0625,
0,
0.05,
0,
0.014285714285714285,
0.041666666666666664,
0,
0.022222222222222223,
0.125,
0.041666666666666664,
0,
0.043478260869565216,
0.04,
0,
0.125,
0.125,
0.04,
0,
0.043478260869565216,
0.08333333333333333
] | 229 | 0.029356 |
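
A usage sketch for open_file; the extensions format is exactly the one documented in the record's docstring, and the directory and filter names are placeholders.

# open_file is the function defined in the record above.
paths = open_file(default_dir='~/Pictures',
                  extensions={'Image Files': ['*.png', '*.jpg'],
                              'All Files': ['*']},
                  title='Choose an image',
                  multiple_files=True)
if paths is None:
    print('Dialog cancelled')
else:
    for path in paths:
        print(path)
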
def add(self, num_iid, properties, quantity, price, session, outer_id=None, item_price=None, lang=None):
    '''taobao.item.sku.add Add a SKU
    Adds a new SKU to the item specified by num_iid. The item for the given iid must belong to the user of the current session.'''
request = TOPRequest('taobao.item.sku.add')
request['num_iid'] = num_iid
request['properties'] = properties
request['quantity'] = quantity
request['price'] = price
if outer_id!=None:
request['outer_id'] = outer_id
if item_price!=None:
request['item_price'] = item_price
if lang!=None:
request['lang'] = lang
self.create(self.execute(request, session)['sku'])
return self | [
"def",
"add",
"(",
"self",
",",
"num_iid",
",",
"properties",
",",
"quantity",
",",
"price",
",",
"session",
",",
"outer_id",
"=",
"None",
",",
"item_price",
"=",
"None",
",",
"lang",
"=",
"None",
")",
":",
"request",
"=",
"TOPRequest",
"(",
"'taobao.item.sku.add'",
")",
"request",
"[",
"'num_iid'",
"]",
"=",
"num_iid",
"request",
"[",
"'properties'",
"]",
"=",
"properties",
"request",
"[",
"'quantity'",
"]",
"=",
"quantity",
"request",
"[",
"'price'",
"]",
"=",
"price",
"if",
"outer_id",
"!=",
"None",
":",
"request",
"[",
"'outer_id'",
"]",
"=",
"outer_id",
"if",
"item_price",
"!=",
"None",
":",
"request",
"[",
"'item_price'",
"]",
"=",
"item_price",
"if",
"lang",
"!=",
"None",
":",
"request",
"[",
"'lang'",
"]",
"=",
"lang",
"self",
".",
"create",
"(",
"self",
".",
"execute",
"(",
"request",
",",
"session",
")",
"[",
"'sku'",
"]",
")",
"return",
"self"
] | 39.882353 | 0.014409 | [
"def add(self, num_iid, properties, quantity, price, session, outer_id=None, item_price=None, lang=None):\n",
" '''taobao.item.sku.add 添加SKU\n",
" \n",
" 新增一个sku到num_iid指定的商品中 传入的iid所对应的商品必须属于当前会话的用户'''\n",
" request = TOPRequest('taobao.item.sku.add')\n",
" request['num_iid'] = num_iid\n",
" request['properties'] = properties\n",
" request['quantity'] = quantity\n",
" request['price'] = price\n",
" if outer_id!=None:\n",
" request['outer_id'] = outer_id\n",
" if item_price!=None:\n",
" request['item_price'] = item_price\n",
" if lang!=None:\n",
" request['lang'] = lang\n",
" self.create(self.execute(request, session)['sku'])\n",
" return self"
] | [
0.009523809523809525,
0.02702702702702703,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.07407407407407407,
0,
0.06896551724137931,
0,
0.08695652173913043,
0,
0,
0.05263157894736842
] | 17 | 0.025311 |
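A minimal usage sketch for the record above; `sku_api` and `session` are hypothetical stand-ins (the wrapping class and the authorized TOP session key are not shown in this record), and the 'pid:vid' properties string follows Taobao's convention.

# Hypothetical names: sku_api is an instance of the class defining add(),
# session is an authorized TOP session key.
sku = sku_api.add(
    num_iid=123456789,            # target item id
    properties='1627207:28329',   # 'pid:vid' pairs identifying the SKU
    quantity=10,
    price='35.50',
    session=session,
    outer_id='SKU-RED-M',         # optional merchant-side code
)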
def maximum_hline_bundle(self, y0, x0, x1):
"""Compute a maximum set of horizontal lines in the unit cells ``(x,y0)``
for :math:`x0 \leq x \leq x1`.
INPUTS:
y0,x0,x1: int
OUTPUT:
list of lists of qubits
"""
x_range = range(x0, x1 + 1) if x0 < x1 else range(x0, x1 - 1, -1)
hlines = [[(x, y0, 0, k) for x in x_range] for k in range(self.L)]
return list(filter(self._contains_line, hlines)) | [
"def",
"maximum_hline_bundle",
"(",
"self",
",",
"y0",
",",
"x0",
",",
"x1",
")",
":",
"x_range",
"=",
"range",
"(",
"x0",
",",
"x1",
"+",
"1",
")",
"if",
"x0",
"<",
"x1",
"else",
"range",
"(",
"x0",
",",
"x1",
"-",
"1",
",",
"-",
"1",
")",
"hlines",
"=",
"[",
"[",
"(",
"x",
",",
"y0",
",",
"0",
",",
"k",
")",
"for",
"x",
"in",
"x_range",
"]",
"for",
"k",
"in",
"range",
"(",
"self",
".",
"L",
")",
"]",
"return",
"list",
"(",
"filter",
"(",
"self",
".",
"_contains_line",
",",
"hlines",
")",
")"
] | 35.846154 | 0.01046 | [
"def maximum_hline_bundle(self, y0, x0, x1):\n",
" \"\"\"Compute a maximum set of horizontal lines in the unit cells ``(x,y0)``\n",
" for :math:`x0 \\leq x \\leq x1`.\n",
"\n",
" INPUTS:\n",
" y0,x0,x1: int\n",
"\n",
" OUTPUT:\n",
" list of lists of qubits\n",
" \"\"\"\n",
" x_range = range(x0, x1 + 1) if x0 < x1 else range(x0, x1 - 1, -1)\n",
" hlines = [[(x, y0, 0, k) for x in x_range] for k in range(self.L)]\n",
" return list(filter(self._contains_line, hlines))"
] | [
0,
0.024390243902439025,
0.05128205128205128,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856
] | 13 | 0.007195 |
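The list comprehension above is easy to check in isolation; a standalone sketch of the coordinate construction only (without the self._contains_line filter), assuming a tile size L = 4:

L = 4                      # assumed tile size; the real value comes from self.L
y0, x0, x1 = 0, 0, 2
x_range = range(x0, x1 + 1) if x0 < x1 else range(x0, x1 - 1, -1)
hlines = [[(x, y0, 0, k) for x in x_range] for k in range(L)]
print(hlines[0])           # [(0, 0, 0, 0), (1, 0, 0, 0), (2, 0, 0, 0)]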
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else [] | [
"def",
"type_list",
"(",
"signature",
",",
"doc",
",",
"header",
")",
":",
"lines",
"=",
"[",
"]",
"docced",
"=",
"set",
"(",
")",
"lines",
".",
"append",
"(",
"header",
")",
"try",
":",
"for",
"names",
",",
"types",
",",
"description",
"in",
"doc",
":",
"names",
",",
"types",
"=",
"_get_names",
"(",
"names",
",",
"types",
")",
"unannotated",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"docced",
".",
"add",
"(",
"name",
")",
"try",
":",
"typ",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"annotation",
"if",
"typ",
"==",
"inspect",
".",
"_empty",
":",
"raise",
"AttributeError",
"default",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"default",
"type_string",
"=",
"string_annotation",
"(",
"typ",
",",
"default",
")",
"lines",
".",
"append",
"(",
"f\"- `{name}`: {type_string}\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"unannotated",
".",
"append",
"(",
"name",
")",
"# No annotation",
"if",
"len",
"(",
"unannotated",
")",
">",
"0",
":",
"lines",
".",
"append",
"(",
"\"- \"",
")",
"lines",
".",
"append",
"(",
"\", \"",
".",
"join",
"(",
"f\"`{name}`\"",
"for",
"name",
"in",
"unannotated",
")",
")",
"if",
"types",
"!=",
"\"\"",
"and",
"len",
"(",
"unannotated",
")",
">",
"0",
":",
"lines",
".",
"append",
"(",
"f\": {mangle_types(types)}\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"lines",
".",
"append",
"(",
"f\" {' '.join(description)}\\n\\n\"",
")",
"for",
"names",
",",
"types",
",",
"description",
"in",
"doc",
":",
"names",
",",
"types",
"=",
"_get_names",
"(",
"names",
",",
"types",
")",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"not",
"in",
"docced",
":",
"try",
":",
"typ",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"annotation",
"default",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"default",
"type_string",
"=",
"string_annotation",
"(",
"typ",
",",
"default",
")",
"lines",
".",
"append",
"(",
"f\"- `{name}`: {type_string}\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"lines",
".",
"append",
"(",
"f\"- `{name}`\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"lines",
"if",
"len",
"(",
"lines",
")",
">",
"1",
"else",
"[",
"]"
] | 36.295082 | 0.00044 | [
"def type_list(signature, doc, header):\n",
" \"\"\"\n",
" Construct a list of types, preferring type annotations to\n",
" docstrings if they are available.\n",
"\n",
" Parameters\n",
" ----------\n",
" signature : Signature\n",
" Signature of thing\n",
" doc : list of tuple\n",
" Numpydoc's type list section\n",
"\n",
" Returns\n",
" -------\n",
" list of str\n",
" Markdown formatted type list\n",
" \"\"\"\n",
"\n",
" lines = []\n",
" docced = set()\n",
" lines.append(header)\n",
" try:\n",
" for names, types, description in doc:\n",
" names, types = _get_names(names, types)\n",
" unannotated = []\n",
" for name in names:\n",
" docced.add(name)\n",
" try:\n",
" typ = signature.parameters[name].annotation\n",
" if typ == inspect._empty:\n",
" raise AttributeError\n",
" default = signature.parameters[name].default\n",
" type_string = string_annotation(typ, default)\n",
" lines.append(f\"- `{name}`: {type_string}\")\n",
" lines.append(\"\\n\\n\")\n",
" except (AttributeError, KeyError):\n",
" unannotated.append(name) # No annotation\n",
"\n",
" if len(unannotated) > 0:\n",
" lines.append(\"- \")\n",
" lines.append(\", \".join(f\"`{name}`\" for name in unannotated))\n",
" if types != \"\" and len(unannotated) > 0:\n",
" lines.append(f\": {mangle_types(types)}\")\n",
" lines.append(\"\\n\\n\")\n",
" lines.append(f\" {' '.join(description)}\\n\\n\")\n",
" for names, types, description in doc:\n",
" names, types = _get_names(names, types)\n",
" for name in names:\n",
" if name not in docced:\n",
" try:\n",
" typ = signature.parameters[name].annotation\n",
" default = signature.parameters[name].default\n",
" type_string = string_annotation(typ, default)\n",
" lines.append(f\"- `{name}`: {type_string}\")\n",
" lines.append(\"\\n\\n\")\n",
" except (AttributeError, KeyError):\n",
" lines.append(f\"- `{name}`\")\n",
" lines.append(\"\\n\\n\")\n",
" except Exception as e:\n",
" print(e)\n",
" return lines if len(lines) > 1 else []"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808
] | 61 | 0.00039 |
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super().transpose(1, 0, **kwargs) | [
"def",
"transpose",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_transpose",
"(",
"args",
",",
"dict",
"(",
")",
")",
"return",
"super",
"(",
")",
".",
"transpose",
"(",
"1",
",",
"0",
",",
"*",
"*",
"kwargs",
")"
] | 27.835052 | 0.000715 | [
"def transpose(self, *args, **kwargs):\n",
" \"\"\"\n",
" Transpose index and columns.\n",
"\n",
" Reflect the DataFrame over its main diagonal by writing rows as columns\n",
" and vice-versa. The property :attr:`.T` is an accessor to the method\n",
" :meth:`transpose`.\n",
"\n",
" Parameters\n",
" ----------\n",
" copy : bool, default False\n",
" If True, the underlying data is copied. Otherwise (default), no\n",
" copy is made if possible.\n",
" *args, **kwargs\n",
" Additional keywords have no effect but might be accepted for\n",
" compatibility with numpy.\n",
"\n",
" Returns\n",
" -------\n",
" DataFrame\n",
" The transposed DataFrame.\n",
"\n",
" See Also\n",
" --------\n",
" numpy.transpose : Permute the dimensions of a given array.\n",
"\n",
" Notes\n",
" -----\n",
" Transposing a DataFrame with mixed dtypes will result in a homogeneous\n",
" DataFrame with the `object` dtype. In such a case, a copy of the data\n",
" is always made.\n",
"\n",
" Examples\n",
" --------\n",
" **Square DataFrame with homogeneous dtype**\n",
"\n",
" >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n",
" >>> df1 = pd.DataFrame(data=d1)\n",
" >>> df1\n",
" col1 col2\n",
" 0 1 3\n",
" 1 2 4\n",
"\n",
" >>> df1_transposed = df1.T # or df1.transpose()\n",
" >>> df1_transposed\n",
" 0 1\n",
" col1 1 2\n",
" col2 3 4\n",
"\n",
" When the dtype is homogeneous in the original DataFrame, we get a\n",
" transposed DataFrame with the same dtype:\n",
"\n",
" >>> df1.dtypes\n",
" col1 int64\n",
" col2 int64\n",
" dtype: object\n",
" >>> df1_transposed.dtypes\n",
" 0 int64\n",
" 1 int64\n",
" dtype: object\n",
"\n",
" **Non-square DataFrame with mixed dtypes**\n",
"\n",
" >>> d2 = {'name': ['Alice', 'Bob'],\n",
" ... 'score': [9.5, 8],\n",
" ... 'employed': [False, True],\n",
" ... 'kids': [0, 0]}\n",
" >>> df2 = pd.DataFrame(data=d2)\n",
" >>> df2\n",
" name score employed kids\n",
" 0 Alice 9.5 False 0\n",
" 1 Bob 8.0 True 0\n",
"\n",
" >>> df2_transposed = df2.T # or df2.transpose()\n",
" >>> df2_transposed\n",
" 0 1\n",
" name Alice Bob\n",
" score 9.5 8\n",
" employed False True\n",
" kids 0 0\n",
"\n",
" When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n",
" the `object` dtype:\n",
"\n",
" >>> df2.dtypes\n",
" name object\n",
" score float64\n",
" employed bool\n",
" kids int64\n",
" dtype: object\n",
" >>> df2_transposed.dtypes\n",
" 0 object\n",
" 1 object\n",
" dtype: object\n",
" \"\"\"\n",
" nv.validate_transpose(args, dict())\n",
" return super().transpose(1, 0, **kwargs)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332
] | 97 | 0.001074 |
def get_as_map(self, key):
"""
Converts map element into an AnyValueMap or returns empty AnyValueMap if conversion is not possible.
:param key: a key of element to get.
:return: AnyValueMap value of the element or empty AnyValueMap if conversion is not supported.
"""
if key == None:
map = {}
for (k, v) in self.items():
map[k] = v
return map
else:
value = self.get(key)
return MapConverter.to_map(value) | [
"def",
"get_as_map",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"==",
"None",
":",
"map",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"items",
"(",
")",
":",
"map",
"[",
"k",
"]",
"=",
"v",
"return",
"map",
"else",
":",
"value",
"=",
"self",
".",
"get",
"(",
"key",
")",
"return",
"MapConverter",
".",
"to_map",
"(",
"value",
")"
] | 32.6875 | 0.009294 | [
"def get_as_map(self, key):\n",
" \"\"\"\n",
" Converts map element into an AnyValueMap or returns empty AnyValueMap if conversion is not possible.\n",
"\n",
" :param key: a key of element to get.\n",
"\n",
" :return: AnyValueMap value of the element or empty AnyValueMap if conversion is not supported.\n",
" \"\"\"\n",
" if key == None:\n",
" map = {}\n",
" for (k, v) in self.items():\n",
" map[k] = v\n",
" return map\n",
" else:\n",
" value = self.get(key)\n",
" return MapConverter.to_map(value)"
] | [
0,
0.08333333333333333,
0.009174311926605505,
0,
0,
0,
0.009708737864077669,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0.022222222222222223
] | 16 | 0.010382 |
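A sketch of the two branches above, assuming the host class is dict-like (it uses self.items() and self.get()); building an AnyValueMap from a plain dict is an assumption here, not something shown in the record.

# Assumed constructor; both names follow the pip-services conventions
# suggested by the record above.
values = AnyValueMap({'ids': {'a': 1, 'b': 2}, 'count': 2})
sub = values.get_as_map('ids')    # map converted from the 'ids' element
full = values.get_as_map(None)    # shallow copy of the whole map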
def get_link_or_none(pattern_name, request, view_kwargs=None):
"""
    Helper that generates a URL from the pattern name and kwargs and checks if the current request has permission to open the URL.
    If not, None is returned.
Args:
        pattern_name (str): slug which is used for view registration to pattern
request (django.http.request.HttpRequest): Django request object
view_kwargs (dict): list of kwargs necessary for URL generator
Returns:
"""
from is_core.patterns import reverse_pattern
pattern = reverse_pattern(pattern_name)
assert pattern is not None, 'Invalid pattern name {}'.format(pattern_name)
if pattern.has_permission('get', request, view_kwargs=view_kwargs):
return pattern.get_url_string(request, view_kwargs=view_kwargs)
else:
return None | [
"def",
"get_link_or_none",
"(",
"pattern_name",
",",
"request",
",",
"view_kwargs",
"=",
"None",
")",
":",
"from",
"is_core",
".",
"patterns",
"import",
"reverse_pattern",
"pattern",
"=",
"reverse_pattern",
"(",
"pattern_name",
")",
"assert",
"pattern",
"is",
"not",
"None",
",",
"'Invalid pattern name {}'",
".",
"format",
"(",
"pattern_name",
")",
"if",
"pattern",
".",
"has_permission",
"(",
"'get'",
",",
"request",
",",
"view_kwargs",
"=",
"view_kwargs",
")",
":",
"return",
"pattern",
".",
"get_url_string",
"(",
"request",
",",
"view_kwargs",
"=",
"view_kwargs",
")",
"else",
":",
"return",
"None"
] | 36.454545 | 0.00243 | [
"def get_link_or_none(pattern_name, request, view_kwargs=None):\n",
" \"\"\"\n",
" Helper that generate URL prom pattern name and kwargs and check if current request has permission to open the URL.\n",
" If not None is returned.\n",
"\n",
" Args:\n",
" pattern_name (str): slug which is used for view registratin to pattern\n",
" request (django.http.request.HttpRequest): Django request object\n",
" view_kwargs (dict): list of kwargs necessary for URL generator\n",
"\n",
" Returns:\n",
"\n",
" \"\"\"\n",
" from is_core.patterns import reverse_pattern\n",
"\n",
" pattern = reverse_pattern(pattern_name)\n",
" assert pattern is not None, 'Invalid pattern name {}'.format(pattern_name)\n",
"\n",
" if pattern.has_permission('get', request, view_kwargs=view_kwargs):\n",
" return pattern.get_url_string(request, view_kwargs=view_kwargs)\n",
" else:\n",
" return None"
] | [
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 22 | 0.002774 |
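A hypothetical call site for the helper above; 'user-detail' and the pk kwarg are illustrative names, not taken from the source.

url = get_link_or_none('user-detail', request, view_kwargs={'pk': user.pk})
if url is not None:
    # the current request is allowed to open the generated URL
    render_link(url)  # hypothetical helper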
def stream(self, handler, whenDone=None):
"""
Fetches data from river streams and feeds them into the given function.
:param handler: (function) passed headers [list] and row [list] of the data
for one time step, for every row of data
"""
self._createConfluence()
headers = ["timestamp"] + self.getStreamIds()
for row in self._confluence:
handler(headers, row)
if whenDone is not None:
return whenDone() | [
"def",
"stream",
"(",
"self",
",",
"handler",
",",
"whenDone",
"=",
"None",
")",
":",
"self",
".",
"_createConfluence",
"(",
")",
"headers",
"=",
"[",
"\"timestamp\"",
"]",
"+",
"self",
".",
"getStreamIds",
"(",
")",
"for",
"row",
"in",
"self",
".",
"_confluence",
":",
"handler",
"(",
"headers",
",",
"row",
")",
"if",
"whenDone",
"is",
"not",
"None",
":",
"return",
"whenDone",
"(",
")"
] | 35.384615 | 0.008475 | [
"def stream(self, handler, whenDone=None):\n",
" \"\"\"\n",
" Fetches data from river streams and feeds them into the given function.\n",
" :param handler: (function) passed headers [list] and row [list] of the data\n",
" for one time step, for every row of data\n",
" \"\"\"\n",
" self._createConfluence()\n",
" headers = [\"timestamp\"] + self.getStreamIds()\n",
" for row in self._confluence:\n",
" handler(headers, row)\n",
" \n",
" if whenDone is not None:\n",
" return whenDone()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0.2,
0,
0.08695652173913043
] | 13 | 0.024821 |
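A minimal handler sketch; `source` stands for an instance of the (unshown) class that defines stream().

def print_row(headers, row):
    # headers is ["timestamp", <stream ids...>]; row is one time step
    print(dict(zip(headers, row)))

source.stream(print_row, whenDone=lambda: print("all rows consumed"))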
def blum_blum_shub(seed, amount, prime0, prime1):
"""Creates pseudo-number generator
:param seed: seeder
:param amount: amount of number to generate
:param prime0: one prime number
:param prime1: the second prime number
:return: pseudo-number generator
"""
if amount == 0:
return []
assert (prime0 % 4 == 3 and
prime1 % 4 == 3) # primes must be congruent 3 mod 4
mod = prime0 * prime1
rand = [seed]
for _ in range(amount - 1):
last_num = rand[len(rand) - 1]
next_num = (last_num * last_num) % mod
rand.append(next_num)
return rand | [
"def",
"blum_blum_shub",
"(",
"seed",
",",
"amount",
",",
"prime0",
",",
"prime1",
")",
":",
"if",
"amount",
"==",
"0",
":",
"return",
"[",
"]",
"assert",
"(",
"prime0",
"%",
"4",
"==",
"3",
"and",
"prime1",
"%",
"4",
"==",
"3",
")",
"# primes must be congruent 3 mod 4",
"mod",
"=",
"prime0",
"*",
"prime1",
"rand",
"=",
"[",
"seed",
"]",
"for",
"_",
"in",
"range",
"(",
"amount",
"-",
"1",
")",
":",
"last_num",
"=",
"rand",
"[",
"len",
"(",
"rand",
")",
"-",
"1",
"]",
"next_num",
"=",
"(",
"last_num",
"*",
"last_num",
")",
"%",
"mod",
"rand",
".",
"append",
"(",
"next_num",
")",
"return",
"rand"
] | 25.375 | 0.001582 | [
"def blum_blum_shub(seed, amount, prime0, prime1):\n",
" \"\"\"Creates pseudo-number generator\n",
"\n",
" :param seed: seeder\n",
" :param amount: amount of number to generate\n",
" :param prime0: one prime number\n",
" :param prime1: the second prime number\n",
" :return: pseudo-number generator\n",
" \"\"\"\n",
" if amount == 0:\n",
" return []\n",
"\n",
" assert (prime0 % 4 == 3 and\n",
" prime1 % 4 == 3) # primes must be congruent 3 mod 4\n",
"\n",
" mod = prime0 * prime1\n",
" rand = [seed]\n",
"\n",
" for _ in range(amount - 1):\n",
" last_num = rand[len(rand) - 1]\n",
" next_num = (last_num * last_num) % mod\n",
" rand.append(next_num)\n",
"\n",
" return rand"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 24 | 0.002778 |
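A worked example: with primes 7 and 11 (both congruent to 3 mod 4), mod = 77 and each term is the square of the previous one modulo 77.

blum_blum_shub(seed=3, amount=5, prime0=7, prime1=11)
# -> [3, 9, 4, 16, 25]
#    9 = 3**2 % 77, 4 = 9**2 % 77 (81 - 77), 16 = 4**2 % 77, 25 = 16**2 % 77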
def unregisterView(viewType, location='Central'):
"""
    Unregisters the given view type from the inputted location.
:param viewType | <subclass of XView>
"""
XView._registry.get(location, {}).pop(viewType.viewName(), None)
XView.dispatch(location).emit('unregisteredView(QVariant)', viewType) | [
"def",
"unregisterView",
"(",
"viewType",
",",
"location",
"=",
"'Central'",
")",
":",
"XView",
".",
"_registry",
".",
"get",
"(",
"location",
",",
"{",
"}",
")",
".",
"pop",
"(",
"viewType",
".",
"viewName",
"(",
")",
",",
"None",
")",
"XView",
".",
"dispatch",
"(",
"location",
")",
".",
"emit",
"(",
"'unregisteredView(QVariant)'",
",",
"viewType",
")"
] | 43.125 | 0.008523 | [
"def unregisterView(viewType, location='Central'):\n",
" \"\"\"\n",
" Unregisteres the given view type from the inputed location.\n",
" \n",
" :param viewType | <subclass of XView>\n",
" \"\"\"\n",
" XView._registry.get(location, {}).pop(viewType.viewName(), None)\n",
" XView.dispatch(location).emit('unregisteredView(QVariant)', viewType)"
] | [
0,
0.08333333333333333,
0,
0.1111111111111111,
0,
0,
0,
0.012987012987012988
] | 8 | 0.025929 |
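A hypothetical usage of the function above; MyView stands for a previously registered XView subclass.

XView.unregisterView(MyView)                     # drop from the default 'Central' registry
XView.unregisterView(MyView, location='Docked')  # or from an explicitly named location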
def eagle(args):
"""
%prog eagle fastafile
"""
p = OptionParser(eagle.__doc__)
p.add_option("--share", default="/usr/local/share/EAGLE/",
help="Default EAGLE share path")
add_sim_options(p)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
share = opts.share
depth = opts.depth
readlen = opts.readlen
distance = opts.distance
pf = op.basename(fastafile).split(".")[0]
# Since EAGLE does not natively support read length other than 100bp and
# 250bp - for an arbitrary read length we need to generate a bunch of
# support files
# First file is the Runinfo
runinfo_readlen = "RunInfo_PairedReads2x{}Cycles1x1Tiles.xml".format(readlen)
if not op.exists(runinfo_readlen):
runinfo = op.join(share, "RunInfo/RunInfo_PairedReads2x251Cycles1x1Tiles.xml")
runinfo_xml = open(runinfo).read()
runinfo_xml = runinfo_xml.replace("251", str(readlen))\
.replace("252", str(readlen + 1))\
.replace("502", str(2 * readlen))
fw = open(runinfo_readlen, "w")
print(runinfo_xml.strip(), file=fw)
fw.close()
# Generate quality profiles
quality_file1 = "QualityTable.read1.length{}.qval".format(readlen)
quality_file2 = "QualityTable.read2.length{}.qval".format(readlen)
if not (op.exists(quality_file1) and op.exists(quality_file2)):
for i, qq in enumerate([quality_file1, quality_file2]):
cmd = "/usr/local/libexec/EAGLE/scaleQualityTable.pl"
cmd += " --input {}".format(op.join(share,
"QualityTables/DefaultQualityTable.read{}.length101.qval".format(i + 1)))
cmd += " --cycles {}".format(readlen)
cmd += " --output {}".format(qq)
sh(cmd, silent=True)
# Since distance is different from the default distribution which is
# centered around 319, we shift our peak to the new peak
template_lengths = op.join(share,
"TemplateLengthTables/DefaultTemplateLengthTable.tsv")
template_distance = "TemplateLengthTable{}.tsv".format(distance)
shift = distance - 319
if not op.exists(template_distance):
fp = open(template_lengths)
fw = open(template_distance, "w")
for row in fp:
size, counts = row.split()
size = int(size)
counts = int(counts)
size += shift
if size < readlen:
continue
print("\t".join(str(x) for x in (size, counts)), file=fw)
fw.close()
# All done, let's simulate!
cmd = "configureEAGLE.pl"
cmd += " --reference-genome {}".format(fastafile)
cmd += " --coverage-depth {}".format(depth)
cmd += " --gc-coverage-fit-table {}".format(op.join(share,
"GcCoverageFitTables/Homo_sapiens.example1.tsv"))
cmd += " --run-info {}".format(runinfo_readlen)
cmd += " --quality-table {}".format(quality_file1)
cmd += " --quality-table {}".format(quality_file2)
cmd += " --template-length-table {}".format(template_distance)
cmd += " --random-seed {}".format(random.randint(1, 65535))
sh(cmd, silent=True)
# Retrieve results
outpf = opts.outfile or "{0}.{1}bp.{2}x".format(pf, distance, depth)
outpf += ".bwa"
cwd = os.getcwd()
eagle_dir = "EAGLE"
os.chdir(eagle_dir)
sh("make bam", silent=True)
# Convert BAM to FASTQ
from jcvi.formats.sam import fastq
a, b = fastq(["eagle.bam", outpf])
sh("mv {} {} ../".format(a, b))
os.chdir(cwd)
# Clean-up
shutil.rmtree(eagle_dir) | [
"def",
"eagle",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"eagle",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--share\"",
",",
"default",
"=",
"\"/usr/local/share/EAGLE/\"",
",",
"help",
"=",
"\"Default EAGLE share path\"",
")",
"add_sim_options",
"(",
"p",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"fastafile",
",",
"=",
"args",
"share",
"=",
"opts",
".",
"share",
"depth",
"=",
"opts",
".",
"depth",
"readlen",
"=",
"opts",
".",
"readlen",
"distance",
"=",
"opts",
".",
"distance",
"pf",
"=",
"op",
".",
"basename",
"(",
"fastafile",
")",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"# Since EAGLE does not natively support read length other than 100bp and",
"# 250bp - for an arbitrary read length we need to generate a bunch of",
"# support files",
"# First file is the Runinfo",
"runinfo_readlen",
"=",
"\"RunInfo_PairedReads2x{}Cycles1x1Tiles.xml\"",
".",
"format",
"(",
"readlen",
")",
"if",
"not",
"op",
".",
"exists",
"(",
"runinfo_readlen",
")",
":",
"runinfo",
"=",
"op",
".",
"join",
"(",
"share",
",",
"\"RunInfo/RunInfo_PairedReads2x251Cycles1x1Tiles.xml\"",
")",
"runinfo_xml",
"=",
"open",
"(",
"runinfo",
")",
".",
"read",
"(",
")",
"runinfo_xml",
"=",
"runinfo_xml",
".",
"replace",
"(",
"\"251\"",
",",
"str",
"(",
"readlen",
")",
")",
".",
"replace",
"(",
"\"252\"",
",",
"str",
"(",
"readlen",
"+",
"1",
")",
")",
".",
"replace",
"(",
"\"502\"",
",",
"str",
"(",
"2",
"*",
"readlen",
")",
")",
"fw",
"=",
"open",
"(",
"runinfo_readlen",
",",
"\"w\"",
")",
"print",
"(",
"runinfo_xml",
".",
"strip",
"(",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"# Generate quality profiles",
"quality_file1",
"=",
"\"QualityTable.read1.length{}.qval\"",
".",
"format",
"(",
"readlen",
")",
"quality_file2",
"=",
"\"QualityTable.read2.length{}.qval\"",
".",
"format",
"(",
"readlen",
")",
"if",
"not",
"(",
"op",
".",
"exists",
"(",
"quality_file1",
")",
"and",
"op",
".",
"exists",
"(",
"quality_file2",
")",
")",
":",
"for",
"i",
",",
"qq",
"in",
"enumerate",
"(",
"[",
"quality_file1",
",",
"quality_file2",
"]",
")",
":",
"cmd",
"=",
"\"/usr/local/libexec/EAGLE/scaleQualityTable.pl\"",
"cmd",
"+=",
"\" --input {}\"",
".",
"format",
"(",
"op",
".",
"join",
"(",
"share",
",",
"\"QualityTables/DefaultQualityTable.read{}.length101.qval\"",
".",
"format",
"(",
"i",
"+",
"1",
")",
")",
")",
"cmd",
"+=",
"\" --cycles {}\"",
".",
"format",
"(",
"readlen",
")",
"cmd",
"+=",
"\" --output {}\"",
".",
"format",
"(",
"qq",
")",
"sh",
"(",
"cmd",
",",
"silent",
"=",
"True",
")",
"# Since distance is different from the default distribution which is",
"# centered around 319, we shift our peak to the new peak",
"template_lengths",
"=",
"op",
".",
"join",
"(",
"share",
",",
"\"TemplateLengthTables/DefaultTemplateLengthTable.tsv\"",
")",
"template_distance",
"=",
"\"TemplateLengthTable{}.tsv\"",
".",
"format",
"(",
"distance",
")",
"shift",
"=",
"distance",
"-",
"319",
"if",
"not",
"op",
".",
"exists",
"(",
"template_distance",
")",
":",
"fp",
"=",
"open",
"(",
"template_lengths",
")",
"fw",
"=",
"open",
"(",
"template_distance",
",",
"\"w\"",
")",
"for",
"row",
"in",
"fp",
":",
"size",
",",
"counts",
"=",
"row",
".",
"split",
"(",
")",
"size",
"=",
"int",
"(",
"size",
")",
"counts",
"=",
"int",
"(",
"counts",
")",
"size",
"+=",
"shift",
"if",
"size",
"<",
"readlen",
":",
"continue",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"size",
",",
"counts",
")",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"# All done, let's simulate!",
"cmd",
"=",
"\"configureEAGLE.pl\"",
"cmd",
"+=",
"\" --reference-genome {}\"",
".",
"format",
"(",
"fastafile",
")",
"cmd",
"+=",
"\" --coverage-depth {}\"",
".",
"format",
"(",
"depth",
")",
"cmd",
"+=",
"\" --gc-coverage-fit-table {}\"",
".",
"format",
"(",
"op",
".",
"join",
"(",
"share",
",",
"\"GcCoverageFitTables/Homo_sapiens.example1.tsv\"",
")",
")",
"cmd",
"+=",
"\" --run-info {}\"",
".",
"format",
"(",
"runinfo_readlen",
")",
"cmd",
"+=",
"\" --quality-table {}\"",
".",
"format",
"(",
"quality_file1",
")",
"cmd",
"+=",
"\" --quality-table {}\"",
".",
"format",
"(",
"quality_file2",
")",
"cmd",
"+=",
"\" --template-length-table {}\"",
".",
"format",
"(",
"template_distance",
")",
"cmd",
"+=",
"\" --random-seed {}\"",
".",
"format",
"(",
"random",
".",
"randint",
"(",
"1",
",",
"65535",
")",
")",
"sh",
"(",
"cmd",
",",
"silent",
"=",
"True",
")",
"# Retrieve results",
"outpf",
"=",
"opts",
".",
"outfile",
"or",
"\"{0}.{1}bp.{2}x\"",
".",
"format",
"(",
"pf",
",",
"distance",
",",
"depth",
")",
"outpf",
"+=",
"\".bwa\"",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"eagle_dir",
"=",
"\"EAGLE\"",
"os",
".",
"chdir",
"(",
"eagle_dir",
")",
"sh",
"(",
"\"make bam\"",
",",
"silent",
"=",
"True",
")",
"# Convert BAM to FASTQ",
"from",
"jcvi",
".",
"formats",
".",
"sam",
"import",
"fastq",
"a",
",",
"b",
"=",
"fastq",
"(",
"[",
"\"eagle.bam\"",
",",
"outpf",
"]",
")",
"sh",
"(",
"\"mv {} {} ../\"",
".",
"format",
"(",
"a",
",",
"b",
")",
")",
"os",
".",
"chdir",
"(",
"cwd",
")",
"# Clean-up",
"shutil",
".",
"rmtree",
"(",
"eagle_dir",
")"
] | 37.103093 | 0.002436 | [
"def eagle(args):\n",
" \"\"\"\n",
" %prog eagle fastafile\n",
"\n",
" \"\"\"\n",
" p = OptionParser(eagle.__doc__)\n",
" p.add_option(\"--share\", default=\"/usr/local/share/EAGLE/\",\n",
" help=\"Default EAGLE share path\")\n",
" add_sim_options(p)\n",
" opts, args = p.parse_args(args)\n",
"\n",
" if len(args) != 1:\n",
" sys.exit(not p.print_help())\n",
"\n",
" fastafile, = args\n",
" share = opts.share\n",
" depth = opts.depth\n",
" readlen = opts.readlen\n",
" distance = opts.distance\n",
" pf = op.basename(fastafile).split(\".\")[0]\n",
"\n",
" # Since EAGLE does not natively support read length other than 100bp and\n",
" # 250bp - for an arbitrary read length we need to generate a bunch of\n",
" # support files\n",
"\n",
" # First file is the Runinfo\n",
" runinfo_readlen = \"RunInfo_PairedReads2x{}Cycles1x1Tiles.xml\".format(readlen)\n",
" if not op.exists(runinfo_readlen):\n",
" runinfo = op.join(share, \"RunInfo/RunInfo_PairedReads2x251Cycles1x1Tiles.xml\")\n",
" runinfo_xml = open(runinfo).read()\n",
" runinfo_xml = runinfo_xml.replace(\"251\", str(readlen))\\\n",
" .replace(\"252\", str(readlen + 1))\\\n",
" .replace(\"502\", str(2 * readlen))\n",
" fw = open(runinfo_readlen, \"w\")\n",
" print(runinfo_xml.strip(), file=fw)\n",
" fw.close()\n",
"\n",
" # Generate quality profiles\n",
" quality_file1 = \"QualityTable.read1.length{}.qval\".format(readlen)\n",
" quality_file2 = \"QualityTable.read2.length{}.qval\".format(readlen)\n",
" if not (op.exists(quality_file1) and op.exists(quality_file2)):\n",
" for i, qq in enumerate([quality_file1, quality_file2]):\n",
" cmd = \"/usr/local/libexec/EAGLE/scaleQualityTable.pl\"\n",
" cmd += \" --input {}\".format(op.join(share,\n",
" \"QualityTables/DefaultQualityTable.read{}.length101.qval\".format(i + 1)))\n",
" cmd += \" --cycles {}\".format(readlen)\n",
" cmd += \" --output {}\".format(qq)\n",
" sh(cmd, silent=True)\n",
"\n",
" # Since distance is different from the default distribution which is\n",
" # centered around 319, we shift our peak to the new peak\n",
" template_lengths = op.join(share,\n",
" \"TemplateLengthTables/DefaultTemplateLengthTable.tsv\")\n",
" template_distance = \"TemplateLengthTable{}.tsv\".format(distance)\n",
" shift = distance - 319\n",
" if not op.exists(template_distance):\n",
" fp = open(template_lengths)\n",
" fw = open(template_distance, \"w\")\n",
" for row in fp:\n",
" size, counts = row.split()\n",
" size = int(size)\n",
" counts = int(counts)\n",
" size += shift\n",
" if size < readlen:\n",
" continue\n",
" print(\"\\t\".join(str(x) for x in (size, counts)), file=fw)\n",
" fw.close()\n",
"\n",
" # All done, let's simulate!\n",
" cmd = \"configureEAGLE.pl\"\n",
" cmd += \" --reference-genome {}\".format(fastafile)\n",
" cmd += \" --coverage-depth {}\".format(depth)\n",
" cmd += \" --gc-coverage-fit-table {}\".format(op.join(share,\n",
" \"GcCoverageFitTables/Homo_sapiens.example1.tsv\"))\n",
" cmd += \" --run-info {}\".format(runinfo_readlen)\n",
" cmd += \" --quality-table {}\".format(quality_file1)\n",
" cmd += \" --quality-table {}\".format(quality_file2)\n",
" cmd += \" --template-length-table {}\".format(template_distance)\n",
" cmd += \" --random-seed {}\".format(random.randint(1, 65535))\n",
" sh(cmd, silent=True)\n",
"\n",
" # Retrieve results\n",
" outpf = opts.outfile or \"{0}.{1}bp.{2}x\".format(pf, distance, depth)\n",
" outpf += \".bwa\"\n",
" cwd = os.getcwd()\n",
" eagle_dir = \"EAGLE\"\n",
" os.chdir(eagle_dir)\n",
" sh(\"make bam\", silent=True)\n",
"\n",
" # Convert BAM to FASTQ\n",
" from jcvi.formats.sam import fastq\n",
" a, b = fastq([\"eagle.bam\", outpf])\n",
" sh(\"mv {} {} ../\".format(a, b))\n",
" os.chdir(cwd)\n",
"\n",
" # Clean-up\n",
" shutil.rmtree(eagle_dir)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0.015151515151515152,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571
] | 97 | 0.001629 |
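A hypothetical invocation of the script action above; the option names come from add_sim_options, which is not shown in this record, so the flags below are assumptions.

# genome.fasta is an illustrative input; --depth/--readlen are assumed flags
eagle(["genome.fasta", "--depth", "20", "--readlen", "150"])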
def _get_tns_search_results(
self):
"""
    *query the TNS and return the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url | [
"def",
"_get_tns_search_results",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``_get_tns_search_results`` method'",
")",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"\"http://wis-tns.weizmann.ac.il/search\"",
",",
"params",
"=",
"{",
"\"page\"",
":",
"self",
".",
"page",
",",
"\"ra\"",
":",
"self",
".",
"ra",
",",
"\"decl\"",
":",
"self",
".",
"dec",
",",
"\"radius\"",
":",
"self",
".",
"radiusArcsec",
",",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"internal_name\"",
":",
"self",
".",
"internal_name",
",",
"\"date_start[date]\"",
":",
"self",
".",
"start",
",",
"\"date_end[date]\"",
":",
"self",
".",
"end",
",",
"\"num_page\"",
":",
"self",
".",
"batchSize",
",",
"\"display[redshift]\"",
":",
"\"1\"",
",",
"\"display[hostname]\"",
":",
"\"1\"",
",",
"\"display[host_redshift]\"",
":",
"\"1\"",
",",
"\"display[source_group_name]\"",
":",
"\"1\"",
",",
"\"display[internal_name]\"",
":",
"\"1\"",
",",
"\"display[spectra_count]\"",
":",
"\"1\"",
",",
"\"display[discoverymag]\"",
":",
"\"1\"",
",",
"\"display[discmagfilter]\"",
":",
"\"1\"",
",",
"\"display[discoverydate]\"",
":",
"\"1\"",
",",
"\"display[discoverer]\"",
":",
"\"1\"",
",",
"\"display[sources]\"",
":",
"\"1\"",
",",
"\"display[bibcode]\"",
":",
"\"1\"",
",",
"}",
",",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"print",
"(",
"'HTTP Request failed'",
")",
"self",
".",
"log",
".",
"info",
"(",
"'completed the ``_get_tns_search_results`` method'",
")",
"return",
"response",
".",
"status_code",
",",
"response",
".",
"content",
",",
"response",
".",
"url"
] | 38.875 | 0.001255 | [
"def _get_tns_search_results(\n",
" self):\n",
" \"\"\"\n",
" *query the tns and result the response*\n",
" \"\"\"\n",
" self.log.info('starting the ``_get_tns_search_results`` method')\n",
"\n",
" try:\n",
" response = requests.get(\n",
" url=\"http://wis-tns.weizmann.ac.il/search\",\n",
" params={\n",
" \"page\": self.page,\n",
" \"ra\": self.ra,\n",
" \"decl\": self.dec,\n",
" \"radius\": self.radiusArcsec,\n",
" \"name\": self.name,\n",
" \"internal_name\": self.internal_name,\n",
" \"date_start[date]\": self.start,\n",
" \"date_end[date]\": self.end,\n",
" \"num_page\": self.batchSize,\n",
" \"display[redshift]\": \"1\",\n",
" \"display[hostname]\": \"1\",\n",
" \"display[host_redshift]\": \"1\",\n",
" \"display[source_group_name]\": \"1\",\n",
" \"display[internal_name]\": \"1\",\n",
" \"display[spectra_count]\": \"1\",\n",
" \"display[discoverymag]\": \"1\",\n",
" \"display[discmagfilter]\": \"1\",\n",
" \"display[discoverydate]\": \"1\",\n",
" \"display[discoverer]\": \"1\",\n",
" \"display[sources]\": \"1\",\n",
" \"display[bibcode]\": \"1\",\n",
" },\n",
" )\n",
"\n",
" except requests.exceptions.RequestException:\n",
" print('HTTP Request failed')\n",
"\n",
" self.log.info('completed the ``_get_tns_search_results`` method')\n",
" return response.status_code, response.content, response.url"
] | [
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358
] | 40 | 0.002456 |
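The same query is easy to reproduce outside the class; a standalone sketch using only the endpoint and parameter names visible above (the coordinate values are illustrative).

import requests

response = requests.get(
    url="http://wis-tns.weizmann.ac.il/search",
    params={"ra": "10:00:00", "decl": "+20:00:00", "radius": 5, "num_page": 50},
)
print(response.status_code, response.url)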
def main(args,parser,subparser):
'''the list command corresponds with listing images for an external
resource. This is different from listing images that are local to the
database, which should be done with "images"
'''
from sregistry.main import get_client
cli = get_client(quiet=args.quiet)
for query in args.query:
if query in ['','*']:
query = None
cli.ls(query=query) | [
"def",
"main",
"(",
"args",
",",
"parser",
",",
"subparser",
")",
":",
"from",
"sregistry",
".",
"main",
"import",
"get_client",
"cli",
"=",
"get_client",
"(",
"quiet",
"=",
"args",
".",
"quiet",
")",
"for",
"query",
"in",
"args",
".",
"query",
":",
"if",
"query",
"in",
"[",
"''",
",",
"'*'",
"]",
":",
"query",
"=",
"None",
"cli",
".",
"ls",
"(",
"query",
"=",
"query",
")"
] | 32.923077 | 0.011364 | [
"def main(args,parser,subparser):\n",
" '''the list command corresponds with listing images for an external\n",
" resource. This is different from listing images that are local to the\n",
" database, which should be done with \"images\"\n",
" '''\n",
" from sregistry.main import get_client\n",
" cli = get_client(quiet=args.quiet)\n",
" \n",
" for query in args.query:\n",
" if query in ['','*']:\n",
" query = None\n",
"\n",
" cli.ls(query=query)"
] | [
0.06060606060606061,
0,
0,
0,
0,
0,
0,
0.2,
0,
0.03333333333333333,
0,
0,
0.037037037037037035
] | 13 | 0.02546 |
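A hypothetical invocation of the command handler above; args mimics the parsed namespace (quiet and query are the only attributes the function reads), with '*' and '' both meaning "list everything".

from types import SimpleNamespace

args = SimpleNamespace(quiet=True, query=["*"])  # illustrative namespace
main(args, parser=None, subparser=None)          # parser/subparser are unused here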
def get_proxies():
"""Get available proxies to use with requests library."""
proxies = getproxies()
filtered_proxies = {}
for key, value in proxies.items():
if key.startswith('http://'):
if not value.startswith('http://'):
filtered_proxies[key] = 'http://{0}'.format(value)
else:
filtered_proxies[key] = value
return filtered_proxies | [
"def",
"get_proxies",
"(",
")",
":",
"proxies",
"=",
"getproxies",
"(",
")",
"filtered_proxies",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"proxies",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'http://'",
")",
":",
"if",
"not",
"value",
".",
"startswith",
"(",
"'http://'",
")",
":",
"filtered_proxies",
"[",
"key",
"]",
"=",
"'http://{0}'",
".",
"format",
"(",
"value",
")",
"else",
":",
"filtered_proxies",
"[",
"key",
"]",
"=",
"value",
"return",
"filtered_proxies"
] | 37 | 0.002398 | [
"def get_proxies():\n",
" \"\"\"Get available proxies to use with requests library.\"\"\"\n",
" proxies = getproxies()\n",
" filtered_proxies = {}\n",
" for key, value in proxies.items():\n",
" if key.startswith('http://'):\n",
" if not value.startswith('http://'):\n",
" filtered_proxies[key] = 'http://{0}'.format(value)\n",
" else:\n",
" filtered_proxies[key] = value\n",
" return filtered_proxies"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035
] | 11 | 0.003367 |
def update_indel(self, nucmer_snp):
'''Indels are reported over multiple lines, 1 base insertion or deletion per line. This method extends the current variant by 1 base if it's an indel and adjacent to the new SNP and returns True. If the current variant is a SNP, does nothing and returns False'''
new_variant = Variant(nucmer_snp)
if self.var_type not in [INS, DEL] \
or self.var_type != new_variant.var_type \
or self.qry_name != new_variant.qry_name \
or self.ref_name != new_variant.ref_name \
or self.reverse != new_variant.reverse:
return False
if self.var_type == INS \
and self.ref_start == new_variant.ref_start \
and self.qry_end + 1 == new_variant.qry_start:
self.qry_base += new_variant.qry_base
self.qry_end += 1
return True
if self.var_type == DEL \
and self.qry_start == new_variant.qry_start \
and self.ref_end + 1 == new_variant.ref_start:
self.ref_base += new_variant.ref_base
self.ref_end += 1
return True
return False | [
"def",
"update_indel",
"(",
"self",
",",
"nucmer_snp",
")",
":",
"new_variant",
"=",
"Variant",
"(",
"nucmer_snp",
")",
"if",
"self",
".",
"var_type",
"not",
"in",
"[",
"INS",
",",
"DEL",
"]",
"or",
"self",
".",
"var_type",
"!=",
"new_variant",
".",
"var_type",
"or",
"self",
".",
"qry_name",
"!=",
"new_variant",
".",
"qry_name",
"or",
"self",
".",
"ref_name",
"!=",
"new_variant",
".",
"ref_name",
"or",
"self",
".",
"reverse",
"!=",
"new_variant",
".",
"reverse",
":",
"return",
"False",
"if",
"self",
".",
"var_type",
"==",
"INS",
"and",
"self",
".",
"ref_start",
"==",
"new_variant",
".",
"ref_start",
"and",
"self",
".",
"qry_end",
"+",
"1",
"==",
"new_variant",
".",
"qry_start",
":",
"self",
".",
"qry_base",
"+=",
"new_variant",
".",
"qry_base",
"self",
".",
"qry_end",
"+=",
"1",
"return",
"True",
"if",
"self",
".",
"var_type",
"==",
"DEL",
"and",
"self",
".",
"qry_start",
"==",
"new_variant",
".",
"qry_start",
"and",
"self",
".",
"ref_end",
"+",
"1",
"==",
"new_variant",
".",
"ref_start",
":",
"self",
".",
"ref_base",
"+=",
"new_variant",
".",
"ref_base",
"self",
".",
"ref_end",
"+=",
"1",
"return",
"True",
"return",
"False"
] | 49.086957 | 0.009557 | [
"def update_indel(self, nucmer_snp):\n",
" '''Indels are reported over multiple lines, 1 base insertion or deletion per line. This method extends the current variant by 1 base if it's an indel and adjacent to the new SNP and returns True. If the current variant is a SNP, does nothing and returns False'''\n",
" new_variant = Variant(nucmer_snp)\n",
" if self.var_type not in [INS, DEL] \\\n",
" or self.var_type != new_variant.var_type \\\n",
" or self.qry_name != new_variant.qry_name \\\n",
" or self.ref_name != new_variant.ref_name \\\n",
" or self.reverse != new_variant.reverse:\n",
" return False\n",
" if self.var_type == INS \\\n",
" and self.ref_start == new_variant.ref_start \\\n",
" and self.qry_end + 1 == new_variant.qry_start:\n",
" self.qry_base += new_variant.qry_base\n",
" self.qry_end += 1\n",
" return True\n",
" if self.var_type == DEL \\\n",
" and self.qry_start == new_variant.qry_start \\\n",
" and self.ref_end + 1 == new_variant.ref_start:\n",
" self.ref_base += new_variant.ref_base\n",
" self.ref_end += 1\n",
" return True\n",
"\n",
" return False"
] | [
0,
0.007380073800738007,
0,
0,
0.018867924528301886,
0.018867924528301886,
0.018867924528301886,
0.02,
0,
0,
0.017857142857142856,
0.017543859649122806,
0,
0,
0,
0,
0.017857142857142856,
0.017543859649122806,
0,
0,
0,
0,
0.05
] | 23 | 0.008904 |
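A minimal sketch of the merge rule only, with hypothetical positions (constructing real Variant objects needs nucmer show-snps lines, which are not shown here): two adjacent 1-base insertions at the same reference position collapse into one.

first = {'type': 'INS', 'ref_start': 100, 'qry_start': 10, 'qry_end': 10, 'qry_base': 'A'}
nxt   = {'type': 'INS', 'ref_start': 100, 'qry_start': 11, 'qry_end': 11, 'qry_base': 'C'}
if first['ref_start'] == nxt['ref_start'] and first['qry_end'] + 1 == nxt['qry_start']:
    first['qry_base'] += nxt['qry_base']   # inserted sequence becomes 'AC'
    first['qry_end'] += 1                  # insertion now spans qry 10..11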
def update(self, list_id, segment_id, data):
"""
updates an existing list segment.
"""
return self._mc_client._patch(url=self._build_path(list_id, 'segments', segment_id), data=data) | [
"def",
"update",
"(",
"self",
",",
"list_id",
",",
"segment_id",
",",
"data",
")",
":",
"return",
"self",
".",
"_mc_client",
".",
"_patch",
"(",
"url",
"=",
"self",
".",
"_build_path",
"(",
"list_id",
",",
"'segments'",
",",
"segment_id",
")",
",",
"data",
"=",
"data",
")"
] | 42 | 0.014019 | [
"def update(self, list_id, segment_id, data):\n",
" \"\"\"\n",
" updates an existing list segment.\n",
" \"\"\"\n",
" return self._mc_client._patch(url=self._build_path(list_id, 'segments', segment_id), data=data)"
] | [
0,
0.08333333333333333,
0,
0,
0.019417475728155338
] | 5 | 0.02055 |
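A hypothetical call through a mailchimp3-style client; the ids and payload below are illustrative.

client.lists.segments.update(
    list_id='57afe96172',
    segment_id='49381',
    data={'name': 'VIP customers'},
)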
def run_interrupted(self):
"""
        Runs custodian in an interrupted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
            NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
start = datetime.datetime.now()
try:
cwd = os.getcwd()
v = sys.version.replace("\n", " ")
logger.info("Custodian started in singleshot mode at {} in {}."
.format(start, cwd))
logger.info("Custodian running on Python version {}".format(v))
# load run log
if os.path.exists(Custodian.LOG_FILE):
self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)
if len(self.run_log) == 0:
# starting up an initial job - setup input and quit
job_n = 0
job = self.jobs[job_n]
logger.info("Setting up job no. 1 ({}) ".format(job.name))
job.setup()
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
return len(self.jobs)
else:
# Continuing after running calculation
job_n = self.run_log[-1]['job_n']
job = self.jobs[job_n]
# If we had to fix errors from a previous run, insert clean log
# dict
if len(self.run_log[-1]['corrections']) > 0:
logger.info("Reran {}.run due to fixable errors".format(
job.name))
# check error handlers
logger.info("Checking error handlers for {}.run".format(
job.name))
if self._do_check(self.handlers):
logger.info("Failed validation based on error handlers")
# raise an error for an unrecoverable error
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
logger.info("Corrected input based on error handlers")
# Return with more jobs to run if recoverable error caught
# and corrected for
return len(self.jobs) - job_n
# check validators
logger.info("Checking validator for {}.run".format(job.name))
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
logger.info("Failed validation based on validator")
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
logger.info("Postprocessing for {}.run".format(job.name))
job.postprocess()
# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
# VALIDATED
if len(self.jobs) == (job_n + 1):
self.finished = True
return 0
# Setup next job_n
job_n += 1
job = self.jobs[job_n]
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
job.setup()
return len(self.jobs) - job_n
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.finished and self.gzipped_output:
gzip_dir(".") | [
"def",
"run_interrupted",
"(",
"self",
")",
":",
"start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"v",
"=",
"sys",
".",
"version",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
"logger",
".",
"info",
"(",
"\"Custodian started in singleshot mode at {} in {}.\"",
".",
"format",
"(",
"start",
",",
"cwd",
")",
")",
"logger",
".",
"info",
"(",
"\"Custodian running on Python version {}\"",
".",
"format",
"(",
"v",
")",
")",
"# load run log",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"Custodian",
".",
"LOG_FILE",
")",
":",
"self",
".",
"run_log",
"=",
"loadfn",
"(",
"Custodian",
".",
"LOG_FILE",
",",
"cls",
"=",
"MontyDecoder",
")",
"if",
"len",
"(",
"self",
".",
"run_log",
")",
"==",
"0",
":",
"# starting up an initial job - setup input and quit",
"job_n",
"=",
"0",
"job",
"=",
"self",
".",
"jobs",
"[",
"job_n",
"]",
"logger",
".",
"info",
"(",
"\"Setting up job no. 1 ({}) \"",
".",
"format",
"(",
"job",
".",
"name",
")",
")",
"job",
".",
"setup",
"(",
")",
"self",
".",
"run_log",
".",
"append",
"(",
"{",
"\"job\"",
":",
"job",
".",
"as_dict",
"(",
")",
",",
"\"corrections\"",
":",
"[",
"]",
",",
"'job_n'",
":",
"job_n",
"}",
")",
"return",
"len",
"(",
"self",
".",
"jobs",
")",
"else",
":",
"# Continuing after running calculation",
"job_n",
"=",
"self",
".",
"run_log",
"[",
"-",
"1",
"]",
"[",
"'job_n'",
"]",
"job",
"=",
"self",
".",
"jobs",
"[",
"job_n",
"]",
"# If we had to fix errors from a previous run, insert clean log",
"# dict",
"if",
"len",
"(",
"self",
".",
"run_log",
"[",
"-",
"1",
"]",
"[",
"'corrections'",
"]",
")",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"Reran {}.run due to fixable errors\"",
".",
"format",
"(",
"job",
".",
"name",
")",
")",
"# check error handlers",
"logger",
".",
"info",
"(",
"\"Checking error handlers for {}.run\"",
".",
"format",
"(",
"job",
".",
"name",
")",
")",
"if",
"self",
".",
"_do_check",
"(",
"self",
".",
"handlers",
")",
":",
"logger",
".",
"info",
"(",
"\"Failed validation based on error handlers\"",
")",
"# raise an error for an unrecoverable error",
"for",
"x",
"in",
"self",
".",
"run_log",
"[",
"-",
"1",
"]",
"[",
"\"corrections\"",
"]",
":",
"if",
"not",
"x",
"[",
"\"actions\"",
"]",
"and",
"x",
"[",
"\"handler\"",
"]",
".",
"raises_runtime_error",
":",
"self",
".",
"run_log",
"[",
"-",
"1",
"]",
"[",
"\"handler\"",
"]",
"=",
"x",
"[",
"\"handler\"",
"]",
"s",
"=",
"\"Unrecoverable error for handler: {}. \"",
"\"Raising RuntimeError\"",
".",
"format",
"(",
"x",
"[",
"\"handler\"",
"]",
")",
"raise",
"NonRecoverableError",
"(",
"s",
",",
"True",
",",
"x",
"[",
"\"handler\"",
"]",
")",
"logger",
".",
"info",
"(",
"\"Corrected input based on error handlers\"",
")",
"# Return with more jobs to run if recoverable error caught",
"# and corrected for",
"return",
"len",
"(",
"self",
".",
"jobs",
")",
"-",
"job_n",
"# check validators",
"logger",
".",
"info",
"(",
"\"Checking validator for {}.run\"",
".",
"format",
"(",
"job",
".",
"name",
")",
")",
"for",
"v",
"in",
"self",
".",
"validators",
":",
"if",
"v",
".",
"check",
"(",
")",
":",
"self",
".",
"run_log",
"[",
"-",
"1",
"]",
"[",
"\"validator\"",
"]",
"=",
"v",
"logger",
".",
"info",
"(",
"\"Failed validation based on validator\"",
")",
"s",
"=",
"\"Validation failed: {}\"",
".",
"format",
"(",
"v",
")",
"raise",
"ValidationError",
"(",
"s",
",",
"True",
",",
"v",
")",
"logger",
".",
"info",
"(",
"\"Postprocessing for {}.run\"",
".",
"format",
"(",
"job",
".",
"name",
")",
")",
"job",
".",
"postprocess",
"(",
")",
"# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN",
"# VALIDATED",
"if",
"len",
"(",
"self",
".",
"jobs",
")",
"==",
"(",
"job_n",
"+",
"1",
")",
":",
"self",
".",
"finished",
"=",
"True",
"return",
"0",
"# Setup next job_n",
"job_n",
"+=",
"1",
"job",
"=",
"self",
".",
"jobs",
"[",
"job_n",
"]",
"self",
".",
"run_log",
".",
"append",
"(",
"{",
"\"job\"",
":",
"job",
".",
"as_dict",
"(",
")",
",",
"\"corrections\"",
":",
"[",
"]",
",",
"'job_n'",
":",
"job_n",
"}",
")",
"job",
".",
"setup",
"(",
")",
"return",
"len",
"(",
"self",
".",
"jobs",
")",
"-",
"job_n",
"except",
"CustodianError",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"ex",
".",
"message",
")",
"if",
"ex",
".",
"raises",
":",
"raise",
"finally",
":",
"# Log the corrections to a json file.",
"logger",
".",
"info",
"(",
"\"Logging to {}...\"",
".",
"format",
"(",
"Custodian",
".",
"LOG_FILE",
")",
")",
"dumpfn",
"(",
"self",
".",
"run_log",
",",
"Custodian",
".",
"LOG_FILE",
",",
"cls",
"=",
"MontyEncoder",
",",
"indent",
"=",
"4",
")",
"end",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"logger",
".",
"info",
"(",
"\"Run ended at {}.\"",
".",
"format",
"(",
"end",
")",
")",
"run_time",
"=",
"end",
"-",
"start",
"logger",
".",
"info",
"(",
"\"Run completed. Total time taken = {}.\"",
".",
"format",
"(",
"run_time",
")",
")",
"if",
"self",
".",
"finished",
"and",
"self",
".",
"gzipped_output",
":",
"gzip_dir",
"(",
"\".\"",
")"
] | 43.5 | 0.000624 | [
"def run_interrupted(self):\n",
" \"\"\"\n",
" Runs custodian in a interuppted mode, which sets up and\n",
" validates jobs but doesn't run the executable\n",
"\n",
" Returns:\n",
" number of remaining jobs\n",
"\n",
" Raises:\n",
" ValidationError: if a job fails validation\n",
" ReturnCodeError: if the process has a return code different from 0\n",
" NonRecoverableError: if an unrecoverable occurs\n",
" MaxCorrectionsPerJobError: if max_errors_per_job is reached\n",
" MaxCorrectionsError: if max_errors is reached\n",
" MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached\n",
" \"\"\"\n",
" start = datetime.datetime.now()\n",
" try:\n",
" cwd = os.getcwd()\n",
" v = sys.version.replace(\"\\n\", \" \")\n",
" logger.info(\"Custodian started in singleshot mode at {} in {}.\"\n",
" .format(start, cwd))\n",
" logger.info(\"Custodian running on Python version {}\".format(v))\n",
"\n",
" # load run log\n",
" if os.path.exists(Custodian.LOG_FILE):\n",
" self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)\n",
"\n",
" if len(self.run_log) == 0:\n",
" # starting up an initial job - setup input and quit\n",
" job_n = 0\n",
" job = self.jobs[job_n]\n",
" logger.info(\"Setting up job no. 1 ({}) \".format(job.name))\n",
" job.setup()\n",
" self.run_log.append({\"job\": job.as_dict(), \"corrections\": [],\n",
" 'job_n': job_n})\n",
" return len(self.jobs)\n",
" else:\n",
" # Continuing after running calculation\n",
" job_n = self.run_log[-1]['job_n']\n",
" job = self.jobs[job_n]\n",
"\n",
" # If we had to fix errors from a previous run, insert clean log\n",
" # dict\n",
" if len(self.run_log[-1]['corrections']) > 0:\n",
" logger.info(\"Reran {}.run due to fixable errors\".format(\n",
" job.name))\n",
"\n",
" # check error handlers\n",
" logger.info(\"Checking error handlers for {}.run\".format(\n",
" job.name))\n",
" if self._do_check(self.handlers):\n",
" logger.info(\"Failed validation based on error handlers\")\n",
" # raise an error for an unrecoverable error\n",
" for x in self.run_log[-1][\"corrections\"]:\n",
" if not x[\"actions\"] and x[\"handler\"].raises_runtime_error:\n",
" self.run_log[-1][\"handler\"] = x[\"handler\"]\n",
" s = \"Unrecoverable error for handler: {}. \" \\\n",
" \"Raising RuntimeError\".format(x[\"handler\"])\n",
" raise NonRecoverableError(s, True, x[\"handler\"])\n",
" logger.info(\"Corrected input based on error handlers\")\n",
" # Return with more jobs to run if recoverable error caught\n",
" # and corrected for\n",
" return len(self.jobs) - job_n\n",
"\n",
" # check validators\n",
" logger.info(\"Checking validator for {}.run\".format(job.name))\n",
" for v in self.validators:\n",
" if v.check():\n",
" self.run_log[-1][\"validator\"] = v\n",
" logger.info(\"Failed validation based on validator\")\n",
" s = \"Validation failed: {}\".format(v)\n",
" raise ValidationError(s, True, v)\n",
"\n",
" logger.info(\"Postprocessing for {}.run\".format(job.name))\n",
" job.postprocess()\n",
"\n",
" # IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN\n",
" # VALIDATED\n",
" if len(self.jobs) == (job_n + 1):\n",
" self.finished = True\n",
" return 0\n",
"\n",
" # Setup next job_n\n",
" job_n += 1\n",
" job = self.jobs[job_n]\n",
" self.run_log.append({\"job\": job.as_dict(), \"corrections\": [],\n",
" 'job_n': job_n})\n",
" job.setup()\n",
" return len(self.jobs) - job_n\n",
"\n",
" except CustodianError as ex:\n",
" logger.error(ex.message)\n",
" if ex.raises:\n",
" raise\n",
"\n",
" finally:\n",
" # Log the corrections to a json file.\n",
" logger.info(\"Logging to {}...\".format(Custodian.LOG_FILE))\n",
" dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,\n",
" indent=4)\n",
" end = datetime.datetime.now()\n",
" logger.info(\"Run ended at {}.\".format(end))\n",
" run_time = end - start\n",
" logger.info(\"Run completed. Total time taken = {}.\"\n",
" .format(run_time))\n",
" if self.finished and self.gzipped_output:\n",
" gzip_dir(\".\")"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655
] | 108 | 0.001202 |
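A minimal driver sketch for the interrupted-mode protocol above; the names `c` (a configured `Custodian` instance) and `launch_executable` (an out-of-band scheduler hook) are assumptions, not part of the source:

# hypothetical driver loop: run_interrupted() only sets up and validates,
# so the executable itself must be launched between calls
remaining = c.run_interrupted()      # first pass sets up job 0
while remaining:
    launch_executable()              # run the actual binary externally
    remaining = c.run_interrupted()  # validate, postprocess, set up next job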
def reverb(self,
reverberance=50,
hf_damping=50,
room_scale=100,
stereo_depth=100,
pre_delay=20,
wet_gain=0,
wet_only=False):
"""reverb takes 7 parameters: reverberance, high-freqnency damping,
room scale, stereo depth, pre-delay, wet gain and wet only (Truce or
False)"""
self.command.append('reverb')
if wet_only:
self.command.append('-w')
self.command.append(reverberance)
self.command.append(hf_damping)
self.command.append(room_scale)
self.command.append(stereo_depth)
self.command.append(pre_delay)
self.command.append(wet_gain)
return self | [
"def",
"reverb",
"(",
"self",
",",
"reverberance",
"=",
"50",
",",
"hf_damping",
"=",
"50",
",",
"room_scale",
"=",
"100",
",",
"stereo_depth",
"=",
"100",
",",
"pre_delay",
"=",
"20",
",",
"wet_gain",
"=",
"0",
",",
"wet_only",
"=",
"False",
")",
":",
"self",
".",
"command",
".",
"append",
"(",
"'reverb'",
")",
"if",
"wet_only",
":",
"self",
".",
"command",
".",
"append",
"(",
"'-w'",
")",
"self",
".",
"command",
".",
"append",
"(",
"reverberance",
")",
"self",
".",
"command",
".",
"append",
"(",
"hf_damping",
")",
"self",
".",
"command",
".",
"append",
"(",
"room_scale",
")",
"self",
".",
"command",
".",
"append",
"(",
"stereo_depth",
")",
"self",
".",
"command",
".",
"append",
"(",
"pre_delay",
")",
"self",
".",
"command",
".",
"append",
"(",
"wet_gain",
")",
"return",
"self"
] | 35.190476 | 0.011858 | [
"def reverb(self,\n",
" reverberance=50,\n",
" hf_damping=50,\n",
" room_scale=100,\n",
" stereo_depth=100,\n",
" pre_delay=20,\n",
" wet_gain=0,\n",
" wet_only=False):\n",
" \"\"\"reverb takes 7 parameters: reverberance, high-freqnency damping,\n",
" room scale, stereo depth, pre-delay, wet gain and wet only (Truce or\n",
" False)\"\"\"\n",
" self.command.append('reverb')\n",
" if wet_only:\n",
" self.command.append('-w')\n",
" self.command.append(reverberance)\n",
" self.command.append(hf_damping)\n",
" self.command.append(room_scale)\n",
" self.command.append(stereo_depth)\n",
" self.command.append(pre_delay)\n",
" self.command.append(wet_gain)\n",
" return self"
] | [
0,
0.03125,
0.03333333333333333,
0.03225806451612903,
0.030303030303030304,
0.034482758620689655,
0.037037037037037035,
0.03125,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 21 | 0.014081 |
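A short usage sketch for the chainable builder above; the owning class name `Transformer` and its `command` list are assumptions inferred from the method body:

tx = Transformer()                         # hypothetical sox-style command builder
tx.reverb(reverberance=75, wet_only=True)  # returns tx, so calls can chain
print(tx.command)                          # ['reverb', '-w', 75, 50, 100, 100, 20, 0]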
def _init_idxs_float(self, usr_hdrs):
"""List of indexes whose values will be floats."""
self.idxs_float = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.float_hdrs] | [
"def",
"_init_idxs_float",
"(",
"self",
",",
"usr_hdrs",
")",
":",
"self",
".",
"idxs_float",
"=",
"[",
"Idx",
"for",
"Hdr",
",",
"Idx",
"in",
"self",
".",
"hdr2idx",
".",
"items",
"(",
")",
"if",
"Hdr",
"in",
"usr_hdrs",
"and",
"Hdr",
"in",
"self",
".",
"float_hdrs",
"]"
] | 55.25 | 0.013393 | [
"def _init_idxs_float(self, usr_hdrs):\n",
" \"\"\"List of indexes whose values will be floats.\"\"\"\n",
" self.idxs_float = [\n",
" Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.float_hdrs]"
] | [
0,
0.01694915254237288,
0,
0.020202020202020204
] | 4 | 0.009288 |
def is_opposite(self, ns1, id1, ns2, id2):
"""Return True if two entities are in an "is_opposite" relationship
Parameters
----------
ns1 : str
Namespace code for an entity.
id1 : str
URI for an entity.
ns2 : str
Namespace code for an entity.
id2 : str
URI for an entity.
Returns
-------
bool
True if t1 has an "is_opposite" relationship with t2.
"""
u1 = self.get_uri(ns1, id1)
u2 = self.get_uri(ns2, id2)
t1 = rdflib.term.URIRef(u1)
t2 = rdflib.term.URIRef(u2)
rel = rdflib.term.URIRef(self.relations_prefix + 'is_opposite')
to = self.graph.objects(t1, rel)
if t2 in to:
return True
return False | [
"def",
"is_opposite",
"(",
"self",
",",
"ns1",
",",
"id1",
",",
"ns2",
",",
"id2",
")",
":",
"u1",
"=",
"self",
".",
"get_uri",
"(",
"ns1",
",",
"id1",
")",
"u2",
"=",
"self",
".",
"get_uri",
"(",
"ns2",
",",
"id2",
")",
"t1",
"=",
"rdflib",
".",
"term",
".",
"URIRef",
"(",
"u1",
")",
"t2",
"=",
"rdflib",
".",
"term",
".",
"URIRef",
"(",
"u2",
")",
"rel",
"=",
"rdflib",
".",
"term",
".",
"URIRef",
"(",
"self",
".",
"relations_prefix",
"+",
"'is_opposite'",
")",
"to",
"=",
"self",
".",
"graph",
".",
"objects",
"(",
"t1",
",",
"rel",
")",
"if",
"t2",
"in",
"to",
":",
"return",
"True",
"return",
"False"
] | 27.413793 | 0.00243 | [
"def is_opposite(self, ns1, id1, ns2, id2):\n",
" \"\"\"Return True if two entities are in an \"is_opposite\" relationship\n",
"\n",
" Parameters\n",
" ----------\n",
" ns1 : str\n",
" Namespace code for an entity.\n",
" id1 : str\n",
" URI for an entity.\n",
" ns2 : str\n",
" Namespace code for an entity.\n",
" id2 : str\n",
" URI for an entity.\n",
"\n",
" Returns\n",
" -------\n",
" bool\n",
" True if t1 has an \"is_opposite\" relationship with t2.\n",
" \"\"\"\n",
" u1 = self.get_uri(ns1, id1)\n",
" u2 = self.get_uri(ns2, id2)\n",
" t1 = rdflib.term.URIRef(u1)\n",
" t2 = rdflib.term.URIRef(u2)\n",
"\n",
" rel = rdflib.term.URIRef(self.relations_prefix + 'is_opposite')\n",
" to = self.graph.objects(t1, rel)\n",
" if t2 in to:\n",
" return True\n",
" return False"
] | [
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 29 | 0.002178 |
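A hedged usage sketch; `om` stands for an instance of the ontology class above with a populated rdflib graph, and the namespace code and URIs are illustrative only:

# hypothetical identifiers -- substitute real namespace codes and entity URIs
if om.is_opposite('GO', 'increased_process', 'GO', 'decreased_process'):
    print('the two entities are annotated as opposites')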
def update(cls, id, name, size, quantity, password, sshkey, upgrade,
console, snapshot_profile, reset_mysql_password, background):
"""Update a PaaS instance."""
if not background and not cls.intty():
background = True
paas_params = {}
if name:
paas_params['name'] = name
if size:
paas_params['size'] = size
if quantity:
paas_params['quantity'] = quantity
if password:
paas_params['password'] = password
paas_params.update(cls.convert_sshkey(sshkey))
if upgrade:
paas_params['upgrade'] = upgrade
if console:
paas_params['console'] = console
# XXX to delete a snapshot_profile the value has to be an empty string
if snapshot_profile is not None:
paas_params['snapshot_profile'] = snapshot_profile
if reset_mysql_password:
paas_params['reset_mysql_password'] = reset_mysql_password
result = cls.call('paas.update', cls.usable_id(id), paas_params)
if background:
return result
# interactive mode, run a progress bar
cls.echo('Updating your PaaS instance.')
cls.display_progress(result) | [
"def",
"update",
"(",
"cls",
",",
"id",
",",
"name",
",",
"size",
",",
"quantity",
",",
"password",
",",
"sshkey",
",",
"upgrade",
",",
"console",
",",
"snapshot_profile",
",",
"reset_mysql_password",
",",
"background",
")",
":",
"if",
"not",
"background",
"and",
"not",
"cls",
".",
"intty",
"(",
")",
":",
"background",
"=",
"True",
"paas_params",
"=",
"{",
"}",
"if",
"name",
":",
"paas_params",
"[",
"'name'",
"]",
"=",
"name",
"if",
"size",
":",
"paas_params",
"[",
"'size'",
"]",
"=",
"size",
"if",
"quantity",
":",
"paas_params",
"[",
"'quantity'",
"]",
"=",
"quantity",
"if",
"password",
":",
"paas_params",
"[",
"'password'",
"]",
"=",
"password",
"paas_params",
".",
"update",
"(",
"cls",
".",
"convert_sshkey",
"(",
"sshkey",
")",
")",
"if",
"upgrade",
":",
"paas_params",
"[",
"'upgrade'",
"]",
"=",
"upgrade",
"if",
"console",
":",
"paas_params",
"[",
"'console'",
"]",
"=",
"console",
"# XXX to delete a snapshot_profile the value has to be an empty string",
"if",
"snapshot_profile",
"is",
"not",
"None",
":",
"paas_params",
"[",
"'snapshot_profile'",
"]",
"=",
"snapshot_profile",
"if",
"reset_mysql_password",
":",
"paas_params",
"[",
"'reset_mysql_password'",
"]",
"=",
"reset_mysql_password",
"result",
"=",
"cls",
".",
"call",
"(",
"'paas.update'",
",",
"cls",
".",
"usable_id",
"(",
"id",
")",
",",
"paas_params",
")",
"if",
"background",
":",
"return",
"result",
"# interactive mode, run a progress bar",
"cls",
".",
"echo",
"(",
"'Updating your PaaS instance.'",
")",
"cls",
".",
"display_progress",
"(",
"result",
")"
] | 29.309524 | 0.002358 | [
"def update(cls, id, name, size, quantity, password, sshkey, upgrade,\n",
" console, snapshot_profile, reset_mysql_password, background):\n",
" \"\"\"Update a PaaS instance.\"\"\"\n",
" if not background and not cls.intty():\n",
" background = True\n",
"\n",
" paas_params = {}\n",
"\n",
" if name:\n",
" paas_params['name'] = name\n",
"\n",
" if size:\n",
" paas_params['size'] = size\n",
"\n",
" if quantity:\n",
" paas_params['quantity'] = quantity\n",
"\n",
" if password:\n",
" paas_params['password'] = password\n",
"\n",
" paas_params.update(cls.convert_sshkey(sshkey))\n",
"\n",
" if upgrade:\n",
" paas_params['upgrade'] = upgrade\n",
"\n",
" if console:\n",
" paas_params['console'] = console\n",
"\n",
" # XXX to delete a snapshot_profile the value has to be an empty string\n",
" if snapshot_profile is not None:\n",
" paas_params['snapshot_profile'] = snapshot_profile\n",
"\n",
" if reset_mysql_password:\n",
" paas_params['reset_mysql_password'] = reset_mysql_password\n",
"\n",
" result = cls.call('paas.update', cls.usable_id(id), paas_params)\n",
" if background:\n",
" return result\n",
"\n",
" # interactive mode, run a progress bar\n",
" cls.echo('Updating your PaaS instance.')\n",
" cls.display_progress(result)"
] | [
0,
0.012987012987012988,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776
] | 42 | 0.001597 |
def bufsize_validator(kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = kwargs.get("in", None)
out_ob = kwargs.get("out", None)
in_buf = kwargs.get("in_bufsize", None)
out_buf = kwargs.get("out_bufsize", None)
in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid | [
"def",
"bufsize_validator",
"(",
"kwargs",
")",
":",
"invalid",
"=",
"[",
"]",
"in_ob",
"=",
"kwargs",
".",
"get",
"(",
"\"in\"",
",",
"None",
")",
"out_ob",
"=",
"kwargs",
".",
"get",
"(",
"\"out\"",
",",
"None",
")",
"in_buf",
"=",
"kwargs",
".",
"get",
"(",
"\"in_bufsize\"",
",",
"None",
")",
"out_buf",
"=",
"kwargs",
".",
"get",
"(",
"\"out_bufsize\"",
",",
"None",
")",
"in_no_buf",
"=",
"ob_is_tty",
"(",
"in_ob",
")",
"or",
"ob_is_pipe",
"(",
"in_ob",
")",
"out_no_buf",
"=",
"ob_is_tty",
"(",
"out_ob",
")",
"or",
"ob_is_pipe",
"(",
"out_ob",
")",
"err",
"=",
"\"Can't specify an {target} bufsize if the {target} target is a pipe or TTY\"",
"if",
"in_no_buf",
"and",
"in_buf",
"is",
"not",
"None",
":",
"invalid",
".",
"append",
"(",
"(",
"(",
"\"in\"",
",",
"\"in_bufsize\"",
")",
",",
"err",
".",
"format",
"(",
"target",
"=",
"\"in\"",
")",
")",
")",
"if",
"out_no_buf",
"and",
"out_buf",
"is",
"not",
"None",
":",
"invalid",
".",
"append",
"(",
"(",
"(",
"\"out\"",
",",
"\"out_bufsize\"",
")",
",",
"err",
".",
"format",
"(",
"target",
"=",
"\"out\"",
")",
")",
")",
"return",
"invalid"
] | 37.115385 | 0.00202 | [
"def bufsize_validator(kwargs):\n",
" \"\"\" a validator to prevent a user from saying that they want custom\n",
" buffering when they're using an in/out object that will be os.dup'd to the\n",
" process, and has its own buffering. an example is a pipe or a tty. it\n",
" doesn't make sense to tell them to have a custom buffering, since the os\n",
" controls this. \"\"\"\n",
" invalid = []\n",
"\n",
" in_ob = kwargs.get(\"in\", None)\n",
" out_ob = kwargs.get(\"out\", None)\n",
"\n",
" in_buf = kwargs.get(\"in_bufsize\", None)\n",
" out_buf = kwargs.get(\"out_bufsize\", None)\n",
"\n",
" in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)\n",
" out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)\n",
"\n",
" err = \"Can't specify an {target} bufsize if the {target} target is a pipe or TTY\"\n",
"\n",
" if in_no_buf and in_buf is not None:\n",
" invalid.append(((\"in\", \"in_bufsize\"), err.format(target=\"in\")))\n",
"\n",
" if out_no_buf and out_buf is not None:\n",
" invalid.append(((\"out\", \"out_bufsize\"), err.format(target=\"out\")))\n",
"\n",
" return invalid"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 26 | 0.002584 |
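A quick sketch of the validator rejecting a bad combination, assuming `bufsize_validator` and its `ob_is_pipe` helper are in scope and that the helper recognizes a file object opened on an `os.pipe` descriptor:

import os

read_fd, write_fd = os.pipe()
kwargs = {'out': os.fdopen(write_fd, 'wb'), 'out_bufsize': 1024}
print(bufsize_validator(kwargs))
# -> [(('out', 'out_bufsize'), "Can't specify an out bufsize ...")]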
def resource_filename_mod_entry_point(module_name, entry_point):
"""
    If a given package declares a namespace and also provides submodules
nested at that namespace level, and for whatever reason that module
is needed, Python's import mechanism will not have a path associated
with that module. However, if given an entry_point, this path can
be resolved through its distribution. That said, the default
resource_filename function does not accept an entry_point, and so we
have to chain that back together manually.
"""
if entry_point.dist is None:
# distribution missing is typically caused by mocked entry
# points from tests; silently falling back to basic lookup
result = pkg_resources.resource_filename(module_name, '')
else:
result = resource_filename_mod_dist(module_name, entry_point.dist)
if not result:
logger.warning(
"resource path cannot be found for module '%s' and entry_point "
"'%s'", module_name, entry_point
)
return None
if not exists(result):
logger.warning(
"resource path found at '%s' for module '%s' and entry_point "
"'%s', but it does not exist", result, module_name, entry_point,
)
return None
return result | [
"def",
"resource_filename_mod_entry_point",
"(",
"module_name",
",",
"entry_point",
")",
":",
"if",
"entry_point",
".",
"dist",
"is",
"None",
":",
"# distribution missing is typically caused by mocked entry",
"# points from tests; silently falling back to basic lookup",
"result",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"module_name",
",",
"''",
")",
"else",
":",
"result",
"=",
"resource_filename_mod_dist",
"(",
"module_name",
",",
"entry_point",
".",
"dist",
")",
"if",
"not",
"result",
":",
"logger",
".",
"warning",
"(",
"\"resource path cannot be found for module '%s' and entry_point \"",
"\"'%s'\"",
",",
"module_name",
",",
"entry_point",
")",
"return",
"None",
"if",
"not",
"exists",
"(",
"result",
")",
":",
"logger",
".",
"warning",
"(",
"\"resource path found at '%s' for module '%s' and entry_point \"",
"\"'%s', but it does not exist\"",
",",
"result",
",",
"module_name",
",",
"entry_point",
",",
")",
"return",
"None",
"return",
"result"
] | 41.612903 | 0.000758 | [
"def resource_filename_mod_entry_point(module_name, entry_point):\n",
" \"\"\"\n",
" If a given package declares a namespace and also provide submodules\n",
" nested at that namespace level, and for whatever reason that module\n",
" is needed, Python's import mechanism will not have a path associated\n",
" with that module. However, if given an entry_point, this path can\n",
" be resolved through its distribution. That said, the default\n",
" resource_filename function does not accept an entry_point, and so we\n",
" have to chain that back together manually.\n",
" \"\"\"\n",
"\n",
" if entry_point.dist is None:\n",
" # distribution missing is typically caused by mocked entry\n",
" # points from tests; silently falling back to basic lookup\n",
" result = pkg_resources.resource_filename(module_name, '')\n",
" else:\n",
" result = resource_filename_mod_dist(module_name, entry_point.dist)\n",
"\n",
" if not result:\n",
" logger.warning(\n",
" \"resource path cannot be found for module '%s' and entry_point \"\n",
" \"'%s'\", module_name, entry_point\n",
" )\n",
" return None\n",
" if not exists(result):\n",
" logger.warning(\n",
" \"resource path found at '%s' for module '%s' and entry_point \"\n",
" \"'%s', but it does not exist\", result, module_name, entry_point,\n",
" )\n",
" return None\n",
" return result"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 31 | 0.001898 |
def view_entries(search_query=None):
"""View previous entries"""
if search_query:
expr = Entry.content.search(search_query)
else:
expr = None
query = Entry.query(expr, order_by=Entry.timestamp.desc())
for entry in query:
timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
print(timestamp)
print('=' * len(timestamp))
print(entry.content)
print('n) next entry')
print('d) delete entry')
print('q) return to main menu')
choice = raw_input('Choice? (Ndq) ').lower().strip()
if choice == 'q':
break
elif choice == 'd':
entry.delete()
print('Entry deleted successfully.')
break | [
"def",
"view_entries",
"(",
"search_query",
"=",
"None",
")",
":",
"if",
"search_query",
":",
"expr",
"=",
"Entry",
".",
"content",
".",
"search",
"(",
"search_query",
")",
"else",
":",
"expr",
"=",
"None",
"query",
"=",
"Entry",
".",
"query",
"(",
"expr",
",",
"order_by",
"=",
"Entry",
".",
"timestamp",
".",
"desc",
"(",
")",
")",
"for",
"entry",
"in",
"query",
":",
"timestamp",
"=",
"entry",
".",
"timestamp",
".",
"strftime",
"(",
"'%A %B %d, %Y %I:%M%p'",
")",
"print",
"(",
"timestamp",
")",
"print",
"(",
"'='",
"*",
"len",
"(",
"timestamp",
")",
")",
"print",
"(",
"entry",
".",
"content",
")",
"print",
"(",
"'n) next entry'",
")",
"print",
"(",
"'d) delete entry'",
")",
"print",
"(",
"'q) return to main menu'",
")",
"choice",
"=",
"raw_input",
"(",
"'Choice? (Ndq) '",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"if",
"choice",
"==",
"'q'",
":",
"break",
"elif",
"choice",
"==",
"'d'",
":",
"entry",
".",
"delete",
"(",
")",
"print",
"(",
"'Entry deleted successfully.'",
")",
"break"
] | 31.521739 | 0.001339 | [
"def view_entries(search_query=None):\n",
" \"\"\"View previous entries\"\"\"\n",
" if search_query:\n",
" expr = Entry.content.search(search_query)\n",
" else:\n",
" expr = None\n",
"\n",
" query = Entry.query(expr, order_by=Entry.timestamp.desc())\n",
" for entry in query:\n",
" timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')\n",
" print(timestamp)\n",
" print('=' * len(timestamp))\n",
" print(entry.content)\n",
" print('n) next entry')\n",
" print('d) delete entry')\n",
" print('q) return to main menu')\n",
" choice = raw_input('Choice? (Ndq) ').lower().strip()\n",
" if choice == 'q':\n",
" break\n",
" elif choice == 'd':\n",
" entry.delete()\n",
" print('Entry deleted successfully.')\n",
" break"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 23 | 0.002558 |
def supports_spatial_unit_record_type(self, spatial_unit_record_type=None):
"""Tests if the given spatial unit record type is supported.
arg: spatial_unit_record_type (osid.type.Type): a spatial
unit record Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not an ``SPATIALUNIT``
raise: NullArgument - ``spatial_unit_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
from .osid_errors import IllegalState, NullArgument
if not spatial_unit_record_type:
raise NullArgument('no input Type provided')
if self._kwargs['syntax'] not in ['``SPATIALUNIT``']:
            raise IllegalState('put more meaningful message here')
return spatial_unit_record_type in self.get_spatial_unit_record_types | [
"def",
"supports_spatial_unit_record_type",
"(",
"self",
",",
"spatial_unit_record_type",
"=",
"None",
")",
":",
"# Implemented from template for osid.Metadata.supports_coordinate_type",
"from",
".",
"osid_errors",
"import",
"IllegalState",
",",
"NullArgument",
"if",
"not",
"spatial_unit_record_type",
":",
"raise",
"NullArgument",
"(",
"'no input Type provided'",
")",
"if",
"self",
".",
"_kwargs",
"[",
"'syntax'",
"]",
"not",
"in",
"[",
"'``SPATIALUNIT``'",
"]",
":",
"raise",
"IllegalState",
"(",
"'put more meaninful message here'",
")",
"return",
"spatial_unit_record_type",
"in",
"self",
".",
"get_spatial_unit_record_types"
] | 52 | 0.001988 | [
"def supports_spatial_unit_record_type(self, spatial_unit_record_type=None):\n",
" \"\"\"Tests if the given spatial unit record type is supported.\n",
"\n",
" arg: spatial_unit_record_type (osid.type.Type): a spatial\n",
" unit record Type\n",
" return: (boolean) - ``true`` if the type is supported, ``false``\n",
" otherwise\n",
" raise: IllegalState - syntax is not an ``SPATIALUNIT``\n",
" raise: NullArgument - ``spatial_unit_record_type`` is ``null``\n",
" *compliance: mandatory -- This method must be implemented.*\n",
"\n",
" \"\"\"\n",
" # Implemented from template for osid.Metadata.supports_coordinate_type\n",
" from .osid_errors import IllegalState, NullArgument\n",
" if not spatial_unit_record_type:\n",
" raise NullArgument('no input Type provided')\n",
" if self._kwargs['syntax'] not in ['``SPATIALUNIT``']:\n",
" raise IllegalState('put more meaninful message here')\n",
" return spatial_unit_record_type in self.get_spatial_unit_record_types"
] | [
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988
] | 19 | 0.001446 |
def to_nodename(string, invalid=None, raise_exc=False):
"""Makes a Quilt Node name (perhaps an ugly one) out of any string.
This should match whatever the current definition of a node name is, as
defined in is_nodename().
This isn't an isomorphic change, the original name can't be recovered
from the change in all cases, so it must be stored separately (`FileNode`
metadata)
If `invalid` is given, it should be an iterable of names that the returned
string cannot match -- for example, other node names.
If `raise_exc` is True, an exception is raised when the converted string
is present in `invalid`. Otherwise, the converted string will have a
number appended to its name.
Example:
# replace special chars -> remove prefix underscores -> rename keywords
# '!if' -> '_if' -> 'if' -> 'if_'
>>> to_nodename('!if') -> 'if_'
>>> to_nodename('if', ['if_']) -> 'if__2'
>>> to_nodename('9#blah') -> 'n9_blah'
>>> to_nodename('9:blah', ['n9_blah', 'n9_blah_2']) -> 'n9_blah_3'
:param string: string to convert to a nodename
:param invalid: Container of names to avoid. Efficiency: Use set or dict
:param raise_exc: Raise an exception on name conflicts if truthy.
:returns: valid node name
"""
string = to_identifier(string)
#TODO: Remove this stanza once keywords are permissible in nodenames
if keyword.iskeyword(string):
string += '_' # there are no keywords ending in "_"
# Done if no deduplication needed
if invalid is None:
return string
# Deduplicate
if string in invalid and raise_exc:
raise QuiltException("Conflicting node name after string conversion: {!r}".format(string))
result = string
counter = 1
while result in invalid:
# first conflicted name will be "somenode_2"
# The result is "somenode", "somenode_2", "somenode_3"..
counter += 1
result = "{}_{}".format(string, counter)
return result | [
"def",
"to_nodename",
"(",
"string",
",",
"invalid",
"=",
"None",
",",
"raise_exc",
"=",
"False",
")",
":",
"string",
"=",
"to_identifier",
"(",
"string",
")",
"#TODO: Remove this stanza once keywords are permissible in nodenames",
"if",
"keyword",
".",
"iskeyword",
"(",
"string",
")",
":",
"string",
"+=",
"'_'",
"# there are no keywords ending in \"_\"",
"# Done if no deduplication needed",
"if",
"invalid",
"is",
"None",
":",
"return",
"string",
"# Deduplicate",
"if",
"string",
"in",
"invalid",
"and",
"raise_exc",
":",
"raise",
"QuiltException",
"(",
"\"Conflicting node name after string conversion: {!r}\"",
".",
"format",
"(",
"string",
")",
")",
"result",
"=",
"string",
"counter",
"=",
"1",
"while",
"result",
"in",
"invalid",
":",
"# first conflicted name will be \"somenode_2\"",
"# The result is \"somenode\", \"somenode_2\", \"somenode_3\"..",
"counter",
"+=",
"1",
"result",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"string",
",",
"counter",
")",
"return",
"result"
] | 36.886792 | 0.001495 | [
"def to_nodename(string, invalid=None, raise_exc=False):\n",
" \"\"\"Makes a Quilt Node name (perhaps an ugly one) out of any string.\n",
"\n",
" This should match whatever the current definition of a node name is, as\n",
" defined in is_nodename().\n",
"\n",
" This isn't an isomorphic change, the original name can't be recovered\n",
" from the change in all cases, so it must be stored separately (`FileNode`\n",
" metadata)\n",
"\n",
" If `invalid` is given, it should be an iterable of names that the returned\n",
" string cannot match -- for example, other node names.\n",
"\n",
" If `raise_exc` is True, an exception is raised when the converted string\n",
" is present in `invalid`. Otherwise, the converted string will have a\n",
" number appended to its name.\n",
"\n",
" Example:\n",
" # replace special chars -> remove prefix underscores -> rename keywords\n",
" # '!if' -> '_if' -> 'if' -> 'if_'\n",
" >>> to_nodename('!if') -> 'if_'\n",
" >>> to_nodename('if', ['if_']) -> 'if__2'\n",
" >>> to_nodename('9#blah') -> 'n9_blah'\n",
" >>> to_nodename('9:blah', ['n9_blah', 'n9_blah_2']) -> 'n9_blah_3'\n",
"\n",
" :param string: string to convert to a nodename\n",
" :param invalid: Container of names to avoid. Efficiency: Use set or dict\n",
" :param raise_exc: Raise an exception on name conflicts if truthy.\n",
" :returns: valid node name\n",
" \"\"\"\n",
" string = to_identifier(string)\n",
"\n",
" #TODO: Remove this stanza once keywords are permissible in nodenames\n",
" if keyword.iskeyword(string):\n",
" string += '_' # there are no keywords ending in \"_\"\n",
"\n",
" # Done if no deduplication needed\n",
" if invalid is None:\n",
" return string\n",
"\n",
" # Deduplicate\n",
" if string in invalid and raise_exc:\n",
" raise QuiltException(\"Conflicting node name after string conversion: {!r}\".format(string))\n",
"\n",
" result = string\n",
" counter = 1\n",
" while result in invalid:\n",
" # first conflicted name will be \"somenode_2\"\n",
" # The result is \"somenode\", \"somenode_2\", \"somenode_3\"..\n",
" counter += 1\n",
" result = \"{}_{}\".format(string, counter)\n",
"\n",
" return result"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 53 | 0.001559 |
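A small sketch of incremental deduplication with the `invalid` container, consistent with the doctest-style examples in the docstring above:

taken = set()
for raw in ('9#blah', '9:blah', '9!blah'):
    name = to_nodename(raw, invalid=taken)  # avoids every name already taken
    taken.add(name)
print(sorted(taken))  # ['n9_blah', 'n9_blah_2', 'n9_blah_3']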
def submit(self, fn, *args, **kwargs):
""" Internal """
if not self.is_shutdown:
return self.cluster.executor.submit(fn, *args, **kwargs) | [
"def",
"submit",
"(",
"self",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"is_shutdown",
":",
"return",
"self",
".",
"cluster",
".",
"executor",
".",
"submit",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 40.5 | 0.012121 | [
"def submit(self, fn, *args, **kwargs):\n",
" \"\"\" Internal \"\"\"\n",
" if not self.is_shutdown:\n",
" return self.cluster.executor.submit(fn, *args, **kwargs)"
] | [
0,
0.04,
0,
0.014705882352941176
] | 4 | 0.013676 |
def http_quote(string):
"""
Given a unicode string, will do its dandiest to give you back a
valid ascii charset string you can use in, say, http headers and the
like.
"""
if isinstance(string, six.text_type):
try:
import unidecode
except ImportError:
pass
else:
string = unidecode.unidecode(string)
string = string.encode('ascii', 'replace')
# Wrap in double-quotes for ; , and the like
string = string.replace(b'\\', b'\\\\').replace(b'"', b'\\"')
return '"{0!s}"'.format(string.decode()) | [
"def",
"http_quote",
"(",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"text_type",
")",
":",
"try",
":",
"import",
"unidecode",
"except",
"ImportError",
":",
"pass",
"else",
":",
"string",
"=",
"unidecode",
".",
"unidecode",
"(",
"string",
")",
"string",
"=",
"string",
".",
"encode",
"(",
"'ascii'",
",",
"'replace'",
")",
"# Wrap in double-quotes for ; , and the like",
"string",
"=",
"string",
".",
"replace",
"(",
"b'\\\\'",
",",
"b'\\\\\\\\'",
")",
".",
"replace",
"(",
"b'\"'",
",",
"b'\\\\\"'",
")",
"return",
"'\"{0!s}\"'",
".",
"format",
"(",
"string",
".",
"decode",
"(",
")",
")"
] | 33.941176 | 0.001686 | [
"def http_quote(string):\n",
" \"\"\"\n",
" Given a unicode string, will do its dandiest to give you back a\n",
" valid ascii charset string you can use in, say, http headers and the\n",
" like.\n",
" \"\"\"\n",
" if isinstance(string, six.text_type):\n",
" try:\n",
" import unidecode\n",
" except ImportError:\n",
" pass\n",
" else:\n",
" string = unidecode.unidecode(string)\n",
" string = string.encode('ascii', 'replace')\n",
" # Wrap in double-quotes for ; , and the like\n",
" string = string.replace(b'\\\\', b'\\\\\\\\').replace(b'\"', b'\\\\\"')\n",
" return '\"{0!s}\"'.format(string.decode())"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728
] | 17 | 0.001337 |
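A usage sketch for building an HTTP header value, assuming the module-level `six` import the function relies on; with `unidecode` installed the accents are transliterated, otherwise replaced:

filename = http_quote(u'résumé.pdf')  # '"resume.pdf"' or '"r?sum?.pdf"'
header = 'attachment; filename={0}'.format(filename)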
def mk_token(opts, tdata):
'''
Mint a new token using the config option hash_type and store tdata with 'token' attribute set
to the token.
This module uses the hash of random 512 bytes as a token.
:param opts: Salt master config options
    :param tdata: Token data to be stored with 'token' attribute of this dict set to the token.
:returns: tdata with token if successful. Empty dict if failed.
'''
redis_client = _redis_client(opts)
if not redis_client:
return {}
hash_type = getattr(hashlib, opts.get('hash_type', 'md5'))
tok = six.text_type(hash_type(os.urandom(512)).hexdigest())
try:
while redis_client.get(tok) is not None:
tok = six.text_type(hash_type(os.urandom(512)).hexdigest())
except Exception as err:
log.warning(
'Authentication failure: cannot get token %s from redis: %s',
tok, err
)
return {}
tdata['token'] = tok
serial = salt.payload.Serial(opts)
try:
redis_client.set(tok, serial.dumps(tdata))
except Exception as err:
log.warning(
'Authentication failure: cannot save token %s to redis: %s',
tok, err
)
return {}
return tdata | [
"def",
"mk_token",
"(",
"opts",
",",
"tdata",
")",
":",
"redis_client",
"=",
"_redis_client",
"(",
"opts",
")",
"if",
"not",
"redis_client",
":",
"return",
"{",
"}",
"hash_type",
"=",
"getattr",
"(",
"hashlib",
",",
"opts",
".",
"get",
"(",
"'hash_type'",
",",
"'md5'",
")",
")",
"tok",
"=",
"six",
".",
"text_type",
"(",
"hash_type",
"(",
"os",
".",
"urandom",
"(",
"512",
")",
")",
".",
"hexdigest",
"(",
")",
")",
"try",
":",
"while",
"redis_client",
".",
"get",
"(",
"tok",
")",
"is",
"not",
"None",
":",
"tok",
"=",
"six",
".",
"text_type",
"(",
"hash_type",
"(",
"os",
".",
"urandom",
"(",
"512",
")",
")",
".",
"hexdigest",
"(",
")",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"warning",
"(",
"'Authentication failure: cannot get token %s from redis: %s'",
",",
"tok",
",",
"err",
")",
"return",
"{",
"}",
"tdata",
"[",
"'token'",
"]",
"=",
"tok",
"serial",
"=",
"salt",
".",
"payload",
".",
"Serial",
"(",
"opts",
")",
"try",
":",
"redis_client",
".",
"set",
"(",
"tok",
",",
"serial",
".",
"dumps",
"(",
"tdata",
")",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"warning",
"(",
"'Authentication failure: cannot save token %s to redis: %s'",
",",
"tok",
",",
"err",
")",
"return",
"{",
"}",
"return",
"tdata"
] | 34.857143 | 0.002392 | [
"def mk_token(opts, tdata):\n",
" '''\n",
" Mint a new token using the config option hash_type and store tdata with 'token' attribute set\n",
" to the token.\n",
" This module uses the hash of random 512 bytes as a token.\n",
"\n",
" :param opts: Salt master config options\n",
" :param tdata: Token data to be stored with 'token' attirbute of this dict set to the token.\n",
" :returns: tdata with token if successful. Empty dict if failed.\n",
" '''\n",
" redis_client = _redis_client(opts)\n",
" if not redis_client:\n",
" return {}\n",
" hash_type = getattr(hashlib, opts.get('hash_type', 'md5'))\n",
" tok = six.text_type(hash_type(os.urandom(512)).hexdigest())\n",
" try:\n",
" while redis_client.get(tok) is not None:\n",
" tok = six.text_type(hash_type(os.urandom(512)).hexdigest())\n",
" except Exception as err:\n",
" log.warning(\n",
" 'Authentication failure: cannot get token %s from redis: %s',\n",
" tok, err\n",
" )\n",
" return {}\n",
" tdata['token'] = tok\n",
" serial = salt.payload.Serial(opts)\n",
" try:\n",
" redis_client.set(tok, serial.dumps(tdata))\n",
" except Exception as err:\n",
" log.warning(\n",
" 'Authentication failure: cannot save token %s to redis: %s',\n",
" tok, err\n",
" )\n",
" return {}\n",
" return tdata"
] | [
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 35 | 0.002375 |
def _urlopen_as_json(self, url, headers=None):
"""Shorcut for return contents as json"""
req = Request(url, headers=headers)
return json.loads(urlopen(req).read()) | [
"def",
"_urlopen_as_json",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"None",
")",
":",
"req",
"=",
"Request",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"return",
"json",
".",
"loads",
"(",
"urlopen",
"(",
"req",
")",
".",
"read",
"(",
")",
")"
] | 46 | 0.010695 | [
"def _urlopen_as_json(self, url, headers=None):\n",
" \"\"\"Shorcut for return contents as json\"\"\"\n",
" req = Request(url, headers=headers)\n",
" return json.loads(urlopen(req).read())"
] | [
0,
0.02,
0,
0.021739130434782608
] | 4 | 0.010435 |
def _read_mode_qsopt(self, size, kind):
"""Read Quick-Start Response option.
Positional arguments:
* size - int, length of option
* kind - int, 27 (Quick-Start Response)
Returns:
* dict -- extracted Quick-Start Response (QS) option
Structure of TCP QSopt [RFC 4782]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Kind | Length=8 | Resv. | Rate | TTL Diff |
| | | |Request| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| QS Nonce | R |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 tcp.qs.kind Kind (27)
1 8 tcp.qs.length Length (8)
2 16 - Reserved (must be zero)
2 20 tcp.qs.req_rate Request Rate
3 24 tcp.qs.ttl_diff TTL Difference
4 32 tcp.qs.nounce QS Nounce
7 62 - Reserved (must be zero)
"""
rvrr = self._read_binary(1)
ttld = self._read_unpack(1)
noun = self._read_fileng(4)
data = dict(
kind=kind,
length=size,
req_rate=int(rvrr[4:], base=2),
ttl_diff=ttld,
nounce=noun[:-2],
)
return data | [
"def",
"_read_mode_qsopt",
"(",
"self",
",",
"size",
",",
"kind",
")",
":",
"rvrr",
"=",
"self",
".",
"_read_binary",
"(",
"1",
")",
"ttld",
"=",
"self",
".",
"_read_unpack",
"(",
"1",
")",
"noun",
"=",
"self",
".",
"_read_fileng",
"(",
"4",
")",
"data",
"=",
"dict",
"(",
"kind",
"=",
"kind",
",",
"length",
"=",
"size",
",",
"req_rate",
"=",
"int",
"(",
"rvrr",
"[",
"4",
":",
"]",
",",
"base",
"=",
"2",
")",
",",
"ttl_diff",
"=",
"ttld",
",",
"nounce",
"=",
"noun",
"[",
":",
"-",
"2",
"]",
",",
")",
"return",
"data"
] | 42.093023 | 0.00108 | [
"def _read_mode_qsopt(self, size, kind):\n",
" \"\"\"Read Quick-Start Response option.\n",
"\n",
" Positional arguments:\n",
" * size - int, length of option\n",
" * kind - int, 27 (Quick-Start Response)\n",
"\n",
" Returns:\n",
" * dict -- extracted Quick-Start Response (QS) option\n",
"\n",
" Structure of TCP QSopt [RFC 4782]:\n",
" 0 1 2 3\n",
" 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n",
" +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n",
" | Kind | Length=8 | Resv. | Rate | TTL Diff |\n",
" | | | |Request| |\n",
" +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n",
" | QS Nonce | R |\n",
" +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n",
"\n",
" Octets Bits Name Description\n",
" 0 0 tcp.qs.kind Kind (27)\n",
" 1 8 tcp.qs.length Length (8)\n",
" 2 16 - Reserved (must be zero)\n",
" 2 20 tcp.qs.req_rate Request Rate\n",
" 3 24 tcp.qs.ttl_diff TTL Difference\n",
" 4 32 tcp.qs.nounce QS Nounce\n",
" 7 62 - Reserved (must be zero)\n",
"\n",
" \"\"\"\n",
" rvrr = self._read_binary(1)\n",
" ttld = self._read_unpack(1)\n",
" noun = self._read_fileng(4)\n",
"\n",
" data = dict(\n",
" kind=kind,\n",
" length=size,\n",
" req_rate=int(rvrr[4:], base=2),\n",
" ttl_diff=ttld,\n",
" nounce=noun[:-2],\n",
" )\n",
"\n",
" return data"
] | [
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 43 | 0.001741 |
def msetnx(self, mapping):
"""Sets the given keys to their respective values.
:meth:`~tredis.RedisClient.msetnx` will not perform any operation at
all even if just a single key already exists.
Because of this semantic :meth:`~tredis.RedisClient.msetnx` can be used
        in order to set different keys representing different fields of a
unique logic object in a way that ensures that either all the fields or
none at all are set.
:meth:`~tredis.RedisClient.msetnx` is atomic, so all given keys are set
at once. It is not possible for clients to see that some of the keys
were updated while others are unchanged.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of
keys to set.
:param dict mapping: A mapping of key/value pairs to set
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
command = [b'MSETNX']
for key, value in mapping.items():
command += [key, value]
return self._execute(command, 1) | [
"def",
"msetnx",
"(",
"self",
",",
"mapping",
")",
":",
"command",
"=",
"[",
"b'MSETNX'",
"]",
"for",
"key",
",",
"value",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"command",
"+=",
"[",
"key",
",",
"value",
"]",
"return",
"self",
".",
"_execute",
"(",
"command",
",",
"1",
")"
] | 39.285714 | 0.001775 | [
"def msetnx(self, mapping):\n",
" \"\"\"Sets the given keys to their respective values.\n",
" :meth:`~tredis.RedisClient.msetnx` will not perform any operation at\n",
" all even if just a single key already exists.\n",
"\n",
" Because of this semantic :meth:`~tredis.RedisClient.msetnx` can be used\n",
" in order to set different keys representing different fields of an\n",
" unique logic object in a way that ensures that either all the fields or\n",
" none at all are set.\n",
"\n",
" :meth:`~tredis.RedisClient.msetnx` is atomic, so all given keys are set\n",
" at once. It is not possible for clients to see that some of the keys\n",
" were updated while others are unchanged.\n",
"\n",
" .. versionadded:: 0.2.0\n",
"\n",
" .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of\n",
" keys to set.\n",
"\n",
" :param dict mapping: A mapping of key/value pairs to set\n",
" :rtype: bool\n",
" :raises: :exc:`~tredis.exceptions.RedisError`\n",
"\n",
" \"\"\"\n",
" command = [b'MSETNX']\n",
" for key, value in mapping.items():\n",
" command += [key, value]\n",
" return self._execute(command, 1)"
] | [
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025
] | 28 | 0.001498 |
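A coroutine-style usage sketch, assuming a connected tredis `client` running under a Tornado IOLoop; the method returns a future, so it is yielded:

from tornado import gen

@gen.coroutine
def create_user():
    ok = yield client.msetnx({b'user:1:name': b'ada', b'user:1:role': b'admin'})
    # ok is True only if *neither* key existed beforehand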
def tail_ratio(returns):
"""Determines the ratio between the right (95%) and left tail (5%).
For example, a ratio of 0.25 means that losses are four times
as bad as profits.
Parameters
----------
returns : pd.Series or np.ndarray
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
Returns
-------
tail_ratio : float
"""
if len(returns) < 1:
return np.nan
returns = np.asanyarray(returns)
# Be tolerant of nan's
returns = returns[~np.isnan(returns)]
if len(returns) < 1:
return np.nan
return np.abs(np.percentile(returns, 95)) / \
np.abs(np.percentile(returns, 5)) | [
"def",
"tail_ratio",
"(",
"returns",
")",
":",
"if",
"len",
"(",
"returns",
")",
"<",
"1",
":",
"return",
"np",
".",
"nan",
"returns",
"=",
"np",
".",
"asanyarray",
"(",
"returns",
")",
"# Be tolerant of nan's",
"returns",
"=",
"returns",
"[",
"~",
"np",
".",
"isnan",
"(",
"returns",
")",
"]",
"if",
"len",
"(",
"returns",
")",
"<",
"1",
":",
"return",
"np",
".",
"nan",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"percentile",
"(",
"returns",
",",
"95",
")",
")",
"/",
"np",
".",
"abs",
"(",
"np",
".",
"percentile",
"(",
"returns",
",",
"5",
")",
")"
] | 25.214286 | 0.001364 | [
"def tail_ratio(returns):\n",
" \"\"\"Determines the ratio between the right (95%) and left tail (5%).\n",
"\n",
" For example, a ratio of 0.25 means that losses are four times\n",
" as bad as profits.\n",
"\n",
" Parameters\n",
" ----------\n",
" returns : pd.Series or np.ndarray\n",
" Daily returns of the strategy, noncumulative.\n",
" - See full explanation in :func:`~empyrical.stats.cum_returns`.\n",
"\n",
" Returns\n",
" -------\n",
" tail_ratio : float\n",
" \"\"\"\n",
"\n",
" if len(returns) < 1:\n",
" return np.nan\n",
"\n",
" returns = np.asanyarray(returns)\n",
" # Be tolerant of nan's\n",
" returns = returns[~np.isnan(returns)]\n",
" if len(returns) < 1:\n",
" return np.nan\n",
"\n",
" return np.abs(np.percentile(returns, 95)) / \\\n",
" np.abs(np.percentile(returns, 5))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025
] | 28 | 0.000871 |
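A worked sketch with synthetic data; for roughly symmetric daily returns the ratio sits near 1, while values below 1 indicate that tail losses outweigh tail gains:

import numpy as np

rng = np.random.default_rng(0)
returns = rng.normal(0.0005, 0.01, 252)  # one synthetic trading year
print(tail_ratio(returns))               # close to 1 for symmetric noise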
def add(self, rule: 'functions.ReplacementRule') -> None:
"""Add a new rule to the replacer.
Args:
rule:
The rule to add.
"""
self.matcher.add(rule.pattern, rule.replacement) | [
"def",
"add",
"(",
"self",
",",
"rule",
":",
"'functions.ReplacementRule'",
")",
"->",
"None",
":",
"self",
".",
"matcher",
".",
"add",
"(",
"rule",
".",
"pattern",
",",
"rule",
".",
"replacement",
")"
] | 28.5 | 0.008511 | [
"def add(self, rule: 'functions.ReplacementRule') -> None:\n",
" \"\"\"Add a new rule to the replacer.\n",
"\n",
" Args:\n",
" rule:\n",
" The rule to add.\n",
" \"\"\"\n",
" self.matcher.add(rule.pattern, rule.replacement)"
] | [
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0.017857142857142856
] | 8 | 0.005139 |
def print_markdown(data, title=None):
"""Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
"""
markdown = []
for key, value in data.items():
if isinstance(value, basestring_) and Path(value).exists():
continue
markdown.append("* **{}:** {}".format(key, unicode_(value)))
if title:
print("\n## {}".format(title))
print("\n{}\n".format("\n".join(markdown))) | [
"def",
"print_markdown",
"(",
"data",
",",
"title",
"=",
"None",
")",
":",
"markdown",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring_",
")",
"and",
"Path",
"(",
"value",
")",
".",
"exists",
"(",
")",
":",
"continue",
"markdown",
".",
"append",
"(",
"\"* **{}:** {}\"",
".",
"format",
"(",
"key",
",",
"unicode_",
"(",
"value",
")",
")",
")",
"if",
"title",
":",
"print",
"(",
"\"\\n## {}\"",
".",
"format",
"(",
"title",
")",
")",
"print",
"(",
"\"\\n{}\\n\"",
".",
"format",
"(",
"\"\\n\"",
".",
"join",
"(",
"markdown",
")",
")",
")"
] | 38.428571 | 0.001815 | [
"def print_markdown(data, title=None):\n",
" \"\"\"Print data in GitHub-flavoured Markdown format for issues etc.\n",
"\n",
" data (dict or list of tuples): Label/value pairs.\n",
" title (unicode or None): Title, will be rendered as headline 2.\n",
" \"\"\"\n",
" markdown = []\n",
" for key, value in data.items():\n",
" if isinstance(value, basestring_) and Path(value).exists():\n",
" continue\n",
" markdown.append(\"* **{}:** {}\".format(key, unicode_(value)))\n",
" if title:\n",
" print(\"\\n## {}\".format(title))\n",
" print(\"\\n{}\\n\".format(\"\\n\".join(markdown)))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085
] | 14 | 0.00152 |
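A usage sketch, assuming the `basestring_` and `unicode_` compatibility shims the function references are importable from the same module; note that values which are existing filesystem paths are skipped:

print_markdown({'Version': '2.0.0', 'Platform': 'linux'}, title='Info')
# ## Info
# * **Version:** 2.0.0
# * **Platform:** linux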
def get_img(self, url, headers=None, cookies=None, timeout=60, verify=False, proxies=None, allow_redirects=True,
params=None):
"""
        Fetch image binary data via an HTTP GET request
        :param url: URL to access
        :param headers: request headers
        :param cookies: request cookies
        :param timeout: timeout in seconds
        :param verify: SSL certificate verification
        :param proxies: proxies to use
        :param allow_redirects: whether to allow redirects
        :param encoding: encoding of the returned HTML
        :param params: query string parameters
        :return: binary image data
"""
if self.session:
r = self.session.get(url, headers=odict(headers), cookies=cookies, timeout=timeout, verify=verify,
proxies=proxies, allow_redirects=allow_redirects, params=params)
else:
r = requests.get(url, headers=odict(headers), cookies=cookies, timeout=timeout, verify=verify,
proxies=proxies, allow_redirects=allow_redirects, params=params)
r.raise_for_status()
return r.content | [
"def",
"get_img",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"None",
",",
"cookies",
"=",
"None",
",",
"timeout",
"=",
"60",
",",
"verify",
"=",
"False",
",",
"proxies",
"=",
"None",
",",
"allow_redirects",
"=",
"True",
",",
"params",
"=",
"None",
")",
":",
"if",
"self",
".",
"session",
":",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"odict",
"(",
"headers",
")",
",",
"cookies",
"=",
"cookies",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"proxies",
"=",
"proxies",
",",
"allow_redirects",
"=",
"allow_redirects",
",",
"params",
"=",
"params",
")",
"else",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"odict",
"(",
"headers",
")",
",",
"cookies",
"=",
"cookies",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"proxies",
"=",
"proxies",
",",
"allow_redirects",
"=",
"allow_redirects",
",",
"params",
"=",
"params",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"r",
".",
"content"
] | 42.478261 | 0.008008 | [
"def get_img(self, url, headers=None, cookies=None, timeout=60, verify=False, proxies=None, allow_redirects=True,\n",
" params=None):\n",
" \"\"\"\n",
" get方式获取 img 二进制信息\n",
" :param url: 访问Url\n",
" :param headers: 请求头\n",
" :param cookies: 请求cookies\n",
" :param timeout: 超时时间\n",
" :param verify: ssl验证\n",
" :param proxies: 代理\n",
" :param allow_redirects: 是否允许重定向\n",
" :param encoding: 返回的html编码s\n",
" :param params: 查询请求参数\n",
" :return: 二进制图片数据\n",
" \"\"\"\n",
" if self.session:\n",
" r = self.session.get(url, headers=odict(headers), cookies=cookies, timeout=timeout, verify=verify,\n",
" proxies=proxies, allow_redirects=allow_redirects, params=params)\n",
" else:\n",
" r = requests.get(url, headers=odict(headers), cookies=cookies, timeout=timeout, verify=verify,\n",
" proxies=proxies, allow_redirects=allow_redirects, params=params)\n",
" r.raise_for_status()\n",
" return r.content"
] | [
0.008849557522123894,
0.03333333333333333,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0.01020408163265306,
0,
0.009345794392523364,
0.010638297872340425,
0,
0.041666666666666664
] | 23 | 0.008973 |
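A usage sketch; `crawler` stands for an instance of the owning class (whose name is not shown here), and the URL is illustrative:

crawler = Crawler()  # hypothetical owner class exposing get_img
data = crawler.get_img('https://example.com/logo.png', timeout=10)
with open('logo.png', 'wb') as f:
    f.write(data)    # raw bytes from r.content, safe to write directly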
def _other_wrapper(self, name, writing):
"""Wrap a stream attribute in an other_wrapper.
Args:
name: the name of the stream attribute to wrap.
Returns:
other_wrapper which is described below.
"""
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
"""Wrap all other calls to the stream Object.
We do this to track changes to the write pointer. Anything that
moves the write pointer in a file open for appending should move
the read pointer as well.
Args:
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
Wrapped stream object method.
"""
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0
if not writing or not IS_PY2:
return ret_value
return other_wrapper | [
"def",
"_other_wrapper",
"(",
"self",
",",
"name",
",",
"writing",
")",
":",
"io_attr",
"=",
"getattr",
"(",
"self",
".",
"_io",
",",
"name",
")",
"def",
"other_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrap all other calls to the stream Object.\n\n We do this to track changes to the write pointer. Anything that\n moves the write pointer in a file open for appending should move\n the read pointer as well.\n\n Args:\n *args: Pass through args.\n **kwargs: Pass through kwargs.\n\n Returns:\n Wrapped stream object method.\n \"\"\"",
"write_seek",
"=",
"self",
".",
"_io",
".",
"tell",
"(",
")",
"ret_value",
"=",
"io_attr",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"write_seek",
"!=",
"self",
".",
"_io",
".",
"tell",
"(",
")",
":",
"self",
".",
"_read_seek",
"=",
"self",
".",
"_io",
".",
"tell",
"(",
")",
"self",
".",
"_read_whence",
"=",
"0",
"if",
"not",
"writing",
"or",
"not",
"IS_PY2",
":",
"return",
"ret_value",
"return",
"other_wrapper"
] | 31.617647 | 0.001805 | [
"def _other_wrapper(self, name, writing):\n",
" \"\"\"Wrap a stream attribute in an other_wrapper.\n",
"\n",
" Args:\n",
" name: the name of the stream attribute to wrap.\n",
"\n",
" Returns:\n",
" other_wrapper which is described below.\n",
" \"\"\"\n",
" io_attr = getattr(self._io, name)\n",
"\n",
" def other_wrapper(*args, **kwargs):\n",
" \"\"\"Wrap all other calls to the stream Object.\n",
"\n",
" We do this to track changes to the write pointer. Anything that\n",
" moves the write pointer in a file open for appending should move\n",
" the read pointer as well.\n",
"\n",
" Args:\n",
" *args: Pass through args.\n",
" **kwargs: Pass through kwargs.\n",
"\n",
" Returns:\n",
" Wrapped stream object method.\n",
" \"\"\"\n",
" write_seek = self._io.tell()\n",
" ret_value = io_attr(*args, **kwargs)\n",
" if write_seek != self._io.tell():\n",
" self._read_seek = self._io.tell()\n",
" self._read_whence = 0\n",
" if not writing or not IS_PY2:\n",
" return ret_value\n",
"\n",
" return other_wrapper"
] | [
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571
] | 34 | 0.001576 |
def register_signals(self):
"""Register signals."""
from .models import Collection
from .receivers import CollectionUpdater
if self.app.config['COLLECTIONS_USE_PERCOLATOR']:
from .percolator import collection_inserted_percolator, \
collection_removed_percolator, \
collection_updated_percolator
# Register collection signals to update percolators
listen(Collection, 'after_insert',
collection_inserted_percolator)
listen(Collection, 'after_update',
collection_updated_percolator)
listen(Collection, 'after_delete',
collection_removed_percolator)
# Register Record signals to update record['_collections']
self.update_function = CollectionUpdater(app=self.app)
signals.before_record_insert.connect(self.update_function,
weak=False)
signals.before_record_update.connect(self.update_function,
weak=False) | [
"def",
"register_signals",
"(",
"self",
")",
":",
"from",
".",
"models",
"import",
"Collection",
"from",
".",
"receivers",
"import",
"CollectionUpdater",
"if",
"self",
".",
"app",
".",
"config",
"[",
"'COLLECTIONS_USE_PERCOLATOR'",
"]",
":",
"from",
".",
"percolator",
"import",
"collection_inserted_percolator",
",",
"collection_removed_percolator",
",",
"collection_updated_percolator",
"# Register collection signals to update percolators",
"listen",
"(",
"Collection",
",",
"'after_insert'",
",",
"collection_inserted_percolator",
")",
"listen",
"(",
"Collection",
",",
"'after_update'",
",",
"collection_updated_percolator",
")",
"listen",
"(",
"Collection",
",",
"'after_delete'",
",",
"collection_removed_percolator",
")",
"# Register Record signals to update record['_collections']",
"self",
".",
"update_function",
"=",
"CollectionUpdater",
"(",
"app",
"=",
"self",
".",
"app",
")",
"signals",
".",
"before_record_insert",
".",
"connect",
"(",
"self",
".",
"update_function",
",",
"weak",
"=",
"False",
")",
"signals",
".",
"before_record_update",
".",
"connect",
"(",
"self",
".",
"update_function",
",",
"weak",
"=",
"False",
")"
] | 49.272727 | 0.00181 | [
"def register_signals(self):\n",
" \"\"\"Register signals.\"\"\"\n",
" from .models import Collection\n",
" from .receivers import CollectionUpdater\n",
"\n",
" if self.app.config['COLLECTIONS_USE_PERCOLATOR']:\n",
" from .percolator import collection_inserted_percolator, \\\n",
" collection_removed_percolator, \\\n",
" collection_updated_percolator\n",
" # Register collection signals to update percolators\n",
" listen(Collection, 'after_insert',\n",
" collection_inserted_percolator)\n",
" listen(Collection, 'after_update',\n",
" collection_updated_percolator)\n",
" listen(Collection, 'after_delete',\n",
" collection_removed_percolator)\n",
" # Register Record signals to update record['_collections']\n",
" self.update_function = CollectionUpdater(app=self.app)\n",
" signals.before_record_insert.connect(self.update_function,\n",
" weak=False)\n",
" signals.before_record_update.connect(self.update_function,\n",
" weak=False)"
] | [
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856
] | 22 | 0.002232 |
def Lab_to_XYZ(cobj, *args, **kwargs):
"""
Convert from Lab to XYZ
"""
illum = cobj.get_illuminant_xyz()
xyz_y = (cobj.lab_l + 16.0) / 116.0
xyz_x = cobj.lab_a / 500.0 + xyz_y
xyz_z = xyz_y - cobj.lab_b / 200.0
if math.pow(xyz_y, 3) > color_constants.CIE_E:
xyz_y = math.pow(xyz_y, 3)
else:
xyz_y = (xyz_y - 16.0 / 116.0) / 7.787
if math.pow(xyz_x, 3) > color_constants.CIE_E:
xyz_x = math.pow(xyz_x, 3)
else:
xyz_x = (xyz_x - 16.0 / 116.0) / 7.787
if math.pow(xyz_z, 3) > color_constants.CIE_E:
xyz_z = math.pow(xyz_z, 3)
else:
xyz_z = (xyz_z - 16.0 / 116.0) / 7.787
xyz_x = (illum["X"] * xyz_x)
xyz_y = (illum["Y"] * xyz_y)
xyz_z = (illum["Z"] * xyz_z)
return XYZColor(
xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant) | [
"def",
"Lab_to_XYZ",
"(",
"cobj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"illum",
"=",
"cobj",
".",
"get_illuminant_xyz",
"(",
")",
"xyz_y",
"=",
"(",
"cobj",
".",
"lab_l",
"+",
"16.0",
")",
"/",
"116.0",
"xyz_x",
"=",
"cobj",
".",
"lab_a",
"/",
"500.0",
"+",
"xyz_y",
"xyz_z",
"=",
"xyz_y",
"-",
"cobj",
".",
"lab_b",
"/",
"200.0",
"if",
"math",
".",
"pow",
"(",
"xyz_y",
",",
"3",
")",
">",
"color_constants",
".",
"CIE_E",
":",
"xyz_y",
"=",
"math",
".",
"pow",
"(",
"xyz_y",
",",
"3",
")",
"else",
":",
"xyz_y",
"=",
"(",
"xyz_y",
"-",
"16.0",
"/",
"116.0",
")",
"/",
"7.787",
"if",
"math",
".",
"pow",
"(",
"xyz_x",
",",
"3",
")",
">",
"color_constants",
".",
"CIE_E",
":",
"xyz_x",
"=",
"math",
".",
"pow",
"(",
"xyz_x",
",",
"3",
")",
"else",
":",
"xyz_x",
"=",
"(",
"xyz_x",
"-",
"16.0",
"/",
"116.0",
")",
"/",
"7.787",
"if",
"math",
".",
"pow",
"(",
"xyz_z",
",",
"3",
")",
">",
"color_constants",
".",
"CIE_E",
":",
"xyz_z",
"=",
"math",
".",
"pow",
"(",
"xyz_z",
",",
"3",
")",
"else",
":",
"xyz_z",
"=",
"(",
"xyz_z",
"-",
"16.0",
"/",
"116.0",
")",
"/",
"7.787",
"xyz_x",
"=",
"(",
"illum",
"[",
"\"X\"",
"]",
"*",
"xyz_x",
")",
"xyz_y",
"=",
"(",
"illum",
"[",
"\"Y\"",
"]",
"*",
"xyz_y",
")",
"xyz_z",
"=",
"(",
"illum",
"[",
"\"Z\"",
"]",
"*",
"xyz_z",
")",
"return",
"XYZColor",
"(",
"xyz_x",
",",
"xyz_y",
",",
"xyz_z",
",",
"observer",
"=",
"cobj",
".",
"observer",
",",
"illuminant",
"=",
"cobj",
".",
"illuminant",
")"
] | 28.133333 | 0.002291 | [
"def Lab_to_XYZ(cobj, *args, **kwargs):\n",
" \"\"\"\n",
" Convert from Lab to XYZ\n",
" \"\"\"\n",
" illum = cobj.get_illuminant_xyz()\n",
" xyz_y = (cobj.lab_l + 16.0) / 116.0\n",
" xyz_x = cobj.lab_a / 500.0 + xyz_y\n",
" xyz_z = xyz_y - cobj.lab_b / 200.0\n",
"\n",
" if math.pow(xyz_y, 3) > color_constants.CIE_E:\n",
" xyz_y = math.pow(xyz_y, 3)\n",
" else:\n",
" xyz_y = (xyz_y - 16.0 / 116.0) / 7.787\n",
"\n",
" if math.pow(xyz_x, 3) > color_constants.CIE_E:\n",
" xyz_x = math.pow(xyz_x, 3)\n",
" else:\n",
" xyz_x = (xyz_x - 16.0 / 116.0) / 7.787\n",
"\n",
" if math.pow(xyz_z, 3) > color_constants.CIE_E:\n",
" xyz_z = math.pow(xyz_z, 3)\n",
" else:\n",
" xyz_z = (xyz_z - 16.0 / 116.0) / 7.787\n",
"\n",
" xyz_x = (illum[\"X\"] * xyz_x)\n",
" xyz_y = (illum[\"Y\"] * xyz_y)\n",
" xyz_z = (illum[\"Z\"] * xyz_z)\n",
"\n",
" return XYZColor(\n",
" xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025
] | 30 | 0.000833 |
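The record above is the standard CIE L*a*b* to XYZ inversion; a minimal standalone sketch of the same math, assuming a hard-coded D65/2-degree reference white in place of the cobj.get_illuminant_xyz() lookup, and the usual CIE epsilon threshold of 0.008856:

import math

def lab_to_xyz(L, a, b, white=(0.95047, 1.0, 1.08883)):
    """Convert CIE L*a*b* to XYZ; `white` is an assumed D65/2 deg default."""
    CIE_E = 0.008856  # threshold between the cubic and linear branches
    fy = (L + 16.0) / 116.0
    fx = a / 500.0 + fy
    fz = fy - b / 200.0
    def f_inv(t):
        return t ** 3 if t ** 3 > CIE_E else (t - 16.0 / 116.0) / 7.787
    return tuple(w * f_inv(t) for w, t in zip(white, (fx, fy, fz)))

print(lab_to_xyz(50.0, 10.0, -20.0))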
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram | [
"def",
"decode",
"(",
"encoded_histogram",
",",
"b64_wrap",
"=",
"True",
")",
":",
"hdr_payload",
"=",
"HdrHistogramEncoder",
".",
"decode",
"(",
"encoded_histogram",
",",
"b64_wrap",
")",
"payload",
"=",
"hdr_payload",
".",
"payload",
"histogram",
"=",
"HdrHistogram",
"(",
"payload",
".",
"lowest_trackable_value",
",",
"payload",
".",
"highest_trackable_value",
",",
"payload",
".",
"significant_figures",
",",
"hdr_payload",
"=",
"hdr_payload",
")",
"return",
"histogram"
] | 49.565217 | 0.001721 | [
"def decode(encoded_histogram, b64_wrap=True):\n",
" '''Decode an encoded histogram and return a new histogram instance that\n",
" has been initialized with the decoded content\n",
" Return:\n",
" a new histogram instance representing the decoded content\n",
" Exception:\n",
" TypeError in case of base64 decode error\n",
" HdrCookieException:\n",
" the main header has an invalid cookie\n",
" the compressed payload header has an invalid cookie\n",
" HdrLengthException:\n",
" the decompressed size is too small for the HdrPayload structure\n",
" or is not aligned or is too large for the passed payload class\n",
" zlib.error:\n",
" in case of zlib decompression error\n",
" '''\n",
" hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)\n",
" payload = hdr_payload.payload\n",
" histogram = HdrHistogram(payload.lowest_trackable_value,\n",
" payload.highest_trackable_value,\n",
" payload.significant_figures,\n",
" hdr_payload=hdr_payload)\n",
" return histogram"
] | [
0,
0.0125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664
] | 23 | 0.002355 |
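This decode function appears to come from the Python HdrHistogram package; a hedged round-trip usage sketch, assuming the hdrh.histogram module layout and its record_value/encode/decode API:

from hdrh.histogram import HdrHistogram  # assumed package layout

h = HdrHistogram(1, 60 * 60 * 1000, 2)   # 1 ms .. 1 h range, 2 sig. figures
for v in (5, 120, 3500):
    h.record_value(v)
encoded = h.encode()                      # base64-wrapped compressed payload
restored = HdrHistogram.decode(encoded)   # the function shown above
print(restored.get_value_at_percentile(99))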
def generate_supplied_intersect_subparser(subparsers):
"""Adds a sub-command parser to `subparsers` to run an intersect query
using the supplied results sets."""
parser = subparsers.add_parser(
'sintersect', description=constants.SUPPLIED_INTERSECT_DESCRIPTION,
epilog=constants.SUPPLIED_INTERSECT_EPILOG,
formatter_class=ParagraphFormatter,
help=constants.SUPPLIED_INTERSECT_HELP)
parser.set_defaults(func=supplied_intersect)
utils.add_common_arguments(parser)
utils.add_db_arguments(parser, True)
utils.add_supplied_query_arguments(parser) | [
"def",
"generate_supplied_intersect_subparser",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'sintersect'",
",",
"description",
"=",
"constants",
".",
"SUPPLIED_INTERSECT_DESCRIPTION",
",",
"epilog",
"=",
"constants",
".",
"SUPPLIED_INTERSECT_EPILOG",
",",
"formatter_class",
"=",
"ParagraphFormatter",
",",
"help",
"=",
"constants",
".",
"SUPPLIED_INTERSECT_HELP",
")",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"supplied_intersect",
")",
"utils",
".",
"add_common_arguments",
"(",
"parser",
")",
"utils",
".",
"add_db_arguments",
"(",
"parser",
",",
"True",
")",
"utils",
".",
"add_supplied_query_arguments",
"(",
"parser",
")"
] | 49.166667 | 0.001664 | [
"def generate_supplied_intersect_subparser(subparsers):\n",
" \"\"\"Adds a sub-command parser to `subparsers` to run an intersect query\n",
" using the supplied results sets.\"\"\"\n",
" parser = subparsers.add_parser(\n",
" 'sintersect', description=constants.SUPPLIED_INTERSECT_DESCRIPTION,\n",
" epilog=constants.SUPPLIED_INTERSECT_EPILOG,\n",
" formatter_class=ParagraphFormatter,\n",
" help=constants.SUPPLIED_INTERSECT_HELP)\n",
" parser.set_defaults(func=supplied_intersect)\n",
" utils.add_common_arguments(parser)\n",
" utils.add_db_arguments(parser, True)\n",
" utils.add_supplied_query_arguments(parser)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608
] | 12 | 0.001812 |
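The constants and utils helpers above are project-specific; a self-contained sketch of the same argparse sub-command pattern (add_parser plus set_defaults(func=...)), with hypothetical stand-ins inlined:

import argparse

def supplied_intersect(args):
    print('intersect of', args.results)

parser = argparse.ArgumentParser(prog='tacl')
subparsers = parser.add_subparsers()
sub = subparsers.add_parser('sintersect', help='intersect supplied results')
sub.add_argument('results', nargs='+')
sub.set_defaults(func=supplied_intersect)

args = parser.parse_args(['sintersect', 'a.csv', 'b.csv'])
args.func(args)  # dispatches to supplied_intersect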
def import_experience(self, experiences):
"""
Imports experiences.
Args:
experiences:
"""
if isinstance(experiences, dict):
if self.unique_state:
experiences['states'] = dict(state=experiences['states'])
if self.unique_action:
experiences['actions'] = dict(action=experiences['actions'])
self.model.import_experience(**experiences)
else:
if self.unique_state:
states = dict(state=list())
else:
states = {name: list() for name in experiences[0]['states']}
internals = [list() for _ in experiences[0]['internals']]
if self.unique_action:
actions = dict(action=list())
else:
actions = {name: list() for name in experiences[0]['actions']}
terminal = list()
reward = list()
for experience in experiences:
if self.unique_state:
states['state'].append(experience['states'])
else:
for name in sorted(states):
states[name].append(experience['states'][name])
for n, internal in enumerate(internals):
internal.append(experience['internals'][n])
if self.unique_action:
actions['action'].append(experience['actions'])
else:
for name in sorted(actions):
actions[name].append(experience['actions'][name])
terminal.append(experience['terminal'])
reward.append(experience['reward'])
self.model.import_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
) | [
"def",
"import_experience",
"(",
"self",
",",
"experiences",
")",
":",
"if",
"isinstance",
"(",
"experiences",
",",
"dict",
")",
":",
"if",
"self",
".",
"unique_state",
":",
"experiences",
"[",
"'states'",
"]",
"=",
"dict",
"(",
"state",
"=",
"experiences",
"[",
"'states'",
"]",
")",
"if",
"self",
".",
"unique_action",
":",
"experiences",
"[",
"'actions'",
"]",
"=",
"dict",
"(",
"action",
"=",
"experiences",
"[",
"'actions'",
"]",
")",
"self",
".",
"model",
".",
"import_experience",
"(",
"*",
"*",
"experiences",
")",
"else",
":",
"if",
"self",
".",
"unique_state",
":",
"states",
"=",
"dict",
"(",
"state",
"=",
"list",
"(",
")",
")",
"else",
":",
"states",
"=",
"{",
"name",
":",
"list",
"(",
")",
"for",
"name",
"in",
"experiences",
"[",
"0",
"]",
"[",
"'states'",
"]",
"}",
"internals",
"=",
"[",
"list",
"(",
")",
"for",
"_",
"in",
"experiences",
"[",
"0",
"]",
"[",
"'internals'",
"]",
"]",
"if",
"self",
".",
"unique_action",
":",
"actions",
"=",
"dict",
"(",
"action",
"=",
"list",
"(",
")",
")",
"else",
":",
"actions",
"=",
"{",
"name",
":",
"list",
"(",
")",
"for",
"name",
"in",
"experiences",
"[",
"0",
"]",
"[",
"'actions'",
"]",
"}",
"terminal",
"=",
"list",
"(",
")",
"reward",
"=",
"list",
"(",
")",
"for",
"experience",
"in",
"experiences",
":",
"if",
"self",
".",
"unique_state",
":",
"states",
"[",
"'state'",
"]",
".",
"append",
"(",
"experience",
"[",
"'states'",
"]",
")",
"else",
":",
"for",
"name",
"in",
"sorted",
"(",
"states",
")",
":",
"states",
"[",
"name",
"]",
".",
"append",
"(",
"experience",
"[",
"'states'",
"]",
"[",
"name",
"]",
")",
"for",
"n",
",",
"internal",
"in",
"enumerate",
"(",
"internals",
")",
":",
"internal",
".",
"append",
"(",
"experience",
"[",
"'internals'",
"]",
"[",
"n",
"]",
")",
"if",
"self",
".",
"unique_action",
":",
"actions",
"[",
"'action'",
"]",
".",
"append",
"(",
"experience",
"[",
"'actions'",
"]",
")",
"else",
":",
"for",
"name",
"in",
"sorted",
"(",
"actions",
")",
":",
"actions",
"[",
"name",
"]",
".",
"append",
"(",
"experience",
"[",
"'actions'",
"]",
"[",
"name",
"]",
")",
"terminal",
".",
"append",
"(",
"experience",
"[",
"'terminal'",
"]",
")",
"reward",
".",
"append",
"(",
"experience",
"[",
"'reward'",
"]",
")",
"self",
".",
"model",
".",
"import_experience",
"(",
"states",
"=",
"states",
",",
"internals",
"=",
"internals",
",",
"actions",
"=",
"actions",
",",
"terminal",
"=",
"terminal",
",",
"reward",
"=",
"reward",
")"
] | 37.078431 | 0.001546 | [
"def import_experience(self, experiences):\n",
" \"\"\"\n",
" Imports experiences.\n",
"\n",
" Args:\n",
" experiences: \n",
" \"\"\"\n",
" if isinstance(experiences, dict):\n",
" if self.unique_state:\n",
" experiences['states'] = dict(state=experiences['states'])\n",
" if self.unique_action:\n",
" experiences['actions'] = dict(action=experiences['actions'])\n",
"\n",
" self.model.import_experience(**experiences)\n",
"\n",
" else:\n",
" if self.unique_state:\n",
" states = dict(state=list())\n",
" else:\n",
" states = {name: list() for name in experiences[0]['states']}\n",
" internals = [list() for _ in experiences[0]['internals']]\n",
" if self.unique_action:\n",
" actions = dict(action=list())\n",
" else:\n",
" actions = {name: list() for name in experiences[0]['actions']}\n",
" terminal = list()\n",
" reward = list()\n",
"\n",
" for experience in experiences:\n",
" if self.unique_state:\n",
" states['state'].append(experience['states'])\n",
" else:\n",
" for name in sorted(states):\n",
" states[name].append(experience['states'][name])\n",
" for n, internal in enumerate(internals):\n",
" internal.append(experience['internals'][n])\n",
" if self.unique_action:\n",
" actions['action'].append(experience['actions'])\n",
" else:\n",
" for name in sorted(actions):\n",
" actions[name].append(experience['actions'][name])\n",
" terminal.append(experience['terminal'])\n",
" reward.append(experience['reward'])\n",
"\n",
" self.model.import_experience(\n",
" states=states,\n",
" internals=internals,\n",
" actions=actions,\n",
" terminal=terminal,\n",
" reward=reward\n",
" )"
] | [
0,
0.08333333333333333,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693
] | 51 | 0.003896 |
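The core of the list branch above is pivoting a list of per-step experience dicts into column-wise batches; a standalone sketch of that pivot with made-up field values (the model call and the unique_state/unique_action wrapping are out of scope):

def pivot_experiences(experiences):
    """Turn [{'states': ..., 'terminal': ..., 'reward': ...}, ...]
    into dict-of-lists batches, mirroring the loop above."""
    states, terminal, reward = [], [], []
    for exp in experiences:
        states.append(exp['states'])
        terminal.append(exp['terminal'])
        reward.append(exp['reward'])
    return {'states': states, 'terminal': terminal, 'reward': reward}

batch = pivot_experiences([
    {'states': [0.1, 0.2], 'terminal': False, 'reward': 1.0},
    {'states': [0.3, 0.4], 'terminal': True, 'reward': 0.0},
])
print(batch['reward'])  # [1.0, 0.0]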
def str_arg(d):
"""formats a function argument prettily not as code
dicts are expressed in {key=value} syntax
strings are formatted using str in quotes, not repr"""
if not d:
return None
if isinstance(d, dict):
if len(d) == 2 and d.get('type') == 'text' and 'value' in d:
return str_arg(d['value'])
if len(d) == 2 and d.get('type') == 'text' and 'subkey' in d:
return ".%s" % d['subkey']
if d.get('type') == 'module':
return None
return "{%s}" % str_args(d.items())
if isinstance(d, list):
if len(d) == 1:
return str_arg(d[0])
return "[%s]" % ", ".join(str_arg(elem) for elem in d)
if isinstance(d, unicode):
return '"%s"' % d
return repr(d) | [
"def",
"str_arg",
"(",
"d",
")",
":",
"if",
"not",
"d",
":",
"return",
"None",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"if",
"len",
"(",
"d",
")",
"==",
"2",
"and",
"d",
".",
"get",
"(",
"'type'",
")",
"==",
"'text'",
"and",
"'value'",
"in",
"d",
":",
"return",
"str_arg",
"(",
"d",
"[",
"'value'",
"]",
")",
"if",
"len",
"(",
"d",
")",
"==",
"2",
"and",
"d",
".",
"get",
"(",
"'type'",
")",
"==",
"'text'",
"and",
"'subkey'",
"in",
"d",
":",
"return",
"\".%s\"",
"%",
"d",
"[",
"'subkey'",
"]",
"if",
"d",
".",
"get",
"(",
"'type'",
")",
"==",
"'module'",
":",
"return",
"None",
"return",
"\"{%s}\"",
"%",
"str_args",
"(",
"d",
".",
"items",
"(",
")",
")",
"if",
"isinstance",
"(",
"d",
",",
"list",
")",
":",
"if",
"len",
"(",
"d",
")",
"==",
"1",
":",
"return",
"str_arg",
"(",
"d",
"[",
"0",
"]",
")",
"return",
"\"[%s]\"",
"%",
"\", \"",
".",
"join",
"(",
"str_arg",
"(",
"elem",
")",
"for",
"elem",
"in",
"d",
")",
"if",
"isinstance",
"(",
"d",
",",
"unicode",
")",
":",
"return",
"'\"%s\"'",
"%",
"d",
"return",
"repr",
"(",
"d",
")"
] | 33.217391 | 0.001272 | [
"def str_arg(d):\n",
" \"\"\"formats a function argument prettily not as code\n",
"\n",
" dicts are expressed in {key=value} syntax\n",
" strings are formatted using str in quotes not repr\"\"\"\n",
" if not d:\n",
" return None\n",
" if isinstance(d, dict):\n",
" if len(d) == 2 and d.get('type') == 'text' and 'value' in d:\n",
" return str_arg(d['value'])\n",
" if len(d) == 2 and d.get('type') == 'text' and 'subkey' in d:\n",
" return \".%s\" % d['subkey']\n",
" if d.get('type') == 'module':\n",
" return None\n",
" return \"{%s}\" % str_args(d.items())\n",
" if isinstance(d, list):\n",
" if len(d) == 1:\n",
" return str_arg(d[0])\n",
" return \"[%s]\" % \", \".join(str_arg(elem) for elem in d)\n",
" if isinstance(d, unicode):\n",
" return '\"%s\"' % d\n",
"\n",
" return repr(d)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 23 | 0.002415 |
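The unicode check above marks this as Python 2 code; a Python 3 rendition of the same pretty-printer idea, with an assumed str_args helper that joins key=value pairs (the original's is not shown):

def str_args(items):
    return ", ".join("%s=%s" % (k, str_arg(v)) for k, v in items)

def str_arg(d):
    if not d:
        return None
    if isinstance(d, dict):
        return "{%s}" % str_args(d.items())
    if isinstance(d, list):
        return "[%s]" % ", ".join(str_arg(e) for e in d)
    if isinstance(d, str):           # Python 3: str instead of unicode
        return '"%s"' % d
    return repr(d)

print(str_arg({'a': [1, 'x']}))      # {a=[1, "x"]}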
def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
_args.append(rid)
_args.append(attribute or '-')
if unit:
_args.append(unit)
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
except CalledProcessError as e:
if e.returncode == 2:
return None
raise | [
"def",
"relation_get",
"(",
"attribute",
"=",
"None",
",",
"unit",
"=",
"None",
",",
"rid",
"=",
"None",
")",
":",
"_args",
"=",
"[",
"'relation-get'",
",",
"'--format=json'",
"]",
"if",
"rid",
":",
"_args",
".",
"append",
"(",
"'-r'",
")",
"_args",
".",
"append",
"(",
"rid",
")",
"_args",
".",
"append",
"(",
"attribute",
"or",
"'-'",
")",
"if",
"unit",
":",
"_args",
".",
"append",
"(",
"unit",
")",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"subprocess",
".",
"check_output",
"(",
"_args",
")",
".",
"decode",
"(",
"'UTF-8'",
")",
")",
"except",
"ValueError",
":",
"return",
"None",
"except",
"CalledProcessError",
"as",
"e",
":",
"if",
"e",
".",
"returncode",
"==",
"2",
":",
"return",
"None",
"raise"
] | 28.764706 | 0.00198 | [
"def relation_get(attribute=None, unit=None, rid=None):\n",
" \"\"\"Get relation information\"\"\"\n",
" _args = ['relation-get', '--format=json']\n",
" if rid:\n",
" _args.append('-r')\n",
" _args.append(rid)\n",
" _args.append(attribute or '-')\n",
" if unit:\n",
" _args.append(unit)\n",
" try:\n",
" return json.loads(subprocess.check_output(_args).decode('UTF-8'))\n",
" except ValueError:\n",
" return None\n",
" except CalledProcessError as e:\n",
" if e.returncode == 2:\n",
" return None\n",
" raise"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693
] | 17 | 0.004525 |
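The same shell-out-and-parse-JSON pattern in isolation; echo stands in for the relation-get binary here, so a POSIX shell environment is assumed:

import json
import subprocess

def run_json(cmd_args):
    """Run a command expected to print JSON; None on unparsable output."""
    try:
        out = subprocess.check_output(cmd_args).decode('UTF-8')
        return json.loads(out)
    except (ValueError, subprocess.CalledProcessError):
        return None

print(run_json(['echo', '{"private-address": "10.0.0.2"}']))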
def crontab(
state, host, command, present=True, user=None,
minute='*', hour='*', month='*', day_of_week='*', day_of_month='*',
):
'''
Add/remove/update crontab entries.
+ command: the command for the cron
+ present: whether this cron command should exist
+ user: the user whose crontab to manage
+ minute: which minutes to execute the cron
+ hour: which hours to execute the cron
+ month: which months to execute the cron
+ day_of_week: which day of the week to execute the cron
+ day_of_month: which day of the month to execute the cron
Cron commands:
The command is used to identify crontab entries - this means commands
must be unique within a given users crontab. If you require multiple
identical commands, prefix each with a nonce environment variable.
'''
crontab = host.fact.crontab(user)
exists = command in crontab
edit_commands = []
temp_filename = state.get_temp_filename()
new_crontab_line = '{minute} {hour} {day_of_month} {month} {day_of_week} {command}'.format(
command=command,
minute=minute,
hour=hour,
month=month,
day_of_week=day_of_week,
day_of_month=day_of_month,
)
existing_crontab_match = '.*{0}.*'.format(command)
# Don't want the cron and it does exist? Remove the line
if not present and exists:
edit_commands.append(sed_replace(
temp_filename, existing_crontab_match, '',
))
# Want the cron but it doesn't exist? Append the line
elif present and not exists:
edit_commands.append('echo {0} >> {1}'.format(
shlex_quote(new_crontab_line), temp_filename,
))
# We have the cron and it exists; do the details match? If not, replace the line
elif present and exists:
existing_details = crontab[command]
if any((
minute != existing_details['minute'],
hour != existing_details['hour'],
month != existing_details['month'],
day_of_week != existing_details['day_of_week'],
day_of_month != existing_details['day_of_month'],
)):
edit_commands.append(sed_replace(
temp_filename, existing_crontab_match, new_crontab_line,
))
if edit_commands:
crontab_args = []
if user:
crontab_args.append('-u {0}'.format(user))
# List the crontab into a temporary file if it exists
if crontab:
yield 'crontab -l {0} > {1}'.format(' '.join(crontab_args), temp_filename)
# Now yield any edits
for command in edit_commands:
yield command
# Finally, use the tempfile to write a new crontab
yield 'crontab {0} {1}'.format(' '.join(crontab_args), temp_filename) | [
"def",
"crontab",
"(",
"state",
",",
"host",
",",
"command",
",",
"present",
"=",
"True",
",",
"user",
"=",
"None",
",",
"minute",
"=",
"'*'",
",",
"hour",
"=",
"'*'",
",",
"month",
"=",
"'*'",
",",
"day_of_week",
"=",
"'*'",
",",
"day_of_month",
"=",
"'*'",
",",
")",
":",
"crontab",
"=",
"host",
".",
"fact",
".",
"crontab",
"(",
"user",
")",
"exists",
"=",
"command",
"in",
"crontab",
"edit_commands",
"=",
"[",
"]",
"temp_filename",
"=",
"state",
".",
"get_temp_filename",
"(",
")",
"new_crontab_line",
"=",
"'{minute} {hour} {day_of_month} {month} {day_of_week} {command}'",
".",
"format",
"(",
"command",
"=",
"command",
",",
"minute",
"=",
"minute",
",",
"hour",
"=",
"hour",
",",
"month",
"=",
"month",
",",
"day_of_week",
"=",
"day_of_week",
",",
"day_of_month",
"=",
"day_of_month",
",",
")",
"existing_crontab_match",
"=",
"'.*{0}.*'",
".",
"format",
"(",
"command",
")",
"# Don't want the cron and it does exist? Remove the line",
"if",
"not",
"present",
"and",
"exists",
":",
"edit_commands",
".",
"append",
"(",
"sed_replace",
"(",
"temp_filename",
",",
"existing_crontab_match",
",",
"''",
",",
")",
")",
"# Want the cron but it doesn't exist? Append the line",
"elif",
"present",
"and",
"not",
"exists",
":",
"edit_commands",
".",
"append",
"(",
"'echo {0} >> {1}'",
".",
"format",
"(",
"shlex_quote",
"(",
"new_crontab_line",
")",
",",
"temp_filename",
",",
")",
")",
"# We have the cron and it exists, do it's details? If not, replace the line",
"elif",
"present",
"and",
"exists",
":",
"existing_details",
"=",
"crontab",
"[",
"command",
"]",
"if",
"any",
"(",
"(",
"minute",
"!=",
"existing_details",
"[",
"'minute'",
"]",
",",
"hour",
"!=",
"existing_details",
"[",
"'hour'",
"]",
",",
"month",
"!=",
"existing_details",
"[",
"'month'",
"]",
",",
"day_of_week",
"!=",
"existing_details",
"[",
"'day_of_week'",
"]",
",",
"day_of_month",
"!=",
"existing_details",
"[",
"'day_of_month'",
"]",
",",
")",
")",
":",
"edit_commands",
".",
"append",
"(",
"sed_replace",
"(",
"temp_filename",
",",
"existing_crontab_match",
",",
"new_crontab_line",
",",
")",
")",
"if",
"edit_commands",
":",
"crontab_args",
"=",
"[",
"]",
"if",
"user",
":",
"crontab_args",
".",
"append",
"(",
"'-u {0}'",
".",
"format",
"(",
"user",
")",
")",
"# List the crontab into a temporary file if it exists",
"if",
"crontab",
":",
"yield",
"'crontab -l {0} > {1}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"crontab_args",
")",
",",
"temp_filename",
")",
"# Now yield any edits",
"for",
"command",
"in",
"edit_commands",
":",
"yield",
"command",
"# Finally, use the tempfile to write a new crontab",
"yield",
"'crontab {0} {1}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"crontab_args",
")",
",",
"temp_filename",
")"
] | 34.325 | 0.001062 | [
"def crontab(\n",
" state, host, command, present=True, user=None,\n",
" minute='*', hour='*', month='*', day_of_week='*', day_of_month='*',\n",
"):\n",
" '''\n",
" Add/remove/update crontab entries.\n",
"\n",
" + command: the command for the cron\n",
" + present: whether this cron command should exist\n",
" + user: the user whose crontab to manage\n",
" + minute: which minutes to execute the cron\n",
" + hour: which hours to execute the cron\n",
" + month: which months to execute the cron\n",
" + day_of_week: which day of the week to execute the cron\n",
" + day_of_month: which day of the month to execute the cron\n",
"\n",
" Cron commands:\n",
" The command is used to identify crontab entries - this means commands\n",
" must be unique within a given users crontab. If you require multiple\n",
" identical commands, prefix each with a nonce environment variable.\n",
" '''\n",
"\n",
" crontab = host.fact.crontab(user)\n",
" exists = command in crontab\n",
"\n",
" edit_commands = []\n",
" temp_filename = state.get_temp_filename()\n",
"\n",
" new_crontab_line = '{minute} {hour} {day_of_month} {month} {day_of_week} {command}'.format(\n",
" command=command,\n",
" minute=minute,\n",
" hour=hour,\n",
" month=month,\n",
" day_of_week=day_of_week,\n",
" day_of_month=day_of_month,\n",
" )\n",
" existing_crontab_match = '.*{0}.*'.format(command)\n",
"\n",
" # Don't want the cron and it does exist? Remove the line\n",
" if not present and exists:\n",
" edit_commands.append(sed_replace(\n",
" temp_filename, existing_crontab_match, '',\n",
" ))\n",
"\n",
" # Want the cron but it doesn't exist? Append the line\n",
" elif present and not exists:\n",
" edit_commands.append('echo {0} >> {1}'.format(\n",
" shlex_quote(new_crontab_line), temp_filename,\n",
" ))\n",
"\n",
" # We have the cron and it exists, do it's details? If not, replace the line\n",
" elif present and exists:\n",
" existing_details = crontab[command]\n",
"\n",
" if any((\n",
" minute != existing_details['minute'],\n",
" hour != existing_details['hour'],\n",
" month != existing_details['month'],\n",
" day_of_week != existing_details['day_of_week'],\n",
" day_of_month != existing_details['day_of_month'],\n",
" )):\n",
" edit_commands.append(sed_replace(\n",
" temp_filename, existing_crontab_match, new_crontab_line,\n",
" ))\n",
"\n",
" if edit_commands:\n",
" crontab_args = []\n",
" if user:\n",
" crontab_args.append('-u {0}'.format(user))\n",
"\n",
" # List the crontab into a temporary file if it exists\n",
" if crontab:\n",
" yield 'crontab -l {0} > {1}'.format(' '.join(crontab_args), temp_filename)\n",
"\n",
" # Now yield any edits\n",
" for command in edit_commands:\n",
" yield command\n",
"\n",
" # Finally, use the tempfile to write a new crontab\n",
" yield 'crontab {0} {1}'.format(' '.join(crontab_args), temp_filename)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0.012987012987012988
] | 80 | 0.000436 |
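A standalone sketch of just the idempotency logic above: format the five-field cron line and decide whether an existing crontab entry (here a plain string) already matches; sed_replace and the host fact lookup are out of scope:

def cron_line(command, minute='*', hour='*', day_of_month='*',
              month='*', day_of_week='*'):
    return '{0} {1} {2} {3} {4} {5}'.format(
        minute, hour, day_of_month, month, day_of_week, command)

existing = '0 3 * * * /usr/local/bin/backup.sh'   # hypothetical entry
wanted = cron_line('/usr/local/bin/backup.sh', minute='0', hour='3')
print('up to date' if wanted == existing else 'needs rewrite')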
def _phi2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_phi2deriv
PURPOSE:
evaluate the second azimuthal derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second azimuthal derivative
"""
if not self.isNonAxi:
phi= 0.
x,y,z= self._compute_xyz(R,phi,z,t)
Fx= self._xforce_xyz(x,y,z)
Fy= self._yforce_xyz(x,y,z)
Fxy= np.dot(self.rot(t, transposed = True),np.array([Fx,Fy]))
Fx, Fy= Fxy[0], Fxy[1]
phixxa= self._2ndderiv_xyz(x,y,z,0,0)
phixya= self._2ndderiv_xyz(x,y,z,0,1)
phiyya= self._2ndderiv_xyz(x,y,z,1,1)
ang = self._omegab*t + self._pa
c, s = np.cos(ang), np.sin(ang)
phixx = c**2*phixxa + 2.*c*s*phixya + s**2*phiyya
phixy = (c**2-s**2)*phixya + c*s*(phiyya - phixxa)
phiyy = s**2*phixxa - 2.*c*s*phixya + c**2*phiyya
return R**2.*(np.sin(phi)**2.*phixx+np.cos(phi)**2.*phiyy\
-2.*np.cos(phi)*np.sin(phi)*phixy)\
+R*(np.cos(phi)*Fx+np.sin(phi)*Fy) | [
"def",
"_phi2deriv",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"if",
"not",
"self",
".",
"isNonAxi",
":",
"phi",
"=",
"0.",
"x",
",",
"y",
",",
"z",
"=",
"self",
".",
"_compute_xyz",
"(",
"R",
",",
"phi",
",",
"z",
",",
"t",
")",
"Fx",
"=",
"self",
".",
"_xforce_xyz",
"(",
"x",
",",
"y",
",",
"z",
")",
"Fy",
"=",
"self",
".",
"_yforce_xyz",
"(",
"x",
",",
"y",
",",
"z",
")",
"Fxy",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"rot",
"(",
"t",
",",
"transposed",
"=",
"True",
")",
",",
"np",
".",
"array",
"(",
"[",
"Fx",
",",
"Fy",
"]",
")",
")",
"Fx",
",",
"Fy",
"=",
"Fxy",
"[",
"0",
"]",
",",
"Fxy",
"[",
"1",
"]",
"phixxa",
"=",
"self",
".",
"_2ndderiv_xyz",
"(",
"x",
",",
"y",
",",
"z",
",",
"0",
",",
"0",
")",
"phixya",
"=",
"self",
".",
"_2ndderiv_xyz",
"(",
"x",
",",
"y",
",",
"z",
",",
"0",
",",
"1",
")",
"phiyya",
"=",
"self",
".",
"_2ndderiv_xyz",
"(",
"x",
",",
"y",
",",
"z",
",",
"1",
",",
"1",
")",
"ang",
"=",
"self",
".",
"_omegab",
"*",
"t",
"+",
"self",
".",
"_pa",
"c",
",",
"s",
"=",
"np",
".",
"cos",
"(",
"ang",
")",
",",
"np",
".",
"sin",
"(",
"ang",
")",
"phixx",
"=",
"c",
"**",
"2",
"*",
"phixxa",
"+",
"2.",
"*",
"c",
"*",
"s",
"*",
"phixya",
"+",
"s",
"**",
"2",
"*",
"phiyya",
"phixy",
"=",
"(",
"c",
"**",
"2",
"-",
"s",
"**",
"2",
")",
"*",
"phixya",
"+",
"c",
"*",
"s",
"*",
"(",
"phiyya",
"-",
"phixxa",
")",
"phiyy",
"=",
"s",
"**",
"2",
"*",
"phixxa",
"-",
"2.",
"*",
"c",
"*",
"s",
"*",
"phixya",
"+",
"c",
"**",
"2",
"*",
"phiyya",
"return",
"R",
"**",
"2.",
"*",
"(",
"np",
".",
"sin",
"(",
"phi",
")",
"**",
"2.",
"*",
"phixx",
"+",
"np",
".",
"cos",
"(",
"phi",
")",
"**",
"2.",
"*",
"phiyy",
"-",
"2.",
"*",
"np",
".",
"cos",
"(",
"phi",
")",
"*",
"np",
".",
"sin",
"(",
"phi",
")",
"*",
"phixy",
")",
"+",
"R",
"*",
"(",
"np",
".",
"cos",
"(",
"phi",
")",
"*",
"Fx",
"+",
"np",
".",
"sin",
"(",
"phi",
")",
"*",
"Fy",
")"
] | 37.59375 | 0.036467 | [
"def _phi2deriv(self,R,z,phi=0.,t=0.):\n",
" \"\"\"\n",
" NAME:\n",
" _phi2deriv\n",
" PURPOSE:\n",
" evaluate the second azimuthal derivative for this potential\n",
" INPUT:\n",
" R - Galactocentric cylindrical radius\n",
" z - vertical height\n",
" phi - azimuth\n",
" t - time\n",
" OUTPUT:\n",
" the second azimuthal derivative\n",
" \"\"\"\n",
" if not self.isNonAxi:\n",
" phi= 0.\n",
" x,y,z= self._compute_xyz(R,phi,z,t)\n",
" Fx= self._xforce_xyz(x,y,z)\n",
" Fy= self._yforce_xyz(x,y,z)\n",
" Fxy= np.dot(self.rot(t, transposed = True),np.array([Fx,Fy]))\n",
" Fx, Fy= Fxy[0], Fxy[1]\n",
" phixxa= self._2ndderiv_xyz(x,y,z,0,0)\n",
" phixya= self._2ndderiv_xyz(x,y,z,0,1)\n",
" phiyya= self._2ndderiv_xyz(x,y,z,1,1)\n",
" ang = self._omegab*t + self._pa\n",
" c, s = np.cos(ang), np.sin(ang)\n",
" phixx = c**2*phixxa + 2.*c*s*phixya + s**2*phiyya\n",
" phixy = (c**2-s**2)*phixya + c*s*(phiyya - phixxa)\n",
" phiyy = s**2*phixxa - 2.*c*s*phixya + c**2*phiyya\n",
" return R**2.*(np.sin(phi)**2.*phixx+np.cos(phi)**2.*phiyy\\\n",
" -2.*np.cos(phi)*np.sin(phi)*phixy)\\\n",
" +R*(np.cos(phi)*Fx+np.sin(phi)*Fy)"
] | [
0.10526315789473684,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0.13636363636363635,
0.08333333333333333,
0.08333333333333333,
0.07142857142857142,
0.03225806451612903,
0.10869565217391304,
0.10869565217391304,
0.10869565217391304,
0,
0,
0,
0,
0,
0.014925373134328358,
0.03225806451612903,
0.05
] | 32 | 0.033393 |
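The chain rule applied above is d2Phi/dphi2 = R^2(sin^2 Phi_xx + cos^2 Phi_yy - 2 sin cos Phi_xy) - R(cos Phi_x + sin Phi_y); the record's trailing term carries a plus sign because Fx, Fy are forces, i.e. -grad Phi. A finite-difference check on a toy quadratic potential (not the record's potential):

import math

def Phi(x, y):               # toy potential with known derivatives
    return x * x + 3.0 * x * y + 2.0 * y * y

R, phi = 2.0, 0.6
x, y = R * math.cos(phi), R * math.sin(phi)
Phixx, Phiyy, Phixy = 2.0, 4.0, 3.0               # second derivatives
Phix, Phiy = 2.0 * x + 3.0 * y, 3.0 * x + 4.0 * y

analytic = (R ** 2 * (math.sin(phi) ** 2 * Phixx
                      + math.cos(phi) ** 2 * Phiyy
                      - 2.0 * math.sin(phi) * math.cos(phi) * Phixy)
            - R * (math.cos(phi) * Phix + math.sin(phi) * Phiy))

def f(p):                    # Phi along the circle of radius R
    return Phi(R * math.cos(p), R * math.sin(p))

h = 1e-5
numeric = (f(phi + h) - 2.0 * f(phi) + f(phi - h)) / h ** 2
print(analytic, numeric)    # should agree to ~1e-4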
def update_where(self, col, value, where_col_list, where_value_list):
"""
updates the array to set cell = value where col_list == val_list
"""
if type(col) is str:
col_ndx = self.get_col_by_name(col)
else:
col_ndx = col
#print('col_ndx = ', col_ndx )
#print("updating " + col + " to " , value, " where " , where_col_list , " = " , where_value_list)
new_arr = self.select_where(where_col_list, where_value_list)
#print('new_arr', new_arr)
for r in new_arr:
self.arr[r[0]][col_ndx] = value | [
"def",
"update_where",
"(",
"self",
",",
"col",
",",
"value",
",",
"where_col_list",
",",
"where_value_list",
")",
":",
"if",
"type",
"(",
"col",
")",
"is",
"str",
":",
"col_ndx",
"=",
"self",
".",
"get_col_by_name",
"(",
"col",
")",
"else",
":",
"col_ndx",
"=",
"col",
"#print('col_ndx = ', col_ndx )",
"#print(\"updating \" + col + \" to \" , value, \" where \" , where_col_list , \" = \" , where_value_list)",
"new_arr",
"=",
"self",
".",
"select_where",
"(",
"where_col_list",
",",
"where_value_list",
")",
"#print('new_arr', new_arr)",
"for",
"r",
"in",
"new_arr",
":",
"self",
".",
"arr",
"[",
"r",
"[",
"0",
"]",
"]",
"[",
"col_ndx",
"]",
"=",
"value"
] | 42.428571 | 0.011532 | [
"def update_where(self, col, value, where_col_list, where_value_list):\n",
" \"\"\" \n",
" updates the array to set cell = value where col_list == val_list\n",
" \"\"\"\n",
" if type(col) is str:\n",
" col_ndx = self.get_col_by_name(col)\n",
" else:\n",
" col_ndx = col\n",
" #print('col_ndx = ', col_ndx )\n",
" #print(\"updating \" + col + \" to \" , value, \" where \" , where_col_list , \" = \" , where_value_list)\n",
" new_arr = self.select_where(where_col_list, where_value_list)\n",
" #print('new_arr', new_arr)\n",
" for r in new_arr:\n",
" self.arr[r[0]][col_ndx] = value"
] | [
0,
0.15384615384615385,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0.018867924528301886,
0,
0.02857142857142857,
0,
0.023255813953488372
] | 14 | 0.017739 |
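A self-contained sketch of the same row-update idea on a list-of-lists table; the select_where indirection above is replaced by a direct match on the where column:

header = ['id', 'name', 'status']
arr = [[1, 'alpha', 'new'], [2, 'beta', 'new'], [3, 'alpha', 'done']]

def update_where(arr, col_ndx, value, where_ndx, where_value):
    for row in arr:
        if row[where_ndx] == where_value:
            row[col_ndx] = value

update_where(arr, 2, 'done', 1, 'alpha')  # status='done' where name=='alpha'
print(arr)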
def reset(self):
""" Reset/remove all layers, keeping only the initial volume. """
self.layers = {}
self.stack = []
self.set_mask()
self.n_vox_in_vol = len(np.where(self.current_mask)[0]) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"layers",
"=",
"{",
"}",
"self",
".",
"stack",
"=",
"[",
"]",
"self",
".",
"set_mask",
"(",
")",
"self",
".",
"n_vox_in_vol",
"=",
"len",
"(",
"np",
".",
"where",
"(",
"self",
".",
"current_mask",
")",
"[",
"0",
"]",
")"
] | 37 | 0.008811 | [
"def reset(self):\n",
" \"\"\" Reset/remove all layers, keeping only the initial volume. \"\"\"\n",
" self.layers = {}\n",
" self.stack = []\n",
" self.set_mask()\n",
" self.n_vox_in_vol = len(np.where(self.current_mask)[0])"
] | [
0,
0.013513513513513514,
0,
0,
0,
0.015873015873015872
] | 6 | 0.004898 |
def setEnv(self, name, value=None):
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
NB: Only the Mesos and single-machine batch systems support passing environment
variables. On other batch systems, this method has no effect. See
https://github.com/BD2KGenomics/toil/issues/547.
:param str name: the environment variable to be set on the worker.
:param str value: if given, the environment variable given by name will be set to this value.
if None, the variable's current value will be used as the value on the worker
:raise RuntimeError: if value is None and the name cannot be found in the environment
"""
if value is None:
try:
value = os.environ[name]
except KeyError:
raise RuntimeError("%s does not exist in current environment", name)
self.environment[name] = value | [
"def",
"setEnv",
"(",
"self",
",",
"name",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"try",
":",
"value",
"=",
"os",
".",
"environ",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"RuntimeError",
"(",
"\"%s does not exist in current environment\"",
",",
"name",
")",
"self",
".",
"environment",
"[",
"name",
"]",
"=",
"value"
] | 54.551724 | 0.008696 | [
"def setEnv(self, name, value=None):\n",
" \"\"\"\n",
" Set an environment variable for the worker process before it is launched. The worker\n",
" process will typically inherit the environment of the machine it is running on but this\n",
" method makes it possible to override specific variables in that inherited environment\n",
" before the worker is launched. Note that this mechanism is different to the one used by\n",
" the worker internally to set up the environment of a job. A call to this method affects\n",
" all jobs issued after this method returns. Note to implementors: This means that you\n",
" would typically need to copy the variables before enqueuing a job.\n",
"\n",
" If no value is provided it will be looked up from the current environment.\n",
"\n",
" NB: Only the Mesos and single-machine batch systems support passing environment\n",
" variables. On other batch systems, this method has no effect. See\n",
" https://github.com/BD2KGenomics/toil/issues/547.\n",
"\n",
" :param str name: the environment variable to be set on the worker.\n",
"\n",
" :param str value: if given, the environment variable given by name will be set to this value.\n",
" if None, the variable's current value will be used as the value on the worker\n",
"\n",
" :raise RuntimeError: if value is None and the name cannot be found in the environment\n",
" \"\"\"\n",
" if value is None:\n",
" try:\n",
" value = os.environ[name]\n",
" except KeyError:\n",
" raise RuntimeError(\"%s does not exist in current environment\", name)\n",
" self.environment[name] = value"
] | [
0,
0.08333333333333333,
0.010752688172043012,
0.010416666666666666,
0.010638297872340425,
0.010416666666666666,
0.010416666666666666,
0.010752688172043012,
0,
0,
0.012048192771084338,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0.00980392156862745,
0.010752688172043012,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0.011764705882352941,
0.02631578947368421
] | 29 | 0.008256 |
def QA_SU_save_option_commodity_min(
client=DATABASE,
ui_log=None,
ui_progress=None
):
'''
:param client:
:return:
'''
# Found during testing: fetching everything at once tends to fail; after fetching each product, switch the server ip and continue fetching?
_save_option_commodity_cu_min(
client=client,
ui_log=ui_log,
ui_progress=ui_progress
)
_save_option_commodity_sr_min(
client=client,
ui_log=ui_log,
ui_progress=ui_progress
)
_save_option_commodity_m_min(
client=client,
ui_log=ui_log,
ui_progress=ui_progress
)
_save_option_commodity_ru_min(
client=client,
ui_log=ui_log,
ui_progress=ui_progress
)
_save_option_commodity_cf_min(
client=client,
ui_log=ui_log,
ui_progress=ui_progress
)
_save_option_commodity_c_min(
client=client,
ui_log=ui_log,
ui_progress=ui_progress
) | [
"def",
"QA_SU_save_option_commodity_min",
"(",
"client",
"=",
"DATABASE",
",",
"ui_log",
"=",
"None",
",",
"ui_progress",
"=",
"None",
")",
":",
"# 测试中发现, 一起回去,容易出现错误,每次获取一个品种后 ,更换服务ip继续获取 ?",
"_save_option_commodity_cu_min",
"(",
"client",
"=",
"client",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
")",
"_save_option_commodity_sr_min",
"(",
"client",
"=",
"client",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
")",
"_save_option_commodity_m_min",
"(",
"client",
"=",
"client",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
")",
"_save_option_commodity_ru_min",
"(",
"client",
"=",
"client",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
")",
"_save_option_commodity_cf_min",
"(",
"client",
"=",
"client",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
")",
"_save_option_commodity_c_min",
"(",
"client",
"=",
"client",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
")"
] | 20.181818 | 0.001074 | [
"def QA_SU_save_option_commodity_min(\n",
" client=DATABASE,\n",
" ui_log=None,\n",
" ui_progress=None\n",
"):\n",
" '''\n",
" :param client:\n",
" :return:\n",
" '''\n",
" # 测试中发现, 一起回去,容易出现错误,每次获取一个品种后 ,更换服务ip继续获取 ?\n",
"\n",
" _save_option_commodity_cu_min(\n",
" client=client,\n",
" ui_log=ui_log,\n",
" ui_progress=ui_progress\n",
" )\n",
" _save_option_commodity_sr_min(\n",
" client=client,\n",
" ui_log=ui_log,\n",
" ui_progress=ui_progress\n",
" )\n",
" _save_option_commodity_m_min(\n",
" client=client,\n",
" ui_log=ui_log,\n",
" ui_progress=ui_progress\n",
" )\n",
"\n",
" _save_option_commodity_ru_min(\n",
" client=client,\n",
" ui_log=ui_log,\n",
" ui_progress=ui_progress\n",
" )\n",
"\n",
" _save_option_commodity_cf_min(\n",
" client=client,\n",
" ui_log=ui_log,\n",
" ui_progress=ui_progress\n",
" )\n",
"\n",
" _save_option_commodity_c_min(\n",
" client=client,\n",
" ui_log=ui_log,\n",
" ui_progress=ui_progress\n",
" )"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2
] | 44 | 0.004545 |
def get_full_md5(self, partial_md5, collection):
"""Support partial/short md5s, return the full md5 with this method"""
print 'Notice: Performing slow md5 search...'
starts_with = '%s.*' % partial_md5
sample_info = self.database[collection].find_one({'md5': {'$regex' : starts_with}},{'md5':1})
return sample_info['md5'] if sample_info else None | [
"def",
"get_full_md5",
"(",
"self",
",",
"partial_md5",
",",
"collection",
")",
":",
"print",
"'Notice: Performing slow md5 search...'",
"starts_with",
"=",
"'%s.*'",
"%",
"partial_md5",
"sample_info",
"=",
"self",
".",
"database",
"[",
"collection",
"]",
".",
"find_one",
"(",
"{",
"'md5'",
":",
"{",
"'$regex'",
":",
"starts_with",
"}",
"}",
",",
"{",
"'md5'",
":",
"1",
"}",
")",
"return",
"sample_info",
"[",
"'md5'",
"]",
"if",
"sample_info",
"else",
"None"
] | 63.333333 | 0.015584 | [
"def get_full_md5(self, partial_md5, collection):\n",
" \"\"\"Support partial/short md5s, return the full md5 with this method\"\"\"\n",
" print 'Notice: Performing slow md5 search...'\n",
" starts_with = '%s.*' % partial_md5\n",
" sample_info = self.database[collection].find_one({'md5': {'$regex' : starts_with}},{'md5':1})\n",
" return sample_info['md5'] if sample_info else None"
] | [
0,
0.012658227848101266,
0,
0,
0.0392156862745098,
0.017241379310344827
] | 6 | 0.011519 |
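The Mongo $regex query above is a prefix scan (note the record is Python 2: it uses a print statement); the same lookup in pure Python over an in-memory list:

def full_md5(partial_md5, md5s):
    """Return the first md5 starting with partial_md5, else None."""
    for md5 in md5s:
        if md5.startswith(partial_md5):
            return md5
    return None

known = ['d41d8cd98f00b204e9800998ecf8427e',
         '9e107d9d372bb6826bd81d3542a419d6']
print(full_md5('9e10', known))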
def cmd_connect(node, cmd_name, node_info):
"""Connect to node."""
# FUTURE: call function to check for custom connection-info
conn_info = "Defaults"
conf_mess = ("\r{0}{1} TO{2} {3} using {5}{4}{2} - Confirm [y/N]: ".
format(C_STAT[cmd_name.upper()], cmd_name.upper(), C_NORM,
node_info, conn_info, C_HEAD2))
cmd_result = None
if input_yn(conf_mess):
exec_mess = ("\r{0}CONNECTING TO{1} {2} using {4}{3}{1}: ".
format(C_STAT[cmd_name.upper()], C_NORM, node_info,
conn_info, C_HEAD2))
ui_erase_ln()
ui_print(exec_mess)
(ssh_user, ssh_key) = ssh_get_info(node)
if ssh_user:
ssh_cmd = "ssh {0}{1}@{2}".format(ssh_key, ssh_user,
node.public_ips)
else:
ssh_cmd = "ssh {0}{1}".format(ssh_key, node.public_ips)
print("\n")
ui_print("\033[?25h") # cursor on
subprocess.call(ssh_cmd, shell=True)
ui_print("\033[?25l") # cursor off
print()
cmd_result = True
else:
ui_print_suffix("Command Aborted")
sleep(0.75)
return cmd_result | [
"def",
"cmd_connect",
"(",
"node",
",",
"cmd_name",
",",
"node_info",
")",
":",
"# FUTURE: call function to check for custom connection-info",
"conn_info",
"=",
"\"Defaults\"",
"conf_mess",
"=",
"(",
"\"\\r{0}{1} TO{2} {3} using {5}{4}{2} - Confirm [y/N]: \"",
".",
"format",
"(",
"C_STAT",
"[",
"cmd_name",
".",
"upper",
"(",
")",
"]",
",",
"cmd_name",
".",
"upper",
"(",
")",
",",
"C_NORM",
",",
"node_info",
",",
"conn_info",
",",
"C_HEAD2",
")",
")",
"cmd_result",
"=",
"None",
"if",
"input_yn",
"(",
"conf_mess",
")",
":",
"exec_mess",
"=",
"(",
"\"\\r{0}CONNECTING TO{1} {2} using {4}{3}{1}: \"",
".",
"format",
"(",
"C_STAT",
"[",
"cmd_name",
".",
"upper",
"(",
")",
"]",
",",
"C_NORM",
",",
"node_info",
",",
"conn_info",
",",
"C_HEAD2",
")",
")",
"ui_erase_ln",
"(",
")",
"ui_print",
"(",
"exec_mess",
")",
"(",
"ssh_user",
",",
"ssh_key",
")",
"=",
"ssh_get_info",
"(",
"node",
")",
"if",
"ssh_user",
":",
"ssh_cmd",
"=",
"\"ssh {0}{1}@{2}\"",
".",
"format",
"(",
"ssh_key",
",",
"ssh_user",
",",
"node",
".",
"public_ips",
")",
"else",
":",
"ssh_cmd",
"=",
"\"ssh {0}{1}\"",
".",
"format",
"(",
"ssh_key",
",",
"node",
".",
"public_ips",
")",
"print",
"(",
"\"\\n\"",
")",
"ui_print",
"(",
"\"\\033[?25h\"",
")",
"# cursor on",
"subprocess",
".",
"call",
"(",
"ssh_cmd",
",",
"shell",
"=",
"True",
")",
"ui_print",
"(",
"\"\\033[?25l\"",
")",
"# cursor off",
"print",
"(",
")",
"cmd_result",
"=",
"True",
"else",
":",
"ui_print_suffix",
"(",
"\"Command Aborted\"",
")",
"sleep",
"(",
"0.75",
")",
"return",
"cmd_result"
] | 39.866667 | 0.000816 | [
"def cmd_connect(node, cmd_name, node_info):\n",
" \"\"\"Connect to node.\"\"\"\n",
" # FUTURE: call function to check for custom connection-info\n",
" conn_info = \"Defaults\"\n",
" conf_mess = (\"\\r{0}{1} TO{2} {3} using {5}{4}{2} - Confirm [y/N]: \".\n",
" format(C_STAT[cmd_name.upper()], cmd_name.upper(), C_NORM,\n",
" node_info, conn_info, C_HEAD2))\n",
" cmd_result = None\n",
" if input_yn(conf_mess):\n",
" exec_mess = (\"\\r{0}CONNECTING TO{1} {2} using {4}{3}{1}: \".\n",
" format(C_STAT[cmd_name.upper()], C_NORM, node_info,\n",
" conn_info, C_HEAD2))\n",
" ui_erase_ln()\n",
" ui_print(exec_mess)\n",
" (ssh_user, ssh_key) = ssh_get_info(node)\n",
" if ssh_user:\n",
" ssh_cmd = \"ssh {0}{1}@{2}\".format(ssh_key, ssh_user,\n",
" node.public_ips)\n",
" else:\n",
" ssh_cmd = \"ssh {0}{1}\".format(ssh_key, node.public_ips)\n",
" print(\"\\n\")\n",
" ui_print(\"\\033[?25h\") # cursor on\n",
" subprocess.call(ssh_cmd, shell=True)\n",
" ui_print(\"\\033[?25l\") # cursor off\n",
" print()\n",
" cmd_result = True\n",
" else:\n",
" ui_print_suffix(\"Command Aborted\")\n",
" sleep(0.75)\n",
" return cmd_result"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 30 | 0.001587 |
def lucas_gas(T, Tc, Pc, Zc, MW, dipole=0, CASRN=None):
r'''Estimate the viscosity of a gas using an empirical
formula developed in several sources, but as discussed in [1]_ as the
original sources are in German or merely personal communications with the
authors of [1]_.
.. math::
\eta = \left[0.807T_r^{0.618}-0.357\exp(-0.449T_r) + 0.340\exp(-4.058
T_r) + 0.018\right]F_p^\circ F_Q^\circ /\xi
F_p^\circ=1, 0 \le \mu_{r} < 0.022
F_p^\circ = 1+30.55(0.292-Z_c)^{1.72}, 0.022 \le \mu_{r} < 0.075
F_p^\circ = 1+30.55(0.292-Z_c)^{1.72}|0.96+0.1(T_r-0.7)| 0.075 < \mu_{r}
F_Q^\circ = 1.22Q^{0.15}\left\{ 1+0.00385[(T_r-12)^2]^{1/M}\text{sign}
(T_r-12)\right\}
\mu_r = 52.46 \frac{\mu^2 P_c}{T_c^2}
\xi=0.176\left(\frac{T_c}{MW^3 P_c^4}\right)^{1/6}
Parameters
----------
T : float
Temperature of fluid [K]
Tc: float
Critical point of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
Zc : float
Critical compressibility of the fluid [Pa]
dipole : float
Dipole moment of fluid [debye]
CASRN : str, optional
CAS of the fluid
Returns
-------
mu_g : float
Viscosity of gas, [Pa*s]
Notes
-----
The example is from [1]_; all results agree.
Viscosity is calculated in micropoise, and converted to SI internally (1E-7).
Q for He = 1.38; Q for H2 = 0.76; Q for D2 = 0.52.
Examples
--------
>>> lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
1.7822676912698928e-05
References
----------
.. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
'''
Tr = T/Tc
xi = 0.176*(Tc/MW**3/(Pc/1E5)**4)**(1/6.) # bar according to example in Poling
if dipole is None:
dipole = 0
dipoler = 52.46*dipole**2*(Pc/1E5)/Tc**2 # bar according to example in Poling
if dipoler < 0.022:
Fp = 1
elif 0.022 <= dipoler < 0.075:
Fp = 1 + 30.55*(0.292 - Zc)**1.72
else:
Fp = 1 + 30.55*(0.292 - Zc)**1.72*abs(0.96 + 0.1*(Tr-0.7))
if CASRN and CASRN in _lucas_Q_dict:
Q = _lucas_Q_dict[CASRN]
if Tr - 12 > 0:
value = 1
else:
value = -1
FQ = 1.22*Q**0.15*(1 + 0.00385*((Tr-12)**2)**(1./MW)*value)
else:
FQ = 1
eta = (0.807*Tr**0.618 - 0.357*exp(-0.449*Tr) + 0.340*exp(-4.058*Tr) + 0.018)*Fp*FQ/xi
return eta/1E7 | [
"def",
"lucas_gas",
"(",
"T",
",",
"Tc",
",",
"Pc",
",",
"Zc",
",",
"MW",
",",
"dipole",
"=",
"0",
",",
"CASRN",
"=",
"None",
")",
":",
"Tr",
"=",
"T",
"/",
"Tc",
"xi",
"=",
"0.176",
"*",
"(",
"Tc",
"/",
"MW",
"**",
"3",
"/",
"(",
"Pc",
"/",
"1E5",
")",
"**",
"4",
")",
"**",
"(",
"1",
"/",
"6.",
")",
"# bar arrording to example in Poling",
"if",
"dipole",
"is",
"None",
":",
"dipole",
"=",
"0",
"dipoler",
"=",
"52.46",
"*",
"dipole",
"**",
"2",
"*",
"(",
"Pc",
"/",
"1E5",
")",
"/",
"Tc",
"**",
"2",
"# bar arrording to example in Poling",
"if",
"dipoler",
"<",
"0.022",
":",
"Fp",
"=",
"1",
"elif",
"0.022",
"<=",
"dipoler",
"<",
"0.075",
":",
"Fp",
"=",
"1",
"+",
"30.55",
"*",
"(",
"0.292",
"-",
"Zc",
")",
"**",
"1.72",
"else",
":",
"Fp",
"=",
"1",
"+",
"30.55",
"*",
"(",
"0.292",
"-",
"Zc",
")",
"**",
"1.72",
"*",
"abs",
"(",
"0.96",
"+",
"0.1",
"*",
"(",
"Tr",
"-",
"0.7",
")",
")",
"if",
"CASRN",
"and",
"CASRN",
"in",
"_lucas_Q_dict",
":",
"Q",
"=",
"_lucas_Q_dict",
"[",
"CASRN",
"]",
"if",
"Tr",
"-",
"12",
">",
"0",
":",
"value",
"=",
"1",
"else",
":",
"value",
"=",
"-",
"1",
"FQ",
"=",
"1.22",
"*",
"Q",
"**",
"0.15",
"*",
"(",
"1",
"+",
"0.00385",
"*",
"(",
"(",
"Tr",
"-",
"12",
")",
"**",
"2",
")",
"**",
"(",
"1.",
"/",
"MW",
")",
"*",
"value",
")",
"else",
":",
"FQ",
"=",
"1",
"eta",
"=",
"(",
"0.807",
"*",
"Tr",
"**",
"0.618",
"-",
"0.357",
"*",
"exp",
"(",
"-",
"0.449",
"*",
"Tr",
")",
"+",
"0.340",
"*",
"exp",
"(",
"-",
"4.058",
"*",
"Tr",
")",
"+",
"0.018",
")",
"*",
"Fp",
"*",
"FQ",
"/",
"xi",
"return",
"eta",
"/",
"1E7"
] | 30.765432 | 0.002333 | [
"def lucas_gas(T, Tc, Pc, Zc, MW, dipole=0, CASRN=None):\n",
" r'''Estimate the viscosity of a gas using an emperical\n",
" formula developed in several sources, but as discussed in [1]_ as the\n",
" original sources are in German or merely personal communications with the\n",
" authors of [1]_.\n",
"\n",
" .. math::\n",
" \\eta = \\left[0.807T_r^{0.618}-0.357\\exp(-0.449T_r) + 0.340\\exp(-4.058\n",
" T_r) + 0.018\\right]F_p^\\circ F_Q^\\circ /\\xi\n",
"\n",
" F_p^\\circ=1, 0 \\le \\mu_{r} < 0.022\n",
"\n",
" F_p^\\circ = 1+30.55(0.292-Z_c)^{1.72}, 0.022 \\le \\mu_{r} < 0.075\n",
"\n",
" F_p^\\circ = 1+30.55(0.292-Z_c)^{1.72}|0.96+0.1(T_r-0.7)| 0.075 < \\mu_{r}\n",
"\n",
" F_Q^\\circ = 1.22Q^{0.15}\\left\\{ 1+0.00385[(T_r-12)^2]^{1/M}\\text{sign}\n",
" (T_r-12)\\right\\}\n",
"\n",
" \\mu_r = 52.46 \\frac{\\mu^2 P_c}{T_c^2}\n",
"\n",
" \\xi=0.176\\left(\\frac{T_c}{MW^3 P_c^4}\\right)^{1/6}\n",
"\n",
" Parameters\n",
" ----------\n",
" T : float\n",
" Temperature of fluid [K]\n",
" Tc: float\n",
" Critical point of fluid [K]\n",
" Pc : float\n",
" Critical pressure of the fluid [Pa]\n",
" Zc : float\n",
" Critical compressibility of the fluid [Pa]\n",
" dipole : float\n",
" Dipole moment of fluid [debye]\n",
" CASRN : str, optional\n",
" CAS of the fluid\n",
"\n",
" Returns\n",
" -------\n",
" mu_g : float\n",
" Viscosity of gas, [Pa*s]\n",
"\n",
" Notes\n",
" -----\n",
" The example is from [1]_; all results agree.\n",
" Viscosity is calculated in micropoise, and converted to SI internally (1E-7).\n",
" Q for He = 1.38; Q for H2 = 0.76; Q for D2 = 0.52.\n",
"\n",
" Examples\n",
" --------\n",
" >>> lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)\n",
" 1.7822676912698928e-05\n",
"\n",
" References\n",
" ----------\n",
" .. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.\n",
" Properties of Gases and Liquids. McGraw-Hill Companies, 1987.\n",
" '''\n",
" Tr = T/Tc\n",
" xi = 0.176*(Tc/MW**3/(Pc/1E5)**4)**(1/6.) # bar arrording to example in Poling\n",
" if dipole is None:\n",
" dipole = 0\n",
" dipoler = 52.46*dipole**2*(Pc/1E5)/Tc**2 # bar arrording to example in Poling\n",
" if dipoler < 0.022:\n",
" Fp = 1\n",
" elif 0.022 <= dipoler < 0.075:\n",
" Fp = 1 + 30.55*(0.292 - Zc)**1.72\n",
" else:\n",
" Fp = 1 + 30.55*(0.292 - Zc)**1.72*abs(0.96 + 0.1*(Tr-0.7))\n",
" if CASRN and CASRN in _lucas_Q_dict:\n",
" Q = _lucas_Q_dict[CASRN]\n",
" if Tr - 12 > 0:\n",
" value = 1\n",
" else:\n",
" value = -1\n",
" FQ = 1.22*Q**0.15*(1 + 0.00385*((Tr-12)**2)**(1./MW)*value)\n",
" else:\n",
" FQ = 1\n",
" eta = (0.807*Tr**0.618 - 0.357*exp(-0.449*Tr) + 0.340*exp(-4.058*Tr) + 0.018)*Fp*FQ/xi\n",
" return eta/1E7"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0.05555555555555555
] | 81 | 0.00142 |
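The docstring carries the whole Lucas correlation, so it can be reproduced standalone; this sketch drops the quantum FQ correction (FQ = 1, i.e. no He/H2/D2 and no CASRN lookup) and should reproduce the methanol example from the record:

from math import exp

def lucas_gas_mu(T, Tc, Pc, Zc, MW, dipole=0.0):
    Tr = T / Tc
    xi = 0.176 * (Tc / MW ** 3 / (Pc / 1E5) ** 4) ** (1 / 6.)
    dipoler = 52.46 * dipole ** 2 * (Pc / 1E5) / Tc ** 2
    if dipoler < 0.022:
        Fp = 1.0
    elif dipoler < 0.075:
        Fp = 1.0 + 30.55 * (0.292 - Zc) ** 1.72
    else:
        Fp = 1.0 + 30.55 * (0.292 - Zc) ** 1.72 * abs(0.96 + 0.1 * (Tr - 0.7))
    eta = (0.807 * Tr ** 0.618 - 0.357 * exp(-0.449 * Tr)
           + 0.340 * exp(-4.058 * Tr) + 0.018) * Fp / xi
    return eta / 1E7  # micropoise -> Pa*s

print(lucas_gas_mu(550., 512.6, 80.9E5, 0.224, 32.042, dipole=1.7))
# ~1.78227e-05, matching the docstring example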
def items(self):
""" Return all merged items as iterator """
if not self.pdata and not self.spills:
return iter(self.data.items())
return self._external_items() | [
"def",
"items",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"pdata",
"and",
"not",
"self",
".",
"spills",
":",
"return",
"iter",
"(",
"self",
".",
"data",
".",
"items",
"(",
")",
")",
"return",
"self",
".",
"_external_items",
"(",
")"
] | 38.4 | 0.010204 | [
"def items(self):\n",
" \"\"\" Return all merged items as iterator \"\"\"\n",
" if not self.pdata and not self.spills:\n",
" return iter(self.data.items())\n",
" return self._external_items()"
] | [
0,
0.019230769230769232,
0,
0,
0.02702702702702703
] | 5 | 0.009252 |
def _parse_hostvar_dir(self, inventory_path):
"""
Parse host_vars dir, if it exists.
"""
# inventory_path could point to a `hosts` file, or to a dir. So we
# construct the location to the `host_vars` differently.
if os.path.isdir(inventory_path):
path = os.path.join(inventory_path, 'host_vars')
else:
path = os.path.join(os.path.dirname(inventory_path), 'host_vars')
self.log.debug("Parsing host vars (dir): {0}".format(path))
if not os.path.exists(path):
self.log.info("No such dir {0}".format(path))
return
for entry in os.listdir(path):
# Skip .git folder
if entry == '.git':
continue
full_path = os.path.join(path, entry)
# file or dir name is the hostname
hostname = strip_exts(entry, ('.yml', '.yaml', '.json'))
if os.path.isfile(full_path):
# Parse contents of file as host vars.
self._parse_hostvar_file(hostname, full_path)
elif os.path.isdir(full_path):
# Parse each file in the directory as a file containing
# variables for the host.
for file_entry in os.listdir(full_path):
p = os.path.join(full_path, file_entry)
if not os.path.isdir(p):
self._parse_hostvar_file(hostname, p) | [
"def",
"_parse_hostvar_dir",
"(",
"self",
",",
"inventory_path",
")",
":",
"# inventory_path could point to a `hosts` file, or to a dir. So we",
"# construct the location to the `host_vars` differently.",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"inventory_path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"inventory_path",
",",
"'host_vars'",
")",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"inventory_path",
")",
",",
"'host_vars'",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Parsing host vars (dir): {0}\"",
".",
"format",
"(",
"path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"No such dir {0}\"",
".",
"format",
"(",
"path",
")",
")",
"return",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"# Skip .git folder",
"if",
"entry",
"==",
"'.git'",
":",
"continue",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"entry",
")",
"# file or dir name is the hostname",
"hostname",
"=",
"strip_exts",
"(",
"entry",
",",
"(",
"'.yml'",
",",
"'.yaml'",
",",
"'.json'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"# Parse contents of file as host vars.",
"self",
".",
"_parse_hostvar_file",
"(",
"hostname",
",",
"full_path",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"full_path",
")",
":",
"# Parse each file in the directory as a file containing",
"# variables for the host.",
"for",
"file_entry",
"in",
"os",
".",
"listdir",
"(",
"full_path",
")",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"full_path",
",",
"file_entry",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"p",
")",
":",
"self",
".",
"_parse_hostvar_file",
"(",
"hostname",
",",
"p",
")"
] | 40.914286 | 0.001364 | [
"def _parse_hostvar_dir(self, inventory_path):\n",
" \"\"\"\n",
" Parse host_vars dir, if it exists.\n",
" \"\"\"\n",
" # inventory_path could point to a `hosts` file, or to a dir. So we\n",
" # construct the location to the `host_vars` differently.\n",
" if os.path.isdir(inventory_path):\n",
" path = os.path.join(inventory_path, 'host_vars')\n",
" else:\n",
" path = os.path.join(os.path.dirname(inventory_path), 'host_vars')\n",
"\n",
" self.log.debug(\"Parsing host vars (dir): {0}\".format(path))\n",
" if not os.path.exists(path):\n",
" self.log.info(\"No such dir {0}\".format(path))\n",
" return\n",
"\n",
" for entry in os.listdir(path):\n",
" # Skip .git folder\n",
" if entry == '.git':\n",
" continue\n",
" full_path = os.path.join(path, entry)\n",
"\n",
" # file or dir name is the hostname\n",
" hostname = strip_exts(entry, ('.yml', '.yaml', '.json'))\n",
"\n",
" if os.path.isfile(full_path):\n",
" # Parse contents of file as host vars.\n",
" self._parse_hostvar_file(hostname, full_path)\n",
" elif os.path.isdir(full_path):\n",
" # Parse each file in the directory as a file containing\n",
" # variables for the host.\n",
" for file_entry in os.listdir(full_path):\n",
" p = os.path.join(full_path, file_entry)\n",
" if not os.path.isdir(p):\n",
" self._parse_hostvar_file(hostname, p)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01639344262295082
] | 35 | 0.002849 |
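A compact sketch of the same host_vars directory walk, collecting hostname-to-variable-file paths instead of parsing them; the extension stripping mirrors the strip_exts helper above:

import os

def list_hostvar_files(host_vars_dir):
    """Map hostname -> list of var file paths under host_vars/."""
    found = {}
    for entry in os.listdir(host_vars_dir):
        if entry == '.git':               # skip .git, as above
            continue
        full = os.path.join(host_vars_dir, entry)
        hostname = entry
        for ext in ('.yml', '.yaml', '.json'):
            if hostname.endswith(ext):
                hostname = hostname[:-len(ext)]
        if os.path.isfile(full):
            found.setdefault(hostname, []).append(full)
        elif os.path.isdir(full):
            for sub in os.listdir(full):
                p = os.path.join(full, sub)
                if not os.path.isdir(p):
                    found.setdefault(hostname, []).append(p)
    return found

# list_hostvar_files('/etc/ansible/host_vars')  # hypothetical path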
def calculate_tetra_zscore(filename):
"""Returns TETRA Z-score for the sequence in the passed file.
- filename - path to sequence file
Calculates mono-, di-, tri- and tetranucleotide frequencies
for each sequence, on each strand, and follows Teeling et al. (2004)
in calculating a corresponding Z-score for each observed
tetranucleotide frequency, dependent on the mono-, di- and tri-
nucleotide frequencies for that input sequence.
"""
# For the Teeling et al. method, the Z-scores require us to count
# mono, di, tri and tetranucleotide sequences - these are stored
# (in order) in the counts tuple
counts = (collections.defaultdict(int), collections.defaultdict(int),
collections.defaultdict(int), collections.defaultdict(int))
for rec in SeqIO.parse(filename, 'fasta'):
for seq in [str(rec.seq).upper(),
str(rec.seq.reverse_complement()).upper()]:
# The Teeling et al. algorithm requires us to consider
# both strand orientations, so monocounts are easy
for base in ('G', 'C', 'T', 'A'):
counts[0][base] += seq.count(base)
# For di, tri and tetranucleotide counts, loop over the
# sequence and its reverse complement, until near the end:
for i in range(len(seq[:-4])):
din, tri, tetra = seq[i:i+2], seq[i:i+3], seq[i:i+4]
counts[1][str(din)] += 1
counts[2][str(tri)] += 1
counts[3][str(tetra)] += 1
# Then clean up the straggling bit at the end:
counts[2][str(seq[-4:-1])] += 1
counts[2][str(seq[-3:])] += 1
counts[1][str(seq[-4:-2])] += 1
counts[1][str(seq[-3:-1])] += 1
counts[1][str(seq[-2:])] += 1
# Following Teeling (2004), calculate expected frequencies for each
# tetranucleotide; we ignore ambiguity symbols
tetra_exp = {}
for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]:
tetra_exp[tet] = 1. * counts[2][tet[:3]] * counts[2][tet[1:]] / \
counts[1][tet[1:3]]
# Following Teeling (2004) we approximate the std dev and Z-score for each
# tetranucleotide
tetra_sd = {}
tetra_z = {}
for tet, exp in list(tetra_exp.items()):
den = counts[1][tet[1:3]]
tetra_sd[tet] = math.sqrt(exp * (den - counts[2][tet[:3]]) *
(den - counts[2][tet[1:]]) / (den * den))
try:
tetra_z[tet] = (counts[3][tet] - exp)/tetra_sd[tet]
except ZeroDivisionError:
# To record if we hit a zero in the estimation of variance
# zeroes = [k for k, v in list(tetra_sd.items()) if v == 0]
tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]])
return tetra_z | [
"def",
"calculate_tetra_zscore",
"(",
"filename",
")",
":",
"# For the Teeling et al. method, the Z-scores require us to count",
"# mono, di, tri and tetranucleotide sequences - these are stored",
"# (in order) in the counts tuple",
"counts",
"=",
"(",
"collections",
".",
"defaultdict",
"(",
"int",
")",
",",
"collections",
".",
"defaultdict",
"(",
"int",
")",
",",
"collections",
".",
"defaultdict",
"(",
"int",
")",
",",
"collections",
".",
"defaultdict",
"(",
"int",
")",
")",
"for",
"rec",
"in",
"SeqIO",
".",
"parse",
"(",
"filename",
",",
"'fasta'",
")",
":",
"for",
"seq",
"in",
"[",
"str",
"(",
"rec",
".",
"seq",
")",
".",
"upper",
"(",
")",
",",
"str",
"(",
"rec",
".",
"seq",
".",
"reverse_complement",
"(",
")",
")",
".",
"upper",
"(",
")",
"]",
":",
"# The Teeling et al. algorithm requires us to consider",
"# both strand orientations, so monocounts are easy",
"for",
"base",
"in",
"(",
"'G'",
",",
"'C'",
",",
"'T'",
",",
"'A'",
")",
":",
"counts",
"[",
"0",
"]",
"[",
"base",
"]",
"+=",
"seq",
".",
"count",
"(",
"base",
")",
"# For di, tri and tetranucleotide counts, loop over the",
"# sequence and its reverse complement, until near the end:",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"seq",
"[",
":",
"-",
"4",
"]",
")",
")",
":",
"din",
",",
"tri",
",",
"tetra",
"=",
"seq",
"[",
"i",
":",
"i",
"+",
"2",
"]",
",",
"seq",
"[",
"i",
":",
"i",
"+",
"3",
"]",
",",
"seq",
"[",
"i",
":",
"i",
"+",
"4",
"]",
"counts",
"[",
"1",
"]",
"[",
"str",
"(",
"din",
")",
"]",
"+=",
"1",
"counts",
"[",
"2",
"]",
"[",
"str",
"(",
"tri",
")",
"]",
"+=",
"1",
"counts",
"[",
"3",
"]",
"[",
"str",
"(",
"tetra",
")",
"]",
"+=",
"1",
"# Then clean up the straggling bit at the end:",
"counts",
"[",
"2",
"]",
"[",
"str",
"(",
"seq",
"[",
"-",
"4",
":",
"-",
"1",
"]",
")",
"]",
"+=",
"1",
"counts",
"[",
"2",
"]",
"[",
"str",
"(",
"seq",
"[",
"-",
"3",
":",
"]",
")",
"]",
"+=",
"1",
"counts",
"[",
"1",
"]",
"[",
"str",
"(",
"seq",
"[",
"-",
"4",
":",
"-",
"2",
"]",
")",
"]",
"+=",
"1",
"counts",
"[",
"1",
"]",
"[",
"str",
"(",
"seq",
"[",
"-",
"3",
":",
"-",
"1",
"]",
")",
"]",
"+=",
"1",
"counts",
"[",
"1",
"]",
"[",
"str",
"(",
"seq",
"[",
"-",
"2",
":",
"]",
")",
"]",
"+=",
"1",
"# Following Teeling (2004), calculate expected frequencies for each",
"# tetranucleotide; we ignore ambiguity symbols",
"tetra_exp",
"=",
"{",
"}",
"for",
"tet",
"in",
"[",
"tetn",
"for",
"tetn",
"in",
"counts",
"[",
"3",
"]",
"if",
"tetra_clean",
"(",
"tetn",
")",
"]",
":",
"tetra_exp",
"[",
"tet",
"]",
"=",
"1.",
"*",
"counts",
"[",
"2",
"]",
"[",
"tet",
"[",
":",
"3",
"]",
"]",
"*",
"counts",
"[",
"2",
"]",
"[",
"tet",
"[",
"1",
":",
"]",
"]",
"/",
"counts",
"[",
"1",
"]",
"[",
"tet",
"[",
"1",
":",
"3",
"]",
"]",
"# Following Teeling (2004) we approximate the std dev and Z-score for each",
"# tetranucleotide",
"tetra_sd",
"=",
"{",
"}",
"tetra_z",
"=",
"{",
"}",
"for",
"tet",
",",
"exp",
"in",
"list",
"(",
"tetra_exp",
".",
"items",
"(",
")",
")",
":",
"den",
"=",
"counts",
"[",
"1",
"]",
"[",
"tet",
"[",
"1",
":",
"3",
"]",
"]",
"tetra_sd",
"[",
"tet",
"]",
"=",
"math",
".",
"sqrt",
"(",
"exp",
"*",
"(",
"den",
"-",
"counts",
"[",
"2",
"]",
"[",
"tet",
"[",
":",
"3",
"]",
"]",
")",
"*",
"(",
"den",
"-",
"counts",
"[",
"2",
"]",
"[",
"tet",
"[",
"1",
":",
"]",
"]",
")",
"/",
"(",
"den",
"*",
"den",
")",
")",
"try",
":",
"tetra_z",
"[",
"tet",
"]",
"=",
"(",
"counts",
"[",
"3",
"]",
"[",
"tet",
"]",
"-",
"exp",
")",
"/",
"tetra_sd",
"[",
"tet",
"]",
"except",
"ZeroDivisionError",
":",
"# To record if we hit a zero in the estimation of variance",
"# zeroes = [k for k, v in list(tetra_sd.items()) if v == 0]",
"tetra_z",
"[",
"tet",
"]",
"=",
"1",
"/",
"(",
"counts",
"[",
"1",
"]",
"[",
"tet",
"[",
"1",
":",
"3",
"]",
"]",
"*",
"counts",
"[",
"1",
"]",
"[",
"tet",
"[",
"1",
":",
"3",
"]",
"]",
")",
"return",
"tetra_z"
] | 49.192982 | 0.00035 | [
"def calculate_tetra_zscore(filename):\n",
" \"\"\"Returns TETRA Z-score for the sequence in the passed file.\n",
"\n",
" - filename - path to sequence file\n",
"\n",
" Calculates mono-, di-, tri- and tetranucleotide frequencies\n",
" for each sequence, on each strand, and follows Teeling et al. (2004)\n",
" in calculating a corresponding Z-score for each observed\n",
" tetranucleotide frequency, dependent on the mono-, di- and tri-\n",
" nucleotide frequencies for that input sequence.\n",
" \"\"\"\n",
" # For the Teeling et al. method, the Z-scores require us to count\n",
" # mono, di, tri and tetranucleotide sequences - these are stored\n",
" # (in order) in the counts tuple\n",
" counts = (collections.defaultdict(int), collections.defaultdict(int),\n",
" collections.defaultdict(int), collections.defaultdict(int))\n",
" for rec in SeqIO.parse(filename, 'fasta'):\n",
" for seq in [str(rec.seq).upper(),\n",
" str(rec.seq.reverse_complement()).upper()]:\n",
" # The Teeling et al. algorithm requires us to consider\n",
" # both strand orientations, so monocounts are easy\n",
" for base in ('G', 'C', 'T', 'A'):\n",
" counts[0][base] += seq.count(base)\n",
" # For di, tri and tetranucleotide counts, loop over the\n",
" # sequence and its reverse complement, until near the end:\n",
" for i in range(len(seq[:-4])):\n",
" din, tri, tetra = seq[i:i+2], seq[i:i+3], seq[i:i+4]\n",
" counts[1][str(din)] += 1\n",
" counts[2][str(tri)] += 1\n",
" counts[3][str(tetra)] += 1\n",
" # Then clean up the straggling bit at the end:\n",
" counts[2][str(seq[-4:-1])] += 1\n",
" counts[2][str(seq[-3:])] += 1\n",
" counts[1][str(seq[-4:-2])] += 1\n",
" counts[1][str(seq[-3:-1])] += 1\n",
" counts[1][str(seq[-2:])] += 1\n",
" # Following Teeling (2004), calculate expected frequencies for each\n",
" # tetranucleotide; we ignore ambiguity symbols\n",
" tetra_exp = {}\n",
" for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]:\n",
" tetra_exp[tet] = 1. * counts[2][tet[:3]] * counts[2][tet[1:]] / \\\n",
" counts[1][tet[1:3]]\n",
" # Following Teeling (2004) we approximate the std dev and Z-score for each\n",
" # tetranucleotide\n",
" tetra_sd = {}\n",
" tetra_z = {}\n",
" for tet, exp in list(tetra_exp.items()):\n",
" den = counts[1][tet[1:3]]\n",
" tetra_sd[tet] = math.sqrt(exp * (den - counts[2][tet[:3]]) *\n",
" (den - counts[2][tet[1:]]) / (den * den))\n",
" try:\n",
" tetra_z[tet] = (counts[3][tet] - exp)/tetra_sd[tet]\n",
" except ZeroDivisionError:\n",
" # To record if we hit a zero in the estimation of variance\n",
" # zeroes = [k for k, v in list(tetra_sd.items()) if v == 0]\n",
" tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]])\n",
" return tetra_z"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 57 | 0.000975 |
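The record above leaves several dependencies implicit: collections, math, and Biopython's SeqIO must be imported, and the tetra_clean filter it calls is defined elsewhere in its module. Below is a minimal sketch of that helper - assuming, from the "ignore ambiguity symbols" comment, that "clean" means built only from unambiguous bases - together with the Teeling et al. (2004) expected-count expression the function applies; expected_tetra_count is an illustrative name, not one from the source.

import re

def tetra_clean(tet):
    # Hypothetical stand-in for the helper the record calls but never defines:
    # accept a tetranucleotide only if it uses the four unambiguous bases.
    return re.match("^[ACGT]{4}$", tet) is not None

def expected_tetra_count(tri_counts, di_counts, tet):
    # Mirrors the expression inside calculate_tetra_zscore:
    #     E[n1 n2 n3 n4] = N(n1 n2 n3) * N(n2 n3 n4) / N(n2 n3)
    return 1.0 * tri_counts[tet[:3]] * tri_counts[tet[1:]] / di_counts[tet[1:3]]

One quirk worth noting: the main counting loop runs i over range(len(seq) - 4), so the final tetranucleotide of each strand (seq[-4:]) appears never to be counted; the tail clean-up afterwards only tops up the di- and trinucleotide tallies.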
def coordinatesFromIndex(index, dimensions):
"""
Translate an index into coordinates, using the given coordinate system.
Similar to ``numpy.unravel_index``.
:param index: (int) The index of the point. The coordinates are expressed as a
single index by using the dimensions as a mixed radix definition. For
example, in dimensions 42x10, the point [1, 4] is index
1*420 + 4*10 = 460.
:param dimensions (list of ints) The coordinate system.
:returns: (list) of coordinates of length ``len(dimensions)``.
"""
coordinates = [0] * len(dimensions)
shifted = index
for i in xrange(len(dimensions) - 1, 0, -1):
coordinates[i] = shifted % dimensions[i]
shifted = shifted / dimensions[i]
coordinates[0] = shifted
return coordinates | [
"def",
"coordinatesFromIndex",
"(",
"index",
",",
"dimensions",
")",
":",
"coordinates",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"dimensions",
")",
"shifted",
"=",
"index",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"dimensions",
")",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"coordinates",
"[",
"i",
"]",
"=",
"shifted",
"%",
"dimensions",
"[",
"i",
"]",
"shifted",
"=",
"shifted",
"/",
"dimensions",
"[",
"i",
"]",
"coordinates",
"[",
"0",
"]",
"=",
"shifted",
"return",
"coordinates"
] | 30.6 | 0.013942 | [
"def coordinatesFromIndex(index, dimensions):\n",
" \"\"\"\n",
" Translate an index into coordinates, using the given coordinate system.\n",
"\n",
" Similar to ``numpy.unravel_index``.\n",
"\n",
" :param index: (int) The index of the point. The coordinates are expressed as a \n",
" single index by using the dimensions as a mixed radix definition. For \n",
" example, in dimensions 42x10, the point [1, 4] is index \n",
" 1*420 + 4*10 = 460.\n",
"\n",
" :param dimensions (list of ints) The coordinate system.\n",
"\n",
" :returns: (list) of coordinates of length ``len(dimensions)``.\n",
" \"\"\"\n",
" coordinates = [0] * len(dimensions)\n",
"\n",
" shifted = index\n",
" for i in xrange(len(dimensions) - 1, 0, -1):\n",
" coordinates[i] = shifted % dimensions[i]\n",
" shifted = shifted / dimensions[i]\n",
"\n",
" coordinates[0] = shifted\n",
"\n",
" return coordinates"
] | [
0,
0.16666666666666666,
0,
0,
0,
0,
0.024390243902439025,
0.0125,
0.015151515151515152,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0.05555555555555555,
0.02127659574468085,
0,
0,
0,
0.037037037037037035,
0,
0.1
] | 25 | 0.018356 |
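coordinatesFromIndex is written in Python 2 idiom: xrange, and a bare / that relies on integer division. A sketch of a Python 3 port follows - the snake_case name is illustrative - cross-checked against numpy.unravel_index, which the docstring cites as the equivalent.

import numpy as np

def coordinates_from_index(index, dimensions):
    # Same mixed-radix peel-off as the record above, with range and
    # floor division so the behaviour survives the move to Python 3.
    coordinates = [0] * len(dimensions)
    shifted = index
    for i in range(len(dimensions) - 1, 0, -1):
        coordinates[i] = shifted % dimensions[i]    # digit at this radix position
        shifted = shifted // dimensions[i]          # remaining higher-order index
    coordinates[0] = shifted
    return coordinates

# In dimensions 42x10 the point [1, 4] has index 1*10 + 4 = 14:
assert coordinates_from_index(14, [42, 10]) == [1, 4]
assert tuple(np.unravel_index(14, (42, 10))) == (1, 4)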
def check_sla(self, sla, diff_metric):
"""
Check whether the SLA has passed or failed
"""
try:
      if sla.display == '%':
diff_val = float(diff_metric['percent_diff'])
else:
diff_val = float(diff_metric['absolute_diff'])
except ValueError:
return False
if not (sla.check_sla_passed(diff_val)):
self.sla_failures += 1
self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))
return True | [
"def",
"check_sla",
"(",
"self",
",",
"sla",
",",
"diff_metric",
")",
":",
"try",
":",
"if",
"sla",
".",
"display",
"is",
"'%'",
":",
"diff_val",
"=",
"float",
"(",
"diff_metric",
"[",
"'percent_diff'",
"]",
")",
"else",
":",
"diff_val",
"=",
"float",
"(",
"diff_metric",
"[",
"'absolute_diff'",
"]",
")",
"except",
"ValueError",
":",
"return",
"False",
"if",
"not",
"(",
"sla",
".",
"check_sla_passed",
"(",
"diff_val",
")",
")",
":",
"self",
".",
"sla_failures",
"+=",
"1",
"self",
".",
"sla_failure_list",
".",
"append",
"(",
"DiffSLAFailure",
"(",
"sla",
",",
"diff_metric",
")",
")",
"return",
"True"
] | 29.8 | 0.013015 | [
"def check_sla(self, sla, diff_metric):\n",
" \"\"\"\n",
" Check whether the SLA has passed or failed\n",
" \"\"\"\n",
" try:\n",
" if sla.display is '%':\n",
" diff_val = float(diff_metric['percent_diff'])\n",
" else:\n",
" diff_val = float(diff_metric['absolute_diff'])\n",
" except ValueError:\n",
" return False\n",
" if not (sla.check_sla_passed(diff_val)):\n",
" self.sla_failures += 1\n",
" self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))\n",
" return True"
] | [
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0.08333333333333333,
0,
0,
0.05263157894736842,
0,
0.034482758620689655,
0.014492753623188406,
0.06666666666666667
] | 15 | 0.019073 |
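check_sla depends on collaborators this record does not show: an SLA object exposing display and check_sla_passed, a DiffSLAFailure record type, and the instance counters sla_failures and sla_failure_list. The stand-in below is a deliberately minimal assumption - it models only the attributes the method touches, with invented pass/fail semantics - but it illustrates how the method consumes a diff-metric dict.

class StubSLA(object):
    # Hypothetical stand-in for the real SLA class, which is defined elsewhere.
    def __init__(self, display, threshold):
        self.display = display        # '%' selects percent_diff, else absolute_diff
        self.threshold = threshold    # assumed pass/fail bound

    def check_sla_passed(self, diff_val):
        # Assumed semantics: the SLA passes while the diff stays within the bound.
        return abs(diff_val) <= self.threshold

sla = StubSLA('%', threshold=5.0)
diff_metric = {'percent_diff': '7.5', 'absolute_diff': '120'}
print(sla.check_sla_passed(float(diff_metric['percent_diff'])))   # False: 7.5 > 5.0

The try/except around float() matters because the diff values may arrive as strings that fail conversion, in which case the method returns False before recording any SLA failure.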