[Dataset preview, one code sample per row. Columns: text (string, lengths 75 to 104k) | code_tokens (sequence) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 0.18) | texts (sequence) | scores (sequence) | num_lines (int64, 3 to 2.77k) | avg_score (float64, 0 to 0.37)]

def from_fortran_file(cls, fortran_file: str, tmpdir: str = "."):
    """Builds GrFN object from a Fortran program."""
    stem = Path(fortran_file).stem
    if tmpdir == "." and "/" in fortran_file:
        tmpdir = Path(fortran_file).parent
    preprocessed_fortran_file = f"{tmpdir}/{stem}_preprocessed.f"
    lambdas_path = f"{tmpdir}/{stem}_lambdas.py"
    json_filename = stem + ".json"

    with open(fortran_file, "r") as f:
        inputLines = f.readlines()

    with open(preprocessed_fortran_file, "w") as f:
        f.write(preprocessor.process(inputLines))

    xml_string = sp.run(
        [
            "java",
            "fortran.ofp.FrontEnd",
            "--class",
            "fortran.ofp.XMLPrinter",
            "--verbosity",
            "0",
            preprocessed_fortran_file,
        ],
        stdout=sp.PIPE,
    ).stdout
    trees = [ET.fromstring(xml_string)]
    comments = get_comments.get_comments(preprocessed_fortran_file)
    os.remove(preprocessed_fortran_file)
    xml_to_json_translator = translate.XMLToJSONTranslator()
    outputDict = xml_to_json_translator.analyze(trees, comments)
    pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]

    G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem)
    return G
"def",
"from_fortran_file",
"(",
"cls",
",",
"fortran_file",
":",
"str",
",",
"tmpdir",
":",
"str",
"=",
"\".\"",
")",
":",
"stem",
"=",
"Path",
"(",
"fortran_file",
")",
".",
"stem",
"if",
"tmpdir",
"==",
"\".\"",
"and",
"\"/\"",
"in",
"fortran_file",
":",
"tmpdir",
"=",
"Path",
"(",
"fortran_file",
")",
".",
"parent",
"preprocessed_fortran_file",
"=",
"f\"{tmpdir}/{stem}_preprocessed.f\"",
"lambdas_path",
"=",
"f\"{tmpdir}/{stem}_lambdas.py\"",
"json_filename",
"=",
"stem",
"+",
"\".json\"",
"with",
"open",
"(",
"fortran_file",
",",
"\"r\"",
")",
"as",
"f",
":",
"inputLines",
"=",
"f",
".",
"readlines",
"(",
")",
"with",
"open",
"(",
"preprocessed_fortran_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"preprocessor",
".",
"process",
"(",
"inputLines",
")",
")",
"xml_string",
"=",
"sp",
".",
"run",
"(",
"[",
"\"java\"",
",",
"\"fortran.ofp.FrontEnd\"",
",",
"\"--class\"",
",",
"\"fortran.ofp.XMLPrinter\"",
",",
"\"--verbosity\"",
",",
"\"0\"",
",",
"preprocessed_fortran_file",
",",
"]",
",",
"stdout",
"=",
"sp",
".",
"PIPE",
",",
")",
".",
"stdout",
"trees",
"=",
"[",
"ET",
".",
"fromstring",
"(",
"xml_string",
")",
"]",
"comments",
"=",
"get_comments",
".",
"get_comments",
"(",
"preprocessed_fortran_file",
")",
"os",
".",
"remove",
"(",
"preprocessed_fortran_file",
")",
"xml_to_json_translator",
"=",
"translate",
".",
"XMLToJSONTranslator",
"(",
")",
"outputDict",
"=",
"xml_to_json_translator",
".",
"analyze",
"(",
"trees",
",",
"comments",
")",
"pySrc",
"=",
"pyTranslate",
".",
"create_python_source_list",
"(",
"outputDict",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"G",
"=",
"cls",
".",
"from_python_src",
"(",
"pySrc",
",",
"lambdas_path",
",",
"json_filename",
",",
"stem",
")",
"return",
"G"
] | 38.055556 | 0.001423 | [
"def from_fortran_file(cls, fortran_file: str, tmpdir: str = \".\"):\n",
" \"\"\"Builds GrFN object from a Fortran program.\"\"\"\n",
" stem = Path(fortran_file).stem\n",
" if tmpdir == \".\" and \"/\" in fortran_file:\n",
" tmpdir = Path(fortran_file).parent\n",
" preprocessed_fortran_file = f\"{tmpdir}/{stem}_preprocessed.f\"\n",
" lambdas_path = f\"{tmpdir}/{stem}_lambdas.py\"\n",
" json_filename = stem + \".json\"\n",
"\n",
" with open(fortran_file, \"r\") as f:\n",
" inputLines = f.readlines()\n",
"\n",
" with open(preprocessed_fortran_file, \"w\") as f:\n",
" f.write(preprocessor.process(inputLines))\n",
"\n",
" xml_string = sp.run(\n",
" [\n",
" \"java\",\n",
" \"fortran.ofp.FrontEnd\",\n",
" \"--class\",\n",
" \"fortran.ofp.XMLPrinter\",\n",
" \"--verbosity\",\n",
" \"0\",\n",
" preprocessed_fortran_file,\n",
" ],\n",
" stdout=sp.PIPE,\n",
" ).stdout\n",
" trees = [ET.fromstring(xml_string)]\n",
" comments = get_comments.get_comments(preprocessed_fortran_file)\n",
" os.remove(preprocessed_fortran_file)\n",
" xml_to_json_translator = translate.XMLToJSONTranslator()\n",
" outputDict = xml_to_json_translator.analyze(trees, comments)\n",
" pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]\n",
"\n",
" G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem)\n",
" return G"
] | [
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 36 | 0.002223 |
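
The `sp.run(...).stdout` call above is the piece that captures the Open Fortran Parser's XML output. A minimal, runnable sketch of that capture-and-parse idiom, with `echo` standing in for the Java front end (an assumption, so the example works without a JVM):

import subprocess as sp
import xml.etree.ElementTree as ET

# Run a command, capture its stdout as bytes, and parse the bytes as XML --
# the same pattern used for "java fortran.ofp.FrontEnd ..." above.
xml_string = sp.run(
    ["echo", "<program><unit name='demo'/></program>"],
    stdout=sp.PIPE,
).stdout
tree = ET.fromstring(xml_string)
print(tree.tag, tree[0].get("name"))  # -> program demo
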
def get_serializer(context):
    """Returns a serializer for a given context"""
    cluster_config = context.get_cluster_config()
    serializer_clsname = cluster_config.get(constants.TOPOLOGY_SERIALIZER_CLASSNAME, None)
    if serializer_clsname is None:
        return PythonSerializer()
    else:
        try:
            topo_pex_path = context.get_topology_pex_path()
            pex_loader.load_pex(topo_pex_path)
            serializer_cls = pex_loader.import_and_get_class(topo_pex_path, serializer_clsname)
            serializer = serializer_cls()
            return serializer
        except Exception as e:
            raise RuntimeError("Error with loading custom serializer class: %s, with error message: %s"
                               % (serializer_clsname, str(e)))
"def",
"get_serializer",
"(",
"context",
")",
":",
"cluster_config",
"=",
"context",
".",
"get_cluster_config",
"(",
")",
"serializer_clsname",
"=",
"cluster_config",
".",
"get",
"(",
"constants",
".",
"TOPOLOGY_SERIALIZER_CLASSNAME",
",",
"None",
")",
"if",
"serializer_clsname",
"is",
"None",
":",
"return",
"PythonSerializer",
"(",
")",
"else",
":",
"try",
":",
"topo_pex_path",
"=",
"context",
".",
"get_topology_pex_path",
"(",
")",
"pex_loader",
".",
"load_pex",
"(",
"topo_pex_path",
")",
"serializer_cls",
"=",
"pex_loader",
".",
"import_and_get_class",
"(",
"topo_pex_path",
",",
"serializer_clsname",
")",
"serializer",
"=",
"serializer_cls",
"(",
")",
"return",
"serializer",
"except",
"Exception",
"as",
"e",
":",
"raise",
"RuntimeError",
"(",
"\"Error with loading custom serializer class: %s, with error message: %s\"",
"%",
"(",
"serializer_clsname",
",",
"str",
"(",
"e",
")",
")",
")"
] | 46 | 0.009321 | [
"def get_serializer(context):\n",
" \"\"\"Returns a serializer for a given context\"\"\"\n",
" cluster_config = context.get_cluster_config()\n",
" serializer_clsname = cluster_config.get(constants.TOPOLOGY_SERIALIZER_CLASSNAME, None)\n",
" if serializer_clsname is None:\n",
" return PythonSerializer()\n",
" else:\n",
" try:\n",
" topo_pex_path = context.get_topology_pex_path()\n",
" pex_loader.load_pex(topo_pex_path)\n",
" serializer_cls = pex_loader.import_and_get_class(topo_pex_path, serializer_clsname)\n",
" serializer = serializer_cls()\n",
" return serializer\n",
" except Exception as e:\n",
" raise RuntimeError(\"Error with loading custom serializer class: %s, with error message: %s\"\n",
" % (serializer_clsname, str(e)))"
] | [
0,
0,
0,
0.01098901098901099,
0,
0.03125,
0,
0.09090909090909091,
0,
0,
0.010869565217391304,
0,
0,
0.034482758620689655,
0.01,
0.017241379310344827
] | 16 | 0.012859 |
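
The custom-serializer branch is a load-class-by-dotted-name pattern; `pex_loader.import_and_get_class` is Heron-specific, but the same idea is expressible with the standard library alone. A runnable sketch (`json.JSONEncoder` is only a stand-in class name):

import importlib

def load_class(dotted_name):
    # Split "pkg.module.Class" into a module path and a class name,
    # import the module, then pull the class attribute off it.
    module_path, _, cls_name = dotted_name.rpartition(".")
    return getattr(importlib.import_module(module_path), cls_name)

serializer_cls = load_class("json.JSONEncoder")  # stand-in for a custom class
print(serializer_cls().encode({"a": 1}))  # -> {"a": 1}
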
def reset_parameter(**kwargs):
    """Create a callback that resets the parameter after the first iteration.

    Note
    ----
    The initial parameter will still take in-effect on first iteration.

    Parameters
    ----------
    **kwargs : value should be list or function
        List of parameters for each boosting round
        or a customized function that calculates the parameter in terms of
        current number of round (e.g. yields learning rate decay).
        If list lst, parameter = lst[current_round].
        If function func, parameter = func(current_round).

    Returns
    -------
    callback : function
        The callback that resets the parameter after the first iteration.
    """
    def _callback(env):
        new_parameters = {}
        for key, value in kwargs.items():
            if key in ['num_class', 'num_classes',
                       'boosting', 'boost', 'boosting_type',
                       'metric', 'metrics', 'metric_types']:
                raise RuntimeError("cannot reset {} during training".format(repr(key)))
            if isinstance(value, list):
                if len(value) != env.end_iteration - env.begin_iteration:
                    raise ValueError("Length of list {} has to equal to 'num_boost_round'."
                                     .format(repr(key)))
                new_param = value[env.iteration - env.begin_iteration]
            else:
                new_param = value(env.iteration - env.begin_iteration)
            if new_param != env.params.get(key, None):
                new_parameters[key] = new_param
        if new_parameters:
            env.model.reset_parameter(new_parameters)
            env.params.update(new_parameters)
    _callback.before_iteration = True
    _callback.order = 10
    return _callback
"def",
"reset_parameter",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"_callback",
"(",
"env",
")",
":",
"new_parameters",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"[",
"'num_class'",
",",
"'num_classes'",
",",
"'boosting'",
",",
"'boost'",
",",
"'boosting_type'",
",",
"'metric'",
",",
"'metrics'",
",",
"'metric_types'",
"]",
":",
"raise",
"RuntimeError",
"(",
"\"cannot reset {} during training\"",
".",
"format",
"(",
"repr",
"(",
"key",
")",
")",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"len",
"(",
"value",
")",
"!=",
"env",
".",
"end_iteration",
"-",
"env",
".",
"begin_iteration",
":",
"raise",
"ValueError",
"(",
"\"Length of list {} has to equal to 'num_boost_round'.\"",
".",
"format",
"(",
"repr",
"(",
"key",
")",
")",
")",
"new_param",
"=",
"value",
"[",
"env",
".",
"iteration",
"-",
"env",
".",
"begin_iteration",
"]",
"else",
":",
"new_param",
"=",
"value",
"(",
"env",
".",
"iteration",
"-",
"env",
".",
"begin_iteration",
")",
"if",
"new_param",
"!=",
"env",
".",
"params",
".",
"get",
"(",
"key",
",",
"None",
")",
":",
"new_parameters",
"[",
"key",
"]",
"=",
"new_param",
"if",
"new_parameters",
":",
"env",
".",
"model",
".",
"reset_parameter",
"(",
"new_parameters",
")",
"env",
".",
"params",
".",
"update",
"(",
"new_parameters",
")",
"_callback",
".",
"before_iteration",
"=",
"True",
"_callback",
".",
"order",
"=",
"10",
"return",
"_callback"
] | 41.023256 | 0.001661 | [
"def reset_parameter(**kwargs):\n",
" \"\"\"Create a callback that resets the parameter after the first iteration.\n",
"\n",
" Note\n",
" ----\n",
" The initial parameter will still take in-effect on first iteration.\n",
"\n",
" Parameters\n",
" ----------\n",
" **kwargs : value should be list or function\n",
" List of parameters for each boosting round\n",
" or a customized function that calculates the parameter in terms of\n",
" current number of round (e.g. yields learning rate decay).\n",
" If list lst, parameter = lst[current_round].\n",
" If function func, parameter = func(current_round).\n",
"\n",
" Returns\n",
" -------\n",
" callback : function\n",
" The callback that resets the parameter after the first iteration.\n",
" \"\"\"\n",
" def _callback(env):\n",
" new_parameters = {}\n",
" for key, value in kwargs.items():\n",
" if key in ['num_class', 'num_classes',\n",
" 'boosting', 'boost', 'boosting_type',\n",
" 'metric', 'metrics', 'metric_types']:\n",
" raise RuntimeError(\"cannot reset {} during training\".format(repr(key)))\n",
" if isinstance(value, list):\n",
" if len(value) != env.end_iteration - env.begin_iteration:\n",
" raise ValueError(\"Length of list {} has to equal to 'num_boost_round'.\"\n",
" .format(repr(key)))\n",
" new_param = value[env.iteration - env.begin_iteration]\n",
" else:\n",
" new_param = value(env.iteration - env.begin_iteration)\n",
" if new_param != env.params.get(key, None):\n",
" new_parameters[key] = new_param\n",
" if new_parameters:\n",
" env.model.reset_parameter(new_parameters)\n",
" env.params.update(new_parameters)\n",
" _callback.before_iteration = True\n",
" _callback.order = 10\n",
" return _callback"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 43 | 0.00168 |
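
This is the shape of a LightGBM callback factory: the inner `_callback(env)` closure runs before each boosting round (`before_iteration = True`). A usage sketch, assuming LightGBM and NumPy are installed and `reset_parameter` above is in scope; the synthetic data and decay schedule are assumptions:

import lightgbm as lgb
import numpy as np

X, y = np.random.rand(200, 5), np.random.rand(200)
train_set = lgb.Dataset(X, label=y)

booster = lgb.train(
    {"objective": "regression", "verbosity": -1},
    train_set,
    num_boost_round=10,
    # Decay the learning rate by 1% per round via the callback above.
    callbacks=[reset_parameter(learning_rate=lambda i: 0.1 * (0.99 ** i))],
)
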
def _filter(self, filename):
    """
    return 'true' if filename doesn't match name_filter regex and should be filtered out of the list.
    @param filename:
    @return:
    """
    return self.name_filter is not None and re.search(self.name_filter, filename) is None
"def",
"_filter",
"(",
"self",
",",
"filename",
")",
":",
"return",
"self",
".",
"name_filter",
"is",
"not",
"None",
"and",
"re",
".",
"search",
"(",
"self",
".",
"name_filter",
",",
"filename",
")",
"is",
"None"
] | 41.142857 | 0.013605 | [
"def _filter(self, filename):\n",
" \"\"\"\n",
" return 'true' if filename doesn't match name_filter regex and should be filtered out of the list.\n",
" @param filename:\n",
" @return:\n",
" \"\"\"\n",
" return self.name_filter is not None and re.search(self.name_filter, filename) is None"
] | [
0,
0.08333333333333333,
0.009433962264150943,
0,
0,
0,
0.021505376344086023
] | 7 | 0.016325 |
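
The predicate reads: keep everything when no filter is configured; otherwise drop names that fail the regex search. A standalone sketch (the `_Demo` holder class is an assumption, since the enclosing class isn't shown):

import re

class _Demo:
    def __init__(self, name_filter):
        self.name_filter = name_filter

    def _filter(self, filename):  # same logic as above
        return self.name_filter is not None and re.search(self.name_filter, filename) is None

print(_Demo(r"\.csv$")._filter("data.csv"))   # False -> keep (name matches)
print(_Demo(r"\.csv$")._filter("notes.txt"))  # True  -> filter out
print(_Demo(None)._filter("anything"))        # False -> no filter configured
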
def load_source(source):
    """
    Common entry point for loading some form of raw swagger schema.

    Supports:
        - python object (dictionary-like)
        - path to yaml file
        - path to json file
        - file object (json or yaml).
        - json string.
        - yaml string.
    """
    if isinstance(source, collections.Mapping):  # collections.abc.Mapping on Python >= 3.3
        return deepcopy(source)
    elif hasattr(source, 'read') and callable(source.read):
        raw_source = source.read()
    elif os.path.exists(os.path.expanduser(str(source))):
        with open(os.path.expanduser(str(source)), 'r') as source_file:
            raw_source = source_file.read()
    elif isinstance(source, six.string_types):
        parts = urlparse.urlparse(source)
        if parts.scheme and parts.netloc:
            response = requests.get(source)
            if isinstance(response.content, six.binary_type):
                raw_source = six.text_type(response.content, encoding='utf-8')
            else:
                raw_source = response.content
        else:
            raw_source = source

    try:
        try:
            return json.loads(raw_source)
        except ValueError:
            pass

        try:
            return yaml.safe_load(raw_source)
        except (yaml.scanner.ScannerError, yaml.parser.ParserError):
            pass
    except NameError:
        pass

    raise ValueError(
        "Unable to parse `{0}`. Tried yaml and json.".format(source),
    )
"def",
"load_source",
"(",
"source",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"collections",
".",
"Mapping",
")",
":",
"return",
"deepcopy",
"(",
"source",
")",
"elif",
"hasattr",
"(",
"source",
",",
"'read'",
")",
"and",
"callable",
"(",
"source",
".",
"read",
")",
":",
"raw_source",
"=",
"source",
".",
"read",
"(",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"str",
"(",
"source",
")",
")",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"str",
"(",
"source",
")",
")",
",",
"'r'",
")",
"as",
"source_file",
":",
"raw_source",
"=",
"source_file",
".",
"read",
"(",
")",
"elif",
"isinstance",
"(",
"source",
",",
"six",
".",
"string_types",
")",
":",
"parts",
"=",
"urlparse",
".",
"urlparse",
"(",
"source",
")",
"if",
"parts",
".",
"scheme",
"and",
"parts",
".",
"netloc",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"source",
")",
"if",
"isinstance",
"(",
"response",
".",
"content",
",",
"six",
".",
"binary_type",
")",
":",
"raw_source",
"=",
"six",
".",
"text_type",
"(",
"response",
".",
"content",
",",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"raw_source",
"=",
"response",
".",
"content",
"else",
":",
"raw_source",
"=",
"source",
"try",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"raw_source",
")",
"except",
"ValueError",
":",
"pass",
"try",
":",
"return",
"yaml",
".",
"safe_load",
"(",
"raw_source",
")",
"except",
"(",
"yaml",
".",
"scanner",
".",
"ScannerError",
",",
"yaml",
".",
"parser",
".",
"ParserError",
")",
":",
"pass",
"except",
"NameError",
":",
"pass",
"raise",
"ValueError",
"(",
"\"Unable to parse `{0}`. Tried yaml and json.\"",
".",
"format",
"(",
"source",
")",
",",
")"
] | 30.978261 | 0.00068 | [
"def load_source(source):\n",
" \"\"\"\n",
" Common entry point for loading some form of raw swagger schema.\n",
"\n",
" Supports:\n",
" - python object (dictionary-like)\n",
" - path to yaml file\n",
" - path to json file\n",
" - file object (json or yaml).\n",
" - json string.\n",
" - yaml string.\n",
" \"\"\"\n",
" if isinstance(source, collections.Mapping):\n",
" return deepcopy(source)\n",
" elif hasattr(source, 'read') and callable(source.read):\n",
" raw_source = source.read()\n",
" elif os.path.exists(os.path.expanduser(str(source))):\n",
" with open(os.path.expanduser(str(source)), 'r') as source_file:\n",
" raw_source = source_file.read()\n",
" elif isinstance(source, six.string_types):\n",
" parts = urlparse.urlparse(source)\n",
" if parts.scheme and parts.netloc:\n",
" response = requests.get(source)\n",
" if isinstance(response.content, six.binary_type):\n",
" raw_source = six.text_type(response.content, encoding='utf-8')\n",
" else:\n",
" raw_source = response.content\n",
" else:\n",
" raw_source = source\n",
"\n",
" try:\n",
" try:\n",
" return json.loads(raw_source)\n",
" except ValueError:\n",
" pass\n",
"\n",
" try:\n",
" return yaml.safe_load(raw_source)\n",
" except (yaml.scanner.ScannerError, yaml.parser.ParserError):\n",
" pass\n",
" except NameError:\n",
" pass\n",
"\n",
" raise ValueError(\n",
" \"Unable to parse `{0}`. Tried yaml and json.\".format(source),\n",
" )"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2
] | 46 | 0.004348 |
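
Assuming `load_source` and the imports it relies on (six, PyYAML, requests, and the `collections`/`urlparse` aliases) are in scope, the three cheapest input forms all normalize to a dict; a bare string falls through to the JSON-then-YAML attempts at the bottom:

print(load_source({"swagger": "2.0"}))    # mapping: returned as a deep copy
print(load_source('{"swagger": "2.0"}'))  # JSON string: json.loads succeeds
print(load_source("swagger: '2.0'"))      # YAML string: json fails, yaml parses
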
def metric(self, slug, num=1, category=None, expire=None, date=None):
    """Records a metric, creating it if it doesn't exist or incrementing it
    if it does. All metrics are prefixed with 'm', and automatically
    aggregate for Seconds, Minutes, Hours, Day, Week, Month, and Year.

    Parameters:

    * ``slug`` -- a unique value to identify the metric; used in
      construction of redis keys (see below).
    * ``num`` -- Set or Increment the metric by this number; default is 1.
    * ``category`` -- (optional) Assign the metric to a Category (a string)
    * ``expire`` -- (optional) Specify the number of seconds in which the
      metric will expire.
    * ``date`` -- (optional) Specify the timestamp for the metric; default
      used to build the keys will be the current date and time in UTC form.

    Redis keys for each metric (slug) take the form:

        m:<slug>:s:<yyyy-mm-dd-hh-mm-ss>  # Second
        m:<slug>:i:<yyyy-mm-dd-hh-mm>     # Minute
        m:<slug>:h:<yyyy-mm-dd-hh>        # Hour
        m:<slug>:<yyyy-mm-dd>             # Day
        m:<slug>:w:<yyyy-num>             # Week (year - week number)
        m:<slug>:m:<yyyy-mm>              # Month
        m:<slug>:y:<yyyy>                 # Year

    """
    # Add the slug to the set of metric slugs
    self.r.sadd(self._metric_slugs_key, slug)

    if category:
        self._categorize(slug, category)

    # Increment keys. NOTE: current redis-py (2.7.2) doesn't include an
    # incrby method; .incr accepts a second ``amount`` parameter.
    keys = self._build_keys(slug, date=date)

    # Use a pipeline to speed up incrementing multiple keys
    pipe = self.r.pipeline()
    for key in keys:
        pipe.incr(key, num)
        if expire:
            pipe.expire(key, expire)
    pipe.execute()
"def",
"metric",
"(",
"self",
",",
"slug",
",",
"num",
"=",
"1",
",",
"category",
"=",
"None",
",",
"expire",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"# Add the slug to the set of metric slugs",
"self",
".",
"r",
".",
"sadd",
"(",
"self",
".",
"_metric_slugs_key",
",",
"slug",
")",
"if",
"category",
":",
"self",
".",
"_categorize",
"(",
"slug",
",",
"category",
")",
"# Increment keys. NOTE: current redis-py (2.7.2) doesn't include an",
"# incrby method; .incr accepts a second ``amount`` parameter.",
"keys",
"=",
"self",
".",
"_build_keys",
"(",
"slug",
",",
"date",
"=",
"date",
")",
"# Use a pipeline to speed up incrementing multiple keys",
"pipe",
"=",
"self",
".",
"r",
".",
"pipeline",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"pipe",
".",
"incr",
"(",
"key",
",",
"num",
")",
"if",
"expire",
":",
"pipe",
".",
"expire",
"(",
"key",
",",
"expire",
")",
"pipe",
".",
"execute",
"(",
")"
] | 42.886364 | 0.001036 | [
"def metric(self, slug, num=1, category=None, expire=None, date=None):\n",
" \"\"\"Records a metric, creating it if it doesn't exist or incrementing it\n",
" if it does. All metrics are prefixed with 'm', and automatically\n",
" aggregate for Seconds, Minutes, Hours, Day, Week, Month, and Year.\n",
"\n",
" Parameters:\n",
"\n",
" * ``slug`` -- a unique value to identify the metric; used in\n",
" construction of redis keys (see below).\n",
" * ``num`` -- Set or Increment the metric by this number; default is 1.\n",
" * ``category`` -- (optional) Assign the metric to a Category (a string)\n",
" * ``expire`` -- (optional) Specify the number of seconds in which the\n",
" metric will expire.\n",
" * ``date`` -- (optional) Specify the timestamp for the metric; default\n",
" used to build the keys will be the current date and time in UTC form.\n",
"\n",
" Redis keys for each metric (slug) take the form:\n",
"\n",
" m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second\n",
" m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute\n",
" m:<slug>:h:<yyyy-mm-dd-hh> # Hour\n",
" m:<slug>:<yyyy-mm-dd> # Day\n",
" m:<slug>:w:<yyyy-num> # Week (year - week number)\n",
" m:<slug>:m:<yyyy-mm> # Month\n",
" m:<slug>:y:<yyyy> # Year\n",
"\n",
" \"\"\"\n",
" # Add the slug to the set of metric slugs\n",
" self.r.sadd(self._metric_slugs_key, slug)\n",
"\n",
" if category:\n",
" self._categorize(slug, category)\n",
"\n",
" # Increment keys. NOTE: current redis-py (2.7.2) doesn't include an\n",
" # incrby method; .incr accepts a second ``amount`` parameter.\n",
" keys = self._build_keys(slug, date=date)\n",
"\n",
" # Use a pipeline to speed up incrementing multiple keys\n",
" pipe = self.r.pipeline()\n",
" for key in keys:\n",
" pipe.incr(key, num)\n",
" if expire:\n",
" pipe.expire(key, expire)\n",
" pipe.execute()"
] | [
0,
0.0125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 44 | 0.001317 |
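
The pipeline section is plain redis-py: queue several INCR/EXPIRE commands and flush them in one round trip. A runnable sketch of the same multi-granularity increment, assuming a Redis server on localhost; the slug and key formats follow the docstring, but only three granularities are shown:

import datetime
import redis

r = redis.Redis()  # assumes a local Redis server
now = datetime.datetime.utcnow()
slug = "page-views"  # hypothetical metric slug
keys = [
    "m:{}:{:%Y-%m-%d}".format(slug, now),  # Day
    "m:{}:m:{:%Y-%m}".format(slug, now),   # Month
    "m:{}:y:{:%Y}".format(slug, now),      # Year
]
pipe = r.pipeline()
for key in keys:
    pipe.incr(key, 1)
    pipe.expire(key, 3600)  # optional TTL, as in the ``expire`` branch
print(pipe.execute())  # one list of results for the whole batch
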
def weather_from_dictionary(d):
    """
    Builds a *Weather* object out of a data dictionary. Only certain
    properties of the dictionary are used: if these properties are not
    found or cannot be read, an error is issued.

    :param d: a data dictionary
    :type d: dict
    :returns: a *Weather* instance
    :raises: *KeyError* if it is impossible to find or read the data
        needed to build the instance

    """
    # -- times
    if 'dt' in d:
        reference_time = d['dt']
    elif 'dt' in d['last']:
        reference_time = d['last']['dt']
    if 'sys' in d and 'sunset' in d['sys']:
        sunset_time = d['sys']['sunset']
    else:
        sunset_time = 0
    if 'sys' in d and 'sunrise' in d['sys']:
        sunrise_time = d['sys']['sunrise']
    else:
        sunrise_time = 0
    # -- calc
    if 'calc' in d:
        if 'dewpoint' in d['calc']:
            dewpoint = d['calc']['dewpoint']
        else:
            dewpoint = None
        if 'humidex' in d['calc']:
            humidex = d['calc']['humidex']
        else:
            humidex = None
        if 'heatindex' in d['calc']:
            heat_index = d['calc']['heatindex']
        else:
            heat_index = None
    elif 'last' in d:
        if 'calc' in d['last']:
            if 'dewpoint' in d['last']['calc']:
                dewpoint = d['last']['calc']['dewpoint']
            else:
                dewpoint = None
            if 'humidex' in d['last']['calc']:
                humidex = d['last']['calc']['humidex']
            else:
                humidex = None
            if 'heatindex' in d['last']['calc']:
                heat_index = d['last']['calc']['heatindex']
            else:
                heat_index = None
    else:
        dewpoint = None
        humidex = None
        heat_index = None
    # -- visibility
    if 'visibility' in d:
        if isinstance(d['visibility'], int):
            visibility_distance = d['visibility']
        elif 'distance' in d['visibility']:
            visibility_distance = d['visibility']['distance']
        else:
            visibility_distance = None
    elif 'last' in d and 'visibility' in d['last']:
        if isinstance(d['last']['visibility'], int):
            visibility_distance = d['last']['visibility']
        elif 'distance' in d['last']['visibility']:
            visibility_distance = d['last']['visibility']['distance']
        else:
            visibility_distance = None
    else:
        visibility_distance = None
    # -- clouds
    if 'clouds' in d:
        if isinstance(d['clouds'], int) or isinstance(d['clouds'], float):
            clouds = d['clouds']
        elif 'all' in d['clouds']:
            clouds = d['clouds']['all']
        else:
            clouds = 0
    else:
        clouds = 0
    # -- rain
    if 'rain' in d:
        if isinstance(d['rain'], int) or isinstance(d['rain'], float):
            rain = {'all': d['rain']}
        else:
            if d['rain'] is not None:
                rain = d['rain'].copy()
            else:
                rain = dict()
    else:
        rain = dict()
    # -- wind
    if 'wind' in d and d['wind'] is not None:
        wind = d['wind'].copy()
    elif 'last' in d:
        if 'wind' in d['last'] and d['last']['wind'] is not None:
            wind = d['last']['wind'].copy()
        else:
            wind = dict()
    else:
        wind = dict()
    if 'speed' in d:
        wind['speed'] = d['speed']
    if 'deg' in d:
        wind['deg'] = d['deg']
    # -- humidity
    if 'humidity' in d:
        humidity = d['humidity']
    elif 'main' in d and 'humidity' in d['main']:
        humidity = d['main']['humidity']
    elif 'last' in d and 'main' in d['last'] and 'humidity' in d['last']['main']:
        humidity = d['last']['main']['humidity']
    else:
        humidity = 0
    # -- snow
    if 'snow' in d:
        if isinstance(d['snow'], int) or isinstance(d['snow'], float):
            snow = {'all': d['snow']}
        else:
            if d['snow'] is not None:
                snow = d['snow'].copy()
            else:
                snow = dict()
    else:
        snow = dict()
    # -- pressure
    if 'pressure' in d:
        atm_press = d['pressure']
    elif 'main' in d and 'pressure' in d['main']:
        atm_press = d['main']['pressure']
    elif 'last' in d:
        if 'main' in d['last']:
            atm_press = d['last']['main']['pressure']
        else:
            atm_press = None
    if 'main' in d and 'sea_level' in d['main']:
        sea_level_press = d['main']['sea_level']
    else:
        sea_level_press = None
    pressure = {'press': atm_press, 'sea_level': sea_level_press}
    # -- temperature
    if 'temp' in d:
        if d['temp'] is not None:
            temperature = d['temp'].copy()
        else:
            temperature = dict()
    elif 'main' in d and 'temp' in d['main']:
        temp = d['main']['temp']
        if 'temp_kf' in d['main']:
            temp_kf = d['main']['temp_kf']
        else:
            temp_kf = None
        if 'temp_max' in d['main']:
            temp_max = d['main']['temp_max']
        else:
            temp_max = None
        if 'temp_min' in d['main']:
            temp_min = d['main']['temp_min']
        else:
            temp_min = None
        temperature = {'temp': temp,
                       'temp_kf': temp_kf,
                       'temp_max': temp_max,
                       'temp_min': temp_min
                       }
    elif 'last' in d:
        if 'main' in d['last']:
            temperature = dict(temp=d['last']['main']['temp'])
        else:
            temperature = dict()
    # -- weather status info
    if 'weather' in d:
        status = d['weather'][0]['main']
        detailed_status = d['weather'][0]['description']
        weather_code = d['weather'][0]['id']
        weather_icon_name = d['weather'][0]['icon']
    else:
        status = ''
        detailed_status = ''
        weather_code = 0
        weather_icon_name = ''

    return Weather(reference_time, sunset_time, sunrise_time, clouds,
                   rain, snow, wind, humidity, pressure, temperature,
                   status, detailed_status, weather_code, weather_icon_name,
                   visibility_distance, dewpoint, humidex, heat_index)
"def",
"weather_from_dictionary",
"(",
"d",
")",
":",
"# -- times",
"if",
"'dt'",
"in",
"d",
":",
"reference_time",
"=",
"d",
"[",
"'dt'",
"]",
"elif",
"'dt'",
"in",
"d",
"[",
"'last'",
"]",
":",
"reference_time",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'dt'",
"]",
"if",
"'sys'",
"in",
"d",
"and",
"'sunset'",
"in",
"d",
"[",
"'sys'",
"]",
":",
"sunset_time",
"=",
"d",
"[",
"'sys'",
"]",
"[",
"'sunset'",
"]",
"else",
":",
"sunset_time",
"=",
"0",
"if",
"'sys'",
"in",
"d",
"and",
"'sunrise'",
"in",
"d",
"[",
"'sys'",
"]",
":",
"sunrise_time",
"=",
"d",
"[",
"'sys'",
"]",
"[",
"'sunrise'",
"]",
"else",
":",
"sunrise_time",
"=",
"0",
"# -- calc",
"if",
"'calc'",
"in",
"d",
":",
"if",
"'dewpoint'",
"in",
"d",
"[",
"'calc'",
"]",
":",
"dewpoint",
"=",
"d",
"[",
"'calc'",
"]",
"[",
"'dewpoint'",
"]",
"else",
":",
"dewpoint",
"=",
"None",
"if",
"'humidex'",
"in",
"d",
"[",
"'calc'",
"]",
":",
"humidex",
"=",
"d",
"[",
"'calc'",
"]",
"[",
"'humidex'",
"]",
"else",
":",
"humidex",
"=",
"None",
"if",
"'heatindex'",
"in",
"d",
"[",
"'calc'",
"]",
":",
"heat_index",
"=",
"d",
"[",
"'calc'",
"]",
"[",
"'heatindex'",
"]",
"else",
":",
"heat_index",
"=",
"None",
"elif",
"'last'",
"in",
"d",
":",
"if",
"'calc'",
"in",
"d",
"[",
"'last'",
"]",
":",
"if",
"'dewpoint'",
"in",
"d",
"[",
"'last'",
"]",
"[",
"'calc'",
"]",
":",
"dewpoint",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'calc'",
"]",
"[",
"'dewpoint'",
"]",
"else",
":",
"dewpoint",
"=",
"None",
"if",
"'humidex'",
"in",
"d",
"[",
"'last'",
"]",
"[",
"'calc'",
"]",
":",
"humidex",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'calc'",
"]",
"[",
"'humidex'",
"]",
"else",
":",
"humidex",
"=",
"None",
"if",
"'heatindex'",
"in",
"d",
"[",
"'last'",
"]",
"[",
"'calc'",
"]",
":",
"heat_index",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'calc'",
"]",
"[",
"'heatindex'",
"]",
"else",
":",
"heat_index",
"=",
"None",
"else",
":",
"dewpoint",
"=",
"None",
"humidex",
"=",
"None",
"heat_index",
"=",
"None",
"# -- visibility",
"if",
"'visibility'",
"in",
"d",
":",
"if",
"isinstance",
"(",
"d",
"[",
"'visibility'",
"]",
",",
"int",
")",
":",
"visibility_distance",
"=",
"d",
"[",
"'visibility'",
"]",
"elif",
"'distance'",
"in",
"d",
"[",
"'visibility'",
"]",
":",
"visibility_distance",
"=",
"d",
"[",
"'visibility'",
"]",
"[",
"'distance'",
"]",
"else",
":",
"visibility_distance",
"=",
"None",
"elif",
"'last'",
"in",
"d",
"and",
"'visibility'",
"in",
"d",
"[",
"'last'",
"]",
":",
"if",
"isinstance",
"(",
"d",
"[",
"'last'",
"]",
"[",
"'visibility'",
"]",
",",
"int",
")",
":",
"visibility_distance",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'visibility'",
"]",
"elif",
"'distance'",
"in",
"d",
"[",
"'last'",
"]",
"[",
"'visibility'",
"]",
":",
"visibility_distance",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'visibility'",
"]",
"[",
"'distance'",
"]",
"else",
":",
"visibility_distance",
"=",
"None",
"else",
":",
"visibility_distance",
"=",
"None",
"# -- clouds",
"if",
"'clouds'",
"in",
"d",
":",
"if",
"isinstance",
"(",
"d",
"[",
"'clouds'",
"]",
",",
"int",
")",
"or",
"isinstance",
"(",
"d",
"[",
"'clouds'",
"]",
",",
"float",
")",
":",
"clouds",
"=",
"d",
"[",
"'clouds'",
"]",
"elif",
"'all'",
"in",
"d",
"[",
"'clouds'",
"]",
":",
"clouds",
"=",
"d",
"[",
"'clouds'",
"]",
"[",
"'all'",
"]",
"else",
":",
"clouds",
"=",
"0",
"else",
":",
"clouds",
"=",
"0",
"# -- rain",
"if",
"'rain'",
"in",
"d",
":",
"if",
"isinstance",
"(",
"d",
"[",
"'rain'",
"]",
",",
"int",
")",
"or",
"isinstance",
"(",
"d",
"[",
"'rain'",
"]",
",",
"float",
")",
":",
"rain",
"=",
"{",
"'all'",
":",
"d",
"[",
"'rain'",
"]",
"}",
"else",
":",
"if",
"d",
"[",
"'rain'",
"]",
"is",
"not",
"None",
":",
"rain",
"=",
"d",
"[",
"'rain'",
"]",
".",
"copy",
"(",
")",
"else",
":",
"rain",
"=",
"dict",
"(",
")",
"else",
":",
"rain",
"=",
"dict",
"(",
")",
"# -- wind",
"if",
"'wind'",
"in",
"d",
"and",
"d",
"[",
"'wind'",
"]",
"is",
"not",
"None",
":",
"wind",
"=",
"d",
"[",
"'wind'",
"]",
".",
"copy",
"(",
")",
"elif",
"'last'",
"in",
"d",
":",
"if",
"'wind'",
"in",
"d",
"[",
"'last'",
"]",
"and",
"d",
"[",
"'last'",
"]",
"[",
"'wind'",
"]",
"is",
"not",
"None",
":",
"wind",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'wind'",
"]",
".",
"copy",
"(",
")",
"else",
":",
"wind",
"=",
"dict",
"(",
")",
"else",
":",
"wind",
"=",
"dict",
"(",
")",
"if",
"'speed'",
"in",
"d",
":",
"wind",
"[",
"'speed'",
"]",
"=",
"d",
"[",
"'speed'",
"]",
"if",
"'deg'",
"in",
"d",
":",
"wind",
"[",
"'deg'",
"]",
"=",
"d",
"[",
"'deg'",
"]",
"# -- humidity",
"if",
"'humidity'",
"in",
"d",
":",
"humidity",
"=",
"d",
"[",
"'humidity'",
"]",
"elif",
"'main'",
"in",
"d",
"and",
"'humidity'",
"in",
"d",
"[",
"'main'",
"]",
":",
"humidity",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'humidity'",
"]",
"elif",
"'last'",
"in",
"d",
"and",
"'main'",
"in",
"d",
"[",
"'last'",
"]",
"and",
"'humidity'",
"in",
"d",
"[",
"'last'",
"]",
"[",
"'main'",
"]",
":",
"humidity",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'main'",
"]",
"[",
"'humidity'",
"]",
"else",
":",
"humidity",
"=",
"0",
"# -- snow",
"if",
"'snow'",
"in",
"d",
":",
"if",
"isinstance",
"(",
"d",
"[",
"'snow'",
"]",
",",
"int",
")",
"or",
"isinstance",
"(",
"d",
"[",
"'snow'",
"]",
",",
"float",
")",
":",
"snow",
"=",
"{",
"'all'",
":",
"d",
"[",
"'snow'",
"]",
"}",
"else",
":",
"if",
"d",
"[",
"'snow'",
"]",
"is",
"not",
"None",
":",
"snow",
"=",
"d",
"[",
"'snow'",
"]",
".",
"copy",
"(",
")",
"else",
":",
"snow",
"=",
"dict",
"(",
")",
"else",
":",
"snow",
"=",
"dict",
"(",
")",
"# -- pressure",
"if",
"'pressure'",
"in",
"d",
":",
"atm_press",
"=",
"d",
"[",
"'pressure'",
"]",
"elif",
"'main'",
"in",
"d",
"and",
"'pressure'",
"in",
"d",
"[",
"'main'",
"]",
":",
"atm_press",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'pressure'",
"]",
"elif",
"'last'",
"in",
"d",
":",
"if",
"'main'",
"in",
"d",
"[",
"'last'",
"]",
":",
"atm_press",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'main'",
"]",
"[",
"'pressure'",
"]",
"else",
":",
"atm_press",
"=",
"None",
"if",
"'main'",
"in",
"d",
"and",
"'sea_level'",
"in",
"d",
"[",
"'main'",
"]",
":",
"sea_level_press",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'sea_level'",
"]",
"else",
":",
"sea_level_press",
"=",
"None",
"pressure",
"=",
"{",
"'press'",
":",
"atm_press",
",",
"'sea_level'",
":",
"sea_level_press",
"}",
"# -- temperature",
"if",
"'temp'",
"in",
"d",
":",
"if",
"d",
"[",
"'temp'",
"]",
"is",
"not",
"None",
":",
"temperature",
"=",
"d",
"[",
"'temp'",
"]",
".",
"copy",
"(",
")",
"else",
":",
"temperature",
"=",
"dict",
"(",
")",
"elif",
"'main'",
"in",
"d",
"and",
"'temp'",
"in",
"d",
"[",
"'main'",
"]",
":",
"temp",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'temp'",
"]",
"if",
"'temp_kf'",
"in",
"d",
"[",
"'main'",
"]",
":",
"temp_kf",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'temp_kf'",
"]",
"else",
":",
"temp_kf",
"=",
"None",
"if",
"'temp_max'",
"in",
"d",
"[",
"'main'",
"]",
":",
"temp_max",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'temp_max'",
"]",
"else",
":",
"temp_max",
"=",
"None",
"if",
"'temp_min'",
"in",
"d",
"[",
"'main'",
"]",
":",
"temp_min",
"=",
"d",
"[",
"'main'",
"]",
"[",
"'temp_min'",
"]",
"else",
":",
"temp_min",
"=",
"None",
"temperature",
"=",
"{",
"'temp'",
":",
"temp",
",",
"'temp_kf'",
":",
"temp_kf",
",",
"'temp_max'",
":",
"temp_max",
",",
"'temp_min'",
":",
"temp_min",
"}",
"elif",
"'last'",
"in",
"d",
":",
"if",
"'main'",
"in",
"d",
"[",
"'last'",
"]",
":",
"temperature",
"=",
"dict",
"(",
"temp",
"=",
"d",
"[",
"'last'",
"]",
"[",
"'main'",
"]",
"[",
"'temp'",
"]",
")",
"else",
":",
"temperature",
"=",
"dict",
"(",
")",
"# -- weather status info",
"if",
"'weather'",
"in",
"d",
":",
"status",
"=",
"d",
"[",
"'weather'",
"]",
"[",
"0",
"]",
"[",
"'main'",
"]",
"detailed_status",
"=",
"d",
"[",
"'weather'",
"]",
"[",
"0",
"]",
"[",
"'description'",
"]",
"weather_code",
"=",
"d",
"[",
"'weather'",
"]",
"[",
"0",
"]",
"[",
"'id'",
"]",
"weather_icon_name",
"=",
"d",
"[",
"'weather'",
"]",
"[",
"0",
"]",
"[",
"'icon'",
"]",
"else",
":",
"status",
"=",
"''",
"detailed_status",
"=",
"''",
"weather_code",
"=",
"0",
"weather_icon_name",
"=",
"''",
"return",
"Weather",
"(",
"reference_time",
",",
"sunset_time",
",",
"sunrise_time",
",",
"clouds",
",",
"rain",
",",
"snow",
",",
"wind",
",",
"humidity",
",",
"pressure",
",",
"temperature",
",",
"status",
",",
"detailed_status",
",",
"weather_code",
",",
"weather_icon_name",
",",
"visibility_distance",
",",
"dewpoint",
",",
"humidex",
",",
"heat_index",
")"
] | 31.874346 | 0.000796 | [
"def weather_from_dictionary(d):\n",
" \"\"\"\n",
" Builds a *Weather* object out of a data dictionary. Only certain\n",
" properties of the dictionary are used: if these properties are not\n",
" found or cannot be read, an error is issued.\n",
"\n",
" :param d: a data dictionary\n",
" :type d: dict\n",
" :returns: a *Weather* instance\n",
" :raises: *KeyError* if it is impossible to find or read the data\n",
" needed to build the instance\n",
"\n",
" \"\"\"\n",
" # -- times\n",
" if 'dt' in d:\n",
" reference_time = d['dt']\n",
" elif 'dt' in d['last']:\n",
" reference_time = d['last']['dt']\n",
" if 'sys' in d and 'sunset' in d['sys']:\n",
" sunset_time = d['sys']['sunset']\n",
" else:\n",
" sunset_time = 0\n",
" if 'sys' in d and 'sunrise' in d['sys']:\n",
" sunrise_time = d['sys']['sunrise']\n",
" else:\n",
" sunrise_time = 0\n",
" # -- calc\n",
" if 'calc' in d:\n",
" if 'dewpoint' in d['calc']:\n",
" dewpoint = d['calc']['dewpoint']\n",
" else:\n",
" dewpoint = None\n",
" if 'humidex' in d['calc']:\n",
" humidex = d['calc']['humidex']\n",
" else:\n",
" humidex = None\n",
" if 'heatindex' in d['calc']:\n",
" heat_index = d['calc']['heatindex']\n",
" else:\n",
" heat_index = None\n",
" elif 'last' in d:\n",
" if 'calc' in d['last']:\n",
" if 'dewpoint' in d['last']['calc']:\n",
" dewpoint = d['last']['calc']['dewpoint']\n",
" else:\n",
" dewpoint = None\n",
" if 'humidex' in d['last']['calc']:\n",
" humidex = d['last']['calc']['humidex']\n",
" else:\n",
" humidex = None\n",
" if 'heatindex' in d['last']['calc']:\n",
" heat_index = d['last']['calc']['heatindex']\n",
" else:\n",
" heat_index = None\n",
" else:\n",
" dewpoint = None\n",
" humidex = None\n",
" heat_index = None\n",
" # -- visibility\n",
" if 'visibility' in d:\n",
" if isinstance(d['visibility'], int):\n",
" visibility_distance = d['visibility']\n",
" elif 'distance' in d['visibility']:\n",
" visibility_distance = d['visibility']['distance']\n",
" else:\n",
" visibility_distance = None\n",
" elif 'last' in d and 'visibility' in d['last']:\n",
" if isinstance(d['last']['visibility'], int):\n",
" visibility_distance = d['last']['visibility']\n",
" elif 'distance' in d['last']['visibility']:\n",
" visibility_distance = d['last']['visibility']['distance']\n",
" else:\n",
" visibility_distance = None\n",
" else:\n",
" visibility_distance = None\n",
" # -- clouds\n",
" if 'clouds' in d:\n",
" if isinstance(d['clouds'], int) or isinstance(d['clouds'], float):\n",
" clouds = d['clouds']\n",
" elif 'all' in d['clouds']:\n",
" clouds = d['clouds']['all']\n",
" else:\n",
" clouds = 0\n",
" else:\n",
" clouds = 0\n",
" # -- rain\n",
" if 'rain' in d:\n",
" if isinstance(d['rain'], int) or isinstance(d['rain'], float):\n",
" rain = {'all': d['rain']}\n",
" else:\n",
" if d['rain'] is not None:\n",
" rain = d['rain'].copy()\n",
" else:\n",
" rain = dict()\n",
" else:\n",
" rain = dict()\n",
" # -- wind\n",
" if 'wind' in d and d['wind'] is not None:\n",
" wind = d['wind'].copy()\n",
" elif 'last' in d:\n",
" if 'wind' in d['last'] and d['last']['wind'] is not None:\n",
" wind = d['last']['wind'].copy()\n",
" else:\n",
" wind = dict()\n",
" else:\n",
" wind = dict()\n",
" if 'speed' in d:\n",
" wind['speed'] = d['speed']\n",
" if 'deg' in d:\n",
" wind['deg'] = d['deg']\n",
" # -- humidity\n",
" if 'humidity' in d:\n",
" humidity = d['humidity']\n",
" elif 'main' in d and 'humidity' in d['main']:\n",
" humidity = d['main']['humidity']\n",
" elif 'last' in d and 'main' in d['last'] and 'humidity' in d['last']['main']:\n",
" humidity = d['last']['main']['humidity']\n",
" else:\n",
" humidity = 0\n",
" # -- snow\n",
" if 'snow' in d:\n",
" if isinstance(d['snow'], int) or isinstance(d['snow'], float):\n",
" snow = {'all': d['snow']}\n",
" else:\n",
" if d['snow'] is not None:\n",
" snow = d['snow'].copy()\n",
" else:\n",
" snow = dict()\n",
" else:\n",
" snow = dict()\n",
" # -- pressure\n",
" if 'pressure' in d:\n",
" atm_press = d['pressure']\n",
" elif 'main' in d and 'pressure' in d['main']:\n",
" atm_press = d['main']['pressure']\n",
" elif 'last' in d:\n",
" if 'main' in d['last']:\n",
" atm_press = d['last']['main']['pressure']\n",
" else:\n",
" atm_press = None\n",
" if 'main' in d and 'sea_level' in d['main']:\n",
" sea_level_press = d['main']['sea_level']\n",
" else:\n",
" sea_level_press = None\n",
" pressure = {'press': atm_press, 'sea_level': sea_level_press}\n",
" # -- temperature\n",
" if 'temp' in d:\n",
" if d['temp'] is not None:\n",
" temperature = d['temp'].copy()\n",
" else:\n",
" temperature = dict()\n",
" elif 'main' in d and 'temp' in d['main']:\n",
" temp = d['main']['temp']\n",
" if 'temp_kf' in d['main']:\n",
" temp_kf = d['main']['temp_kf']\n",
" else:\n",
" temp_kf = None\n",
" if 'temp_max' in d['main']:\n",
" temp_max = d['main']['temp_max']\n",
" else:\n",
" temp_max = None\n",
" if 'temp_min' in d['main']:\n",
" temp_min = d['main']['temp_min']\n",
" else:\n",
" temp_min = None\n",
" temperature = {'temp': temp,\n",
" 'temp_kf': temp_kf,\n",
" 'temp_max': temp_max,\n",
" 'temp_min': temp_min\n",
" }\n",
" elif 'last' in d:\n",
" if 'main' in d['last']:\n",
" temperature = dict(temp=d['last']['main']['temp'])\n",
" else:\n",
" temperature = dict()\n",
" # -- weather status info\n",
" if 'weather' in d:\n",
" status = d['weather'][0]['main']\n",
" detailed_status = d['weather'][0]['description']\n",
" weather_code = d['weather'][0]['id']\n",
" weather_icon_name = d['weather'][0]['icon']\n",
" else:\n",
" status = ''\n",
" detailed_status = ''\n",
" weather_code = 0\n",
" weather_icon_name = ''\n",
"\n",
" return Weather(reference_time, sunset_time, sunrise_time, clouds,\n",
" rain, snow, wind, humidity, pressure, temperature,\n",
" status, detailed_status, weather_code, weather_icon_name,\n",
" visibility_distance, dewpoint, humidex, heat_index)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0.013513513513513514,
0.029850746268656716
] | 191 | 0.000369 |
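
A sketch of the happy path, assuming this function's enclosing module (which defines the `Weather` class) is in scope; the payload below imitates a minimal OpenWeatherMap observation and is an assumption:

sample = {
    "dt": 1461788400,  # reference time (unix, UTC)
    "weather": [{"main": "Rain", "description": "light rain",
                 "id": 500, "icon": "10d"}],
    "main": {"temp": 293.4, "pressure": 1013, "humidity": 82},
    "wind": {"speed": 4.1, "deg": 250},
    "clouds": 75,
    "rain": {"3h": 0.3},
}
w = weather_from_dictionary(sample)
# Fields absent from the payload fall back to defaults: sunrise/sunset 0,
# visibility/dewpoint/humidex/heat-index None, snow {}.
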
def run(self, agent_host):
    """run the agent on the world"""

    total_reward = 0
    current_r = 0
    tol = 0.01

    self.prev_s = None
    self.prev_a = None

    # wait for a valid observation
    world_state = agent_host.peekWorldState()
    while world_state.is_mission_running and all(e.text == '{}' for e in world_state.observations):
        world_state = agent_host.peekWorldState()
    # wait for a frame to arrive after that
    num_frames_seen = world_state.number_of_video_frames_since_last_state
    while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
        world_state = agent_host.peekWorldState()
    world_state = agent_host.getWorldState()
    for err in world_state.errors:
        print(err)

    if not world_state.is_mission_running:
        return 0  # mission already ended

    assert len(world_state.video_frames) > 0, 'No video frames!?'

    obs = json.loads(world_state.observations[-1].text)
    prev_x = obs[u'XPos']
    prev_z = obs[u'ZPos']
    print('Initial position:', prev_x, ',', prev_z)

    if save_images:
        # save the frame, for debugging
        frame = world_state.video_frames[-1]
        image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels))
        iFrame = 0
        self.rep = self.rep + 1
        image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '.png')

    # take first action
    total_reward += self.act(world_state, agent_host, current_r)

    require_move = True
    check_expected_position = True

    # main loop:
    while world_state.is_mission_running:

        # wait for the position to have changed and a reward received
        print('Waiting for data...', end=' ')
        while True:
            world_state = agent_host.peekWorldState()
            if not world_state.is_mission_running:
                print('mission ended.')
                break
            if len(world_state.rewards) > 0 and not all(e.text == '{}' for e in world_state.observations):
                obs = json.loads(world_state.observations[-1].text)
                curr_x = obs[u'XPos']
                curr_z = obs[u'ZPos']
                if require_move:
                    if math.hypot(curr_x - prev_x, curr_z - prev_z) > tol:
                        print('received.')
                        break
                else:
                    print('received.')
                    break
        # wait for a frame to arrive after that
        num_frames_seen = world_state.number_of_video_frames_since_last_state
        while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
            world_state = agent_host.peekWorldState()

        num_frames_before_get = len(world_state.video_frames)

        world_state = agent_host.getWorldState()
        for err in world_state.errors:
            print(err)
        current_r = sum(r.getValue() for r in world_state.rewards)

        if save_images:
            # save the frame, for debugging
            if world_state.is_mission_running:
                assert len(world_state.video_frames) > 0, 'No video frames!?'
                frame = world_state.video_frames[-1]
                image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels))
                iFrame = iFrame + 1
                image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '_after_' + self.actions[self.prev_a] + '.png')

        if world_state.is_mission_running:
            assert len(world_state.video_frames) > 0, 'No video frames!?'
            num_frames_after_get = len(world_state.video_frames)
            assert num_frames_after_get >= num_frames_before_get, 'Fewer frames after getWorldState!?'
            frame = world_state.video_frames[-1]
            obs = json.loads(world_state.observations[-1].text)
            curr_x = obs[u'XPos']
            curr_z = obs[u'ZPos']
            print('New position from observation:', curr_x, ',', curr_z, 'after action:', self.actions[self.prev_a], end=' ')  # actions ordered N, S, W, E
            if check_expected_position:
                expected_x = prev_x + [0, 0, -1, 1][self.prev_a]
                expected_z = prev_z + [-1, 1, 0, 0][self.prev_a]
                if math.hypot(curr_x - expected_x, curr_z - expected_z) > tol:
                    print(' - ERROR DETECTED! Expected:', expected_x, ',', expected_z)
                    input("Press Enter to continue...")
                else:
                    print('as expected.')
                curr_x_from_render = frame.xPos
                curr_z_from_render = frame.zPos
                print('New position from render:', curr_x_from_render, ',', curr_z_from_render, 'after action:', self.actions[self.prev_a], end=' ')  # actions ordered N, S, W, E
                if math.hypot(curr_x_from_render - expected_x, curr_z_from_render - expected_z) > tol:
                    print(' - ERROR DETECTED! Expected:', expected_x, ',', expected_z)
                    input("Press Enter to continue...")
                else:
                    print('as expected.')
            else:
                print()
            prev_x = curr_x
            prev_z = curr_z
            # act
            total_reward += self.act(world_state, agent_host, current_r)

    # process final reward
    self.logger.debug("Final reward: %d" % current_r)
    total_reward += current_r

    # update Q values
    if self.training and self.prev_s is not None and self.prev_a is not None:
        old_q = self.q_table[self.prev_s][self.prev_a]
        self.q_table[self.prev_s][self.prev_a] = old_q + self.alpha * (current_r - old_q)

    self.drawQ()

    return total_reward
"def",
"run",
"(",
"self",
",",
"agent_host",
")",
":",
"total_reward",
"=",
"0",
"current_r",
"=",
"0",
"tol",
"=",
"0.01",
"self",
".",
"prev_s",
"=",
"None",
"self",
".",
"prev_a",
"=",
"None",
"# wait for a valid observation",
"world_state",
"=",
"agent_host",
".",
"peekWorldState",
"(",
")",
"while",
"world_state",
".",
"is_mission_running",
"and",
"all",
"(",
"e",
".",
"text",
"==",
"'{}'",
"for",
"e",
"in",
"world_state",
".",
"observations",
")",
":",
"world_state",
"=",
"agent_host",
".",
"peekWorldState",
"(",
")",
"# wait for a frame to arrive after that",
"num_frames_seen",
"=",
"world_state",
".",
"number_of_video_frames_since_last_state",
"while",
"world_state",
".",
"is_mission_running",
"and",
"world_state",
".",
"number_of_video_frames_since_last_state",
"==",
"num_frames_seen",
":",
"world_state",
"=",
"agent_host",
".",
"peekWorldState",
"(",
")",
"world_state",
"=",
"agent_host",
".",
"getWorldState",
"(",
")",
"for",
"err",
"in",
"world_state",
".",
"errors",
":",
"print",
"(",
"err",
")",
"if",
"not",
"world_state",
".",
"is_mission_running",
":",
"return",
"0",
"# mission already ended",
"assert",
"len",
"(",
"world_state",
".",
"video_frames",
")",
">",
"0",
",",
"'No video frames!?'",
"obs",
"=",
"json",
".",
"loads",
"(",
"world_state",
".",
"observations",
"[",
"-",
"1",
"]",
".",
"text",
")",
"prev_x",
"=",
"obs",
"[",
"u'XPos'",
"]",
"prev_z",
"=",
"obs",
"[",
"u'ZPos'",
"]",
"print",
"(",
"'Initial position:'",
",",
"prev_x",
",",
"','",
",",
"prev_z",
")",
"if",
"save_images",
":",
"# save the frame, for debugging",
"frame",
"=",
"world_state",
".",
"video_frames",
"[",
"-",
"1",
"]",
"image",
"=",
"Image",
".",
"frombytes",
"(",
"'RGB'",
",",
"(",
"frame",
".",
"width",
",",
"frame",
".",
"height",
")",
",",
"bytes",
"(",
"frame",
".",
"pixels",
")",
")",
"iFrame",
"=",
"0",
"self",
".",
"rep",
"=",
"self",
".",
"rep",
"+",
"1",
"image",
".",
"save",
"(",
"'rep_'",
"+",
"str",
"(",
"self",
".",
"rep",
")",
".",
"zfill",
"(",
"3",
")",
"+",
"'_saved_frame_'",
"+",
"str",
"(",
"iFrame",
")",
".",
"zfill",
"(",
"4",
")",
"+",
"'.png'",
")",
"# take first action",
"total_reward",
"+=",
"self",
".",
"act",
"(",
"world_state",
",",
"agent_host",
",",
"current_r",
")",
"require_move",
"=",
"True",
"check_expected_position",
"=",
"True",
"# main loop:",
"while",
"world_state",
".",
"is_mission_running",
":",
"# wait for the position to have changed and a reward received",
"print",
"(",
"'Waiting for data...'",
",",
"end",
"=",
"' '",
")",
"while",
"True",
":",
"world_state",
"=",
"agent_host",
".",
"peekWorldState",
"(",
")",
"if",
"not",
"world_state",
".",
"is_mission_running",
":",
"print",
"(",
"'mission ended.'",
")",
"break",
"if",
"len",
"(",
"world_state",
".",
"rewards",
")",
">",
"0",
"and",
"not",
"all",
"(",
"e",
".",
"text",
"==",
"'{}'",
"for",
"e",
"in",
"world_state",
".",
"observations",
")",
":",
"obs",
"=",
"json",
".",
"loads",
"(",
"world_state",
".",
"observations",
"[",
"-",
"1",
"]",
".",
"text",
")",
"curr_x",
"=",
"obs",
"[",
"u'XPos'",
"]",
"curr_z",
"=",
"obs",
"[",
"u'ZPos'",
"]",
"if",
"require_move",
":",
"if",
"math",
".",
"hypot",
"(",
"curr_x",
"-",
"prev_x",
",",
"curr_z",
"-",
"prev_z",
")",
">",
"tol",
":",
"print",
"(",
"'received.'",
")",
"break",
"else",
":",
"print",
"(",
"'received.'",
")",
"break",
"# wait for a frame to arrive after that",
"num_frames_seen",
"=",
"world_state",
".",
"number_of_video_frames_since_last_state",
"while",
"world_state",
".",
"is_mission_running",
"and",
"world_state",
".",
"number_of_video_frames_since_last_state",
"==",
"num_frames_seen",
":",
"world_state",
"=",
"agent_host",
".",
"peekWorldState",
"(",
")",
"num_frames_before_get",
"=",
"len",
"(",
"world_state",
".",
"video_frames",
")",
"world_state",
"=",
"agent_host",
".",
"getWorldState",
"(",
")",
"for",
"err",
"in",
"world_state",
".",
"errors",
":",
"print",
"(",
"err",
")",
"current_r",
"=",
"sum",
"(",
"r",
".",
"getValue",
"(",
")",
"for",
"r",
"in",
"world_state",
".",
"rewards",
")",
"if",
"save_images",
":",
"# save the frame, for debugging",
"if",
"world_state",
".",
"is_mission_running",
":",
"assert",
"len",
"(",
"world_state",
".",
"video_frames",
")",
">",
"0",
",",
"'No video frames!?'",
"frame",
"=",
"world_state",
".",
"video_frames",
"[",
"-",
"1",
"]",
"image",
"=",
"Image",
".",
"frombytes",
"(",
"'RGB'",
",",
"(",
"frame",
".",
"width",
",",
"frame",
".",
"height",
")",
",",
"bytes",
"(",
"frame",
".",
"pixels",
")",
")",
"iFrame",
"=",
"iFrame",
"+",
"1",
"image",
".",
"save",
"(",
"'rep_'",
"+",
"str",
"(",
"self",
".",
"rep",
")",
".",
"zfill",
"(",
"3",
")",
"+",
"'_saved_frame_'",
"+",
"str",
"(",
"iFrame",
")",
".",
"zfill",
"(",
"4",
")",
"+",
"'_after_'",
"+",
"self",
".",
"actions",
"[",
"self",
".",
"prev_a",
"]",
"+",
"'.png'",
")",
"if",
"world_state",
".",
"is_mission_running",
":",
"assert",
"len",
"(",
"world_state",
".",
"video_frames",
")",
">",
"0",
",",
"'No video frames!?'",
"num_frames_after_get",
"=",
"len",
"(",
"world_state",
".",
"video_frames",
")",
"assert",
"num_frames_after_get",
">=",
"num_frames_before_get",
",",
"'Fewer frames after getWorldState!?'",
"frame",
"=",
"world_state",
".",
"video_frames",
"[",
"-",
"1",
"]",
"obs",
"=",
"json",
".",
"loads",
"(",
"world_state",
".",
"observations",
"[",
"-",
"1",
"]",
".",
"text",
")",
"curr_x",
"=",
"obs",
"[",
"u'XPos'",
"]",
"curr_z",
"=",
"obs",
"[",
"u'ZPos'",
"]",
"print",
"(",
"'New position from observation:'",
",",
"curr_x",
",",
"','",
",",
"curr_z",
",",
"'after action:'",
",",
"self",
".",
"actions",
"[",
"self",
".",
"prev_a",
"]",
",",
"end",
"=",
"' '",
")",
"#NSWE",
"if",
"check_expected_position",
":",
"expected_x",
"=",
"prev_x",
"+",
"[",
"0",
",",
"0",
",",
"-",
"1",
",",
"1",
"]",
"[",
"self",
".",
"prev_a",
"]",
"expected_z",
"=",
"prev_z",
"+",
"[",
"-",
"1",
",",
"1",
",",
"0",
",",
"0",
"]",
"[",
"self",
".",
"prev_a",
"]",
"if",
"math",
".",
"hypot",
"(",
"curr_x",
"-",
"expected_x",
",",
"curr_z",
"-",
"expected_z",
")",
">",
"tol",
":",
"print",
"(",
"' - ERROR DETECTED! Expected:'",
",",
"expected_x",
",",
"','",
",",
"expected_z",
")",
"input",
"(",
"\"Press Enter to continue...\"",
")",
"else",
":",
"print",
"(",
"'as expected.'",
")",
"curr_x_from_render",
"=",
"frame",
".",
"xPos",
"curr_z_from_render",
"=",
"frame",
".",
"zPos",
"print",
"(",
"'New position from render:'",
",",
"curr_x_from_render",
",",
"','",
",",
"curr_z_from_render",
",",
"'after action:'",
",",
"self",
".",
"actions",
"[",
"self",
".",
"prev_a",
"]",
",",
"end",
"=",
"' '",
")",
"#NSWE",
"if",
"math",
".",
"hypot",
"(",
"curr_x_from_render",
"-",
"expected_x",
",",
"curr_z_from_render",
"-",
"expected_z",
")",
">",
"tol",
":",
"print",
"(",
"' - ERROR DETECTED! Expected:'",
",",
"expected_x",
",",
"','",
",",
"expected_z",
")",
"input",
"(",
"\"Press Enter to continue...\"",
")",
"else",
":",
"print",
"(",
"'as expected.'",
")",
"else",
":",
"print",
"(",
")",
"prev_x",
"=",
"curr_x",
"prev_z",
"=",
"curr_z",
"# act",
"total_reward",
"+=",
"self",
".",
"act",
"(",
"world_state",
",",
"agent_host",
",",
"current_r",
")",
"# process final reward",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Final reward: %d\"",
"%",
"current_r",
")",
"total_reward",
"+=",
"current_r",
"# update Q values",
"if",
"self",
".",
"training",
"and",
"self",
".",
"prev_s",
"is",
"not",
"None",
"and",
"self",
".",
"prev_a",
"is",
"not",
"None",
":",
"old_q",
"=",
"self",
".",
"q_table",
"[",
"self",
".",
"prev_s",
"]",
"[",
"self",
".",
"prev_a",
"]",
"self",
".",
"q_table",
"[",
"self",
".",
"prev_s",
"]",
"[",
"self",
".",
"prev_a",
"]",
"=",
"old_q",
"+",
"self",
".",
"alpha",
"*",
"(",
"current_r",
"-",
"old_q",
")",
"self",
".",
"drawQ",
"(",
")",
"return",
"total_reward"
] | 47.090909 | 0.014337 | [
"def run(self, agent_host):\n",
" \"\"\"run the agent on the world\"\"\"\n",
"\n",
" total_reward = 0\n",
" current_r = 0\n",
" tol = 0.01\n",
" \n",
" self.prev_s = None\n",
" self.prev_a = None\n",
" \n",
" # wait for a valid observation\n",
" world_state = agent_host.peekWorldState()\n",
" while world_state.is_mission_running and all(e.text=='{}' for e in world_state.observations):\n",
" world_state = agent_host.peekWorldState()\n",
" # wait for a frame to arrive after that\n",
" num_frames_seen = world_state.number_of_video_frames_since_last_state\n",
" while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:\n",
" world_state = agent_host.peekWorldState()\n",
" world_state = agent_host.getWorldState()\n",
" for err in world_state.errors:\n",
" print(err)\n",
"\n",
" if not world_state.is_mission_running:\n",
" return 0 # mission already ended\n",
" \n",
" assert len(world_state.video_frames) > 0, 'No video frames!?'\n",
" \n",
" obs = json.loads( world_state.observations[-1].text )\n",
" prev_x = obs[u'XPos']\n",
" prev_z = obs[u'ZPos']\n",
" print('Initial position:',prev_x,',',prev_z)\n",
" \n",
" if save_images:\n",
" # save the frame, for debugging\n",
" frame = world_state.video_frames[-1]\n",
" image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels) )\n",
" iFrame = 0\n",
" self.rep = self.rep + 1\n",
" image.save( 'rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '.png' )\n",
" \n",
" # take first action\n",
" total_reward += self.act(world_state,agent_host,current_r)\n",
" \n",
" require_move = True\n",
" check_expected_position = True\n",
" \n",
" # main loop:\n",
" while world_state.is_mission_running:\n",
" \n",
" # wait for the position to have changed and a reward received\n",
" print('Waiting for data...', end=' ')\n",
" while True:\n",
" world_state = agent_host.peekWorldState()\n",
" if not world_state.is_mission_running:\n",
" print('mission ended.')\n",
" break\n",
" if len(world_state.rewards) > 0 and not all(e.text=='{}' for e in world_state.observations):\n",
" obs = json.loads( world_state.observations[-1].text )\n",
" curr_x = obs[u'XPos']\n",
" curr_z = obs[u'ZPos']\n",
" if require_move:\n",
" if math.hypot( curr_x - prev_x, curr_z - prev_z ) > tol:\n",
" print('received.')\n",
" break\n",
" else:\n",
" print('received.')\n",
" break\n",
" # wait for a frame to arrive after that\n",
" num_frames_seen = world_state.number_of_video_frames_since_last_state\n",
" while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:\n",
" world_state = agent_host.peekWorldState()\n",
" \n",
" num_frames_before_get = len(world_state.video_frames)\n",
" \n",
" world_state = agent_host.getWorldState()\n",
" for err in world_state.errors:\n",
" print(err)\n",
" current_r = sum(r.getValue() for r in world_state.rewards)\n",
"\n",
" if save_images:\n",
" # save the frame, for debugging\n",
" if world_state.is_mission_running:\n",
" assert len(world_state.video_frames) > 0, 'No video frames!?'\n",
" frame = world_state.video_frames[-1]\n",
" image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels) )\n",
" iFrame = iFrame + 1\n",
" image.save( 'rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '_after_' + self.actions[self.prev_a] + '.png' )\n",
" \n",
" if world_state.is_mission_running:\n",
" assert len(world_state.video_frames) > 0, 'No video frames!?'\n",
" num_frames_after_get = len(world_state.video_frames)\n",
" assert num_frames_after_get >= num_frames_before_get, 'Fewer frames after getWorldState!?'\n",
" frame = world_state.video_frames[-1]\n",
" obs = json.loads( world_state.observations[-1].text )\n",
" curr_x = obs[u'XPos']\n",
" curr_z = obs[u'ZPos']\n",
" print('New position from observation:',curr_x,',',curr_z,'after action:',self.actions[self.prev_a], end=' ') #NSWE\n",
" if check_expected_position:\n",
" expected_x = prev_x + [0,0,-1,1][self.prev_a]\n",
" expected_z = prev_z + [-1,1,0,0][self.prev_a]\n",
" if math.hypot( curr_x - expected_x, curr_z - expected_z ) > tol:\n",
" print(' - ERROR DETECTED! Expected:',expected_x,',',expected_z)\n",
" input(\"Press Enter to continue...\")\n",
" else:\n",
" print('as expected.')\n",
" curr_x_from_render = frame.xPos\n",
" curr_z_from_render = frame.zPos\n",
" print('New position from render:',curr_x_from_render,',',curr_z_from_render,'after action:',self.actions[self.prev_a], end=' ') #NSWE\n",
" if math.hypot( curr_x_from_render - expected_x, curr_z_from_render - expected_z ) > tol:\n",
" print(' - ERROR DETECTED! Expected:',expected_x,',',expected_z)\n",
" input(\"Press Enter to continue...\")\n",
" else:\n",
" print('as expected.')\n",
" else:\n",
" print()\n",
" prev_x = curr_x\n",
" prev_z = curr_z\n",
" # act\n",
" total_reward += self.act(world_state, agent_host, current_r)\n",
" \n",
" # process final reward\n",
" self.logger.debug(\"Final reward: %d\" % current_r)\n",
" total_reward += current_r\n",
"\n",
" # update Q values\n",
" if self.training and self.prev_s is not None and self.prev_a is not None:\n",
" old_q = self.q_table[self.prev_s][self.prev_a]\n",
" self.q_table[self.prev_s][self.prev_a] = old_q + self.alpha * ( current_r - old_q )\n",
" \n",
" self.drawQ()\n",
" \n",
" return total_reward"
] | [
0,
0.024390243902439025,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.0196078431372549,
0,
0,
0,
0.008264462809917356,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0.07692307692307693,
0,
0.1111111111111111,
0.03225806451612903,
0,
0,
0.05660377358490566,
0.1111111111111111,
0,
0,
0,
0.02127659574468085,
0,
0,
0.027777777777777776,
0.07692307692307693,
0,
0.029850746268656716,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.01834862385321101,
0.02702702702702703,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.008,
0,
0.058823529411764705,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0.0196078431372549,
0,
0.019230769230769232,
0.058823529411764705,
0,
0,
0,
0.009345794392523364,
0,
0.02857142857142857,
0,
0,
0.061068702290076333,
0,
0.045454545454545456,
0.045454545454545456,
0.03529411764705882,
0.045454545454545456,
0,
0,
0,
0,
0,
0.05194805194805195,
0.027522935779816515,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0.03125,
0.07692307692307693,
0,
0.2,
0.037037037037037035
] | 132 | 0.017681 |
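The final Q-table write in the record above, old_q + self.alpha * (current_r - old_q), is the standard tabular Q-learning update with no successor term (the episode has ended, so there is nothing to bootstrap from). A worked step with assumed values:

    alpha = 0.1
    old_q = 2.0        # stored Q(s, a) before the update
    current_r = 10.0   # terminal reward
    new_q = old_q + alpha * (current_r - old_q)   # 2.0 + 0.1 * 8.0 == 2.8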
def autodiscover(module_name=None):
"""
Autodiscover INSTALLED_APPS perms.py modules and fail silently when not
present. This forces an import on them to register any permissions bits
they may want.
"""
from django.utils.module_loading import module_has_submodule
from permission.compat import import_module
from permission.conf import settings
module_name = module_name or settings.PERMISSION_AUTODISCOVER_MODULE_NAME
app_names = (app.name for app in apps.app_configs.values())
for app in app_names:
mod = import_module(app)
# Attempt to import the app's perms module
try:
# discover the permission module
discover(app, module_name=module_name)
except:
# Decide whether to bubble up this error. If the app just doesn't
            # have a perms module, we can just ignore the error attempting
# to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, module_name):
raise | [
"def",
"autodiscover",
"(",
"module_name",
"=",
"None",
")",
":",
"from",
"django",
".",
"utils",
".",
"module_loading",
"import",
"module_has_submodule",
"from",
"permission",
".",
"compat",
"import",
"import_module",
"from",
"permission",
".",
"conf",
"import",
"settings",
"module_name",
"=",
"module_name",
"or",
"settings",
".",
"PERMISSION_AUTODISCOVER_MODULE_NAME",
"app_names",
"=",
"(",
"app",
".",
"name",
"for",
"app",
"in",
"apps",
".",
"app_configs",
".",
"values",
"(",
")",
")",
"for",
"app",
"in",
"app_names",
":",
"mod",
"=",
"import_module",
"(",
"app",
")",
"# Attempt to import the app's perms module",
"try",
":",
"# discover the permission module",
"discover",
"(",
"app",
",",
"module_name",
"=",
"module_name",
")",
"except",
":",
"# Decide whether to bubble up this error. If the app just doesn't",
"# have an perms module, we can just ignore the error attempting",
"# to import it, otherwise we want it to bubble up.",
"if",
"module_has_submodule",
"(",
"mod",
",",
"module_name",
")",
":",
"raise"
] | 41 | 0.001907 | [
"def autodiscover(module_name=None):\n",
" \"\"\"\n",
" Autodiscover INSTALLED_APPS perms.py modules and fail silently when not\n",
" present. This forces an import on them to register any permissions bits\n",
" they may want.\n",
" \"\"\"\n",
" from django.utils.module_loading import module_has_submodule\n",
" from permission.compat import import_module\n",
" from permission.conf import settings\n",
"\n",
" module_name = module_name or settings.PERMISSION_AUTODISCOVER_MODULE_NAME\n",
" app_names = (app.name for app in apps.app_configs.values())\n",
"\n",
" for app in app_names:\n",
" mod = import_module(app)\n",
" # Attempt to import the app's perms module\n",
" try:\n",
" # discover the permission module\n",
" discover(app, module_name=module_name)\n",
" except:\n",
" # Decide whether to bubble up this error. If the app just doesn't\n",
" # have an perms module, we can just ignore the error attempting\n",
" # to import it, otherwise we want it to bubble up.\n",
" if module_has_submodule(mod, module_name):\n",
" raise"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0.047619047619047616
] | 25 | 0.004405 |
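A hypothetical way to wire the autodiscover record above into a Django project; AppConfig.ready is standard Django, the config class itself is illustrative:

    from django.apps import AppConfig

    class PermissionConfig(AppConfig):
        name = 'permission'

        def ready(self):
            # importing each installed app's perms.py lets its permission
            # logic register itself as a side effect
            autodiscover()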
def json2py(json_obj):
"""
    Converts the input JSON object to a Python value.
:param json_obj | <variant>
"""
for key, value in json_obj.items():
if type(value) not in (str, unicode):
continue
# restore a datetime
if re.match('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d+$', value):
value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S:%f')
elif re.match('^\d{4}-\d{2}-\d{2}$', value):
year, month, day = map(int, value.split('-'))
value = datetime.date(year, month, day)
elif re.match('^\d{2}:\d{2}:\d{2}:\d+$', value):
hour, minute, second, micro = map(int, value.split(':'))
value = datetime.time(hour, minute, second, micro)
else:
found = False
for decoder in _decoders:
success, new_value = decoder(value)
if success:
value = new_value
found = True
break
if not found:
continue
json_obj[key] = value
return json_obj | [
"def",
"json2py",
"(",
"json_obj",
")",
":",
"for",
"key",
",",
"value",
"in",
"json_obj",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"value",
")",
"not",
"in",
"(",
"str",
",",
"unicode",
")",
":",
"continue",
"# restore a datetime",
"if",
"re",
".",
"match",
"(",
"'^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}:\\d+$'",
",",
"value",
")",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"'%Y-%m-%d %H:%M:%S:%f'",
")",
"elif",
"re",
".",
"match",
"(",
"'^\\d{4}-\\d{2}-\\d{2}$'",
",",
"value",
")",
":",
"year",
",",
"month",
",",
"day",
"=",
"map",
"(",
"int",
",",
"value",
".",
"split",
"(",
"'-'",
")",
")",
"value",
"=",
"datetime",
".",
"date",
"(",
"year",
",",
"month",
",",
"day",
")",
"elif",
"re",
".",
"match",
"(",
"'^\\d{2}:\\d{2}:\\d{2}:\\d+$'",
",",
"value",
")",
":",
"hour",
",",
"minute",
",",
"second",
",",
"micro",
"=",
"map",
"(",
"int",
",",
"value",
".",
"split",
"(",
"':'",
")",
")",
"value",
"=",
"datetime",
".",
"time",
"(",
"hour",
",",
"minute",
",",
"second",
",",
"micro",
")",
"else",
":",
"found",
"=",
"False",
"for",
"decoder",
"in",
"_decoders",
":",
"success",
",",
"new_value",
"=",
"decoder",
"(",
"value",
")",
"if",
"success",
":",
"value",
"=",
"new_value",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"continue",
"json_obj",
"[",
"key",
"]",
"=",
"value",
"return",
"json_obj"
] | 33.424242 | 0.014097 | [
"def json2py(json_obj):\n",
" \"\"\"\n",
" Converts the inputted JSON object to a python value.\n",
" \n",
" :param json_obj | <variant>\n",
" \"\"\"\n",
" for key, value in json_obj.items():\n",
" if type(value) not in (str, unicode):\n",
" continue\n",
"\n",
" # restore a datetime\n",
" if re.match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}:\\d+$', value):\n",
" value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S:%f')\n",
" elif re.match('^\\d{4}-\\d{2}-\\d{2}$', value):\n",
" year, month, day = map(int, value.split('-'))\n",
" value = datetime.date(year, month, day)\n",
" elif re.match('^\\d{2}:\\d{2}:\\d{2}:\\d+$', value):\n",
" hour, minute, second, micro = map(int, value.split(':'))\n",
" value = datetime.time(hour, minute, second, micro)\n",
" else:\n",
" found = False\n",
" for decoder in _decoders:\n",
" success, new_value = decoder(value)\n",
" if success:\n",
" value = new_value\n",
" found = True\n",
" break\n",
"\n",
" if not found:\n",
" continue\n",
"\n",
" json_obj[key] = value\n",
" return json_obj"
] | [
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0.0958904109589041,
0,
0.05660377358490566,
0,
0,
0.07017543859649122,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 33 | 0.014403 |
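A small usage sketch for the json2py record above; the payload values are invented, and the record targets Python 2 (note the unicode check):

    payload = {'created': '2021-03-05 12:30:45:123456', 'day': '2021-03-05'}
    result = json2py(payload)
    # result['created'] -> datetime.datetime(2021, 3, 5, 12, 30, 45, 123456)
    # result['day']     -> datetime.date(2021, 3, 5)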
def join_multiline_pairs(source, pair="()"):
"""
Finds and removes newlines in multiline matching pairs of characters in
*source*.
By default it joins parens () but it will join any two characters given via
the *pair* variable.
.. note::
Doesn't remove extraneous whitespace that ends up between the pair.
Use `reduce_operators()` for that.
Example::
test = (
"This is inside a multi-line pair of parentheses"
)
Will become::
test = ( "This is inside a multi-line pair of parentheses" )
"""
opener = pair[0]
closer = pair[1]
io_obj = io.StringIO(source)
out_tokens = []
open_count = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.OP and token_string in pair:
if token_string == opener:
open_count += 1
elif token_string == closer:
open_count -= 1
out_tokens.append(tok)
elif token_type in (tokenize.NL, tokenize.NEWLINE):
if open_count == 0:
out_tokens.append(tok)
else:
out_tokens.append(tok)
return token_utils.untokenize(out_tokens) | [
"def",
"join_multiline_pairs",
"(",
"source",
",",
"pair",
"=",
"\"()\"",
")",
":",
"opener",
"=",
"pair",
"[",
"0",
"]",
"closer",
"=",
"pair",
"[",
"1",
"]",
"io_obj",
"=",
"io",
".",
"StringIO",
"(",
"source",
")",
"out_tokens",
"=",
"[",
"]",
"open_count",
"=",
"0",
"for",
"tok",
"in",
"tokenize",
".",
"generate_tokens",
"(",
"io_obj",
".",
"readline",
")",
":",
"token_type",
"=",
"tok",
"[",
"0",
"]",
"token_string",
"=",
"tok",
"[",
"1",
"]",
"if",
"token_type",
"==",
"tokenize",
".",
"OP",
"and",
"token_string",
"in",
"pair",
":",
"if",
"token_string",
"==",
"opener",
":",
"open_count",
"+=",
"1",
"elif",
"token_string",
"==",
"closer",
":",
"open_count",
"-=",
"1",
"out_tokens",
".",
"append",
"(",
"tok",
")",
"elif",
"token_type",
"in",
"(",
"tokenize",
".",
"NL",
",",
"tokenize",
".",
"NEWLINE",
")",
":",
"if",
"open_count",
"==",
"0",
":",
"out_tokens",
".",
"append",
"(",
"tok",
")",
"else",
":",
"out_tokens",
".",
"append",
"(",
"tok",
")",
"return",
"token_utils",
".",
"untokenize",
"(",
"out_tokens",
")"
] | 28.613636 | 0.001536 | [
"def join_multiline_pairs(source, pair=\"()\"):\n",
" \"\"\"\n",
" Finds and removes newlines in multiline matching pairs of characters in\n",
" *source*.\n",
"\n",
" By default it joins parens () but it will join any two characters given via\n",
" the *pair* variable.\n",
"\n",
" .. note::\n",
"\n",
" Doesn't remove extraneous whitespace that ends up between the pair.\n",
" Use `reduce_operators()` for that.\n",
"\n",
" Example::\n",
"\n",
" test = (\n",
" \"This is inside a multi-line pair of parentheses\"\n",
" )\n",
"\n",
" Will become::\n",
"\n",
" test = ( \"This is inside a multi-line pair of parentheses\" )\n",
"\n",
" \"\"\"\n",
" opener = pair[0]\n",
" closer = pair[1]\n",
" io_obj = io.StringIO(source)\n",
" out_tokens = []\n",
" open_count = 0\n",
" for tok in tokenize.generate_tokens(io_obj.readline):\n",
" token_type = tok[0]\n",
" token_string = tok[1]\n",
" if token_type == tokenize.OP and token_string in pair:\n",
" if token_string == opener:\n",
" open_count += 1\n",
" elif token_string == closer:\n",
" open_count -= 1\n",
" out_tokens.append(tok)\n",
" elif token_type in (tokenize.NL, tokenize.NEWLINE):\n",
" if open_count == 0:\n",
" out_tokens.append(tok)\n",
" else:\n",
" out_tokens.append(tok)\n",
" return token_utils.untokenize(out_tokens)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223
] | 44 | 0.000766 |
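A sketch of join_multiline_pairs above on the docstring's own example; token_utils.untokenize is the external helper the record relies on, so the exact spacing of the output depends on it:

    source = 'test = (\n    "inside a multi-line pair"\n)\n'
    print(join_multiline_pairs(source))
    # roughly: test = ( "inside a multi-line pair" )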
def do_watch(self, params):
"""
\x1b[1mNAME\x1b[0m
watch - Recursively watch for all changes under a path.
\x1b[1mSYNOPSIS\x1b[0m
watch <start|stop|stats> <path> [options]
\x1b[1mDESCRIPTION\x1b[0m
watch start <path> [debug] [depth]
with debug=true, print watches as they fire. depth is
the level for recursively setting watches:
* -1: recurse all the way
* 0: don't recurse, only watch the given path
* > 0: recurse up to <level> children
watch stats <path> [repeat] [sleep]
with repeat=0 this command will loop until interrupted. sleep sets
the pause duration in between each iteration.
watch stop <path>
\x1b[1mEXAMPLES\x1b[0m
> watch start /foo/bar
> watch stop /foo/bar
> watch stats /foo/bar
"""
wm = get_watch_manager(self._zk)
if params.command == "start":
debug = to_bool(params.debug)
children = to_int(params.sleep, -1)
wm.add(params.path, debug, children)
elif params.command == "stop":
wm.remove(params.path)
elif params.command == "stats":
repeat = to_int(params.debug, 1)
sleep = to_int(params.sleep, 1)
if repeat == 0:
while True:
wm.stats(params.path)
time.sleep(sleep)
else:
for _ in range(0, repeat):
wm.stats(params.path)
time.sleep(sleep)
else:
self.show_output("watch <start|stop|stats> <path> [verbose]") | [
"def",
"do_watch",
"(",
"self",
",",
"params",
")",
":",
"wm",
"=",
"get_watch_manager",
"(",
"self",
".",
"_zk",
")",
"if",
"params",
".",
"command",
"==",
"\"start\"",
":",
"debug",
"=",
"to_bool",
"(",
"params",
".",
"debug",
")",
"children",
"=",
"to_int",
"(",
"params",
".",
"sleep",
",",
"-",
"1",
")",
"wm",
".",
"add",
"(",
"params",
".",
"path",
",",
"debug",
",",
"children",
")",
"elif",
"params",
".",
"command",
"==",
"\"stop\"",
":",
"wm",
".",
"remove",
"(",
"params",
".",
"path",
")",
"elif",
"params",
".",
"command",
"==",
"\"stats\"",
":",
"repeat",
"=",
"to_int",
"(",
"params",
".",
"debug",
",",
"1",
")",
"sleep",
"=",
"to_int",
"(",
"params",
".",
"sleep",
",",
"1",
")",
"if",
"repeat",
"==",
"0",
":",
"while",
"True",
":",
"wm",
".",
"stats",
"(",
"params",
".",
"path",
")",
"time",
".",
"sleep",
"(",
"sleep",
")",
"else",
":",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"repeat",
")",
":",
"wm",
".",
"stats",
"(",
"params",
".",
"path",
")",
"time",
".",
"sleep",
"(",
"sleep",
")",
"else",
":",
"self",
".",
"show_output",
"(",
"\"watch <start|stop|stats> <path> [verbose]\"",
")"
] | 31.254902 | 0.001217 | [
"def do_watch(self, params):\n",
" \"\"\"\n",
"\\x1b[1mNAME\\x1b[0m\n",
" watch - Recursively watch for all changes under a path.\n",
"\n",
"\\x1b[1mSYNOPSIS\\x1b[0m\n",
" watch <start|stop|stats> <path> [options]\n",
"\n",
"\\x1b[1mDESCRIPTION\\x1b[0m\n",
" watch start <path> [debug] [depth]\n",
"\n",
" with debug=true, print watches as they fire. depth is\n",
" the level for recursively setting watches:\n",
"\n",
" * -1: recurse all the way\n",
" * 0: don't recurse, only watch the given path\n",
" * > 0: recurse up to <level> children\n",
"\n",
" watch stats <path> [repeat] [sleep]\n",
"\n",
" with repeat=0 this command will loop until interrupted. sleep sets\n",
" the pause duration in between each iteration.\n",
"\n",
" watch stop <path>\n",
"\n",
"\\x1b[1mEXAMPLES\\x1b[0m\n",
" > watch start /foo/bar\n",
" > watch stop /foo/bar\n",
" > watch stats /foo/bar\n",
"\n",
" \"\"\"\n",
" wm = get_watch_manager(self._zk)\n",
" if params.command == \"start\":\n",
" debug = to_bool(params.debug)\n",
" children = to_int(params.sleep, -1)\n",
" wm.add(params.path, debug, children)\n",
" elif params.command == \"stop\":\n",
" wm.remove(params.path)\n",
" elif params.command == \"stats\":\n",
" repeat = to_int(params.debug, 1)\n",
" sleep = to_int(params.sleep, 1)\n",
" if repeat == 0:\n",
" while True:\n",
" wm.stats(params.path)\n",
" time.sleep(sleep)\n",
" else:\n",
" for _ in range(0, repeat):\n",
" wm.stats(params.path)\n",
" time.sleep(sleep)\n",
" else:\n",
" self.show_output(\"watch <start|stop|stats> <path> [verbose]\")"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863
] | 51 | 0.001903 |
def from_archive(cls, archive: Archive, predictor_name: str = None) -> 'Predictor':
"""
Instantiate a :class:`Predictor` from an :class:`~allennlp.models.archival.Archive`;
that is, from the result of training a model. Optionally specify which `Predictor`
subclass; otherwise, the default one for the model will be used.
"""
# Duplicate the config so that the config inside the archive doesn't get consumed
config = archive.config.duplicate()
if not predictor_name:
model_type = config.get("model").get("type")
            if model_type not in DEFAULT_PREDICTORS:
                raise ConfigurationError(f"No default predictor for model type {model_type}.\n"
f"Please specify a predictor explicitly.")
predictor_name = DEFAULT_PREDICTORS[model_type]
dataset_reader_params = config["dataset_reader"]
dataset_reader = DatasetReader.from_params(dataset_reader_params)
model = archive.model
model.eval()
return Predictor.by_name(predictor_name)(model, dataset_reader) | [
"def",
"from_archive",
"(",
"cls",
",",
"archive",
":",
"Archive",
",",
"predictor_name",
":",
"str",
"=",
"None",
")",
"->",
"'Predictor'",
":",
"# Duplicate the config so that the config inside the archive doesn't get consumed",
"config",
"=",
"archive",
".",
"config",
".",
"duplicate",
"(",
")",
"if",
"not",
"predictor_name",
":",
"model_type",
"=",
"config",
".",
"get",
"(",
"\"model\"",
")",
".",
"get",
"(",
"\"type\"",
")",
"if",
"not",
"model_type",
"in",
"DEFAULT_PREDICTORS",
":",
"raise",
"ConfigurationError",
"(",
"f\"No default predictor for model type {model_type}.\\n\"",
"f\"Please specify a predictor explicitly.\"",
")",
"predictor_name",
"=",
"DEFAULT_PREDICTORS",
"[",
"model_type",
"]",
"dataset_reader_params",
"=",
"config",
"[",
"\"dataset_reader\"",
"]",
"dataset_reader",
"=",
"DatasetReader",
".",
"from_params",
"(",
"dataset_reader_params",
")",
"model",
"=",
"archive",
".",
"model",
"model",
".",
"eval",
"(",
")",
"return",
"Predictor",
".",
"by_name",
"(",
"predictor_name",
")",
"(",
"model",
",",
"dataset_reader",
")"
] | 48.521739 | 0.008787 | [
"def from_archive(cls, archive: Archive, predictor_name: str = None) -> 'Predictor':\n",
" \"\"\"\n",
" Instantiate a :class:`Predictor` from an :class:`~allennlp.models.archival.Archive`;\n",
" that is, from the result of training a model. Optionally specify which `Predictor`\n",
" subclass; otherwise, the default one for the model will be used.\n",
" \"\"\"\n",
" # Duplicate the config so that the config inside the archive doesn't get consumed\n",
" config = archive.config.duplicate()\n",
"\n",
" if not predictor_name:\n",
" model_type = config.get(\"model\").get(\"type\")\n",
" if not model_type in DEFAULT_PREDICTORS:\n",
" raise ConfigurationError(f\"No default predictor for model type {model_type}.\\n\"\\\n",
" f\"Please specify a predictor explicitly.\")\n",
" predictor_name = DEFAULT_PREDICTORS[model_type]\n",
"\n",
" dataset_reader_params = config[\"dataset_reader\"]\n",
" dataset_reader = DatasetReader.from_params(dataset_reader_params)\n",
"\n",
" model = archive.model\n",
" model.eval()\n",
"\n",
" return Predictor.by_name(predictor_name)(model, dataset_reader)"
] | [
0.011904761904761904,
0.08333333333333333,
0.010752688172043012,
0.01098901098901099,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0.018867924528301886,
0.020618556701030927,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.014084507042253521
] | 23 | 0.008416 |
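A hedged usage sketch for the from_archive record above, assuming an AllenNLP version contemporary with it ('machine-comprehension' was a registered predictor name at the time; the archive path is a placeholder):

    from allennlp.models.archival import load_archive

    archive = load_archive('/path/to/model.tar.gz')
    predictor = Predictor.from_archive(archive, 'machine-comprehension')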
def block(context_name, parent_block_func, view_func=None):
"""A decorator that is used for inserting the decorated block function in
the block template hierarchy.
The :func:`block` decorator accepts the following arguments:
:param context_name: key in the `g.blocks` dictionary in which the result
of the decorated block function will be stored for
further processing by the parent block function
`parent_block_func`.
:param parent_block_func: parent block function in the template hierarchy
which will use the stored result.
:param view_func: the decorated block will take an effect only in the
execution context of the specified view function. If the
default value of `None` is used, then the block will be
used as default for the specified `context_name`.
Internally this parameter is converted to a Werkzeug
endpoint in the same way Flask is doing that with the
`Flask.route` decorator.
"""
def decorator(block_func):
block = Block(block_func, view_func)
parent_block = Block.block_mapping[parent_block_func]
parent_block.append_context_block(context_name, block)
return block_func
return decorator | [
"def",
"block",
"(",
"context_name",
",",
"parent_block_func",
",",
"view_func",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"block_func",
")",
":",
"block",
"=",
"Block",
"(",
"block_func",
",",
"view_func",
")",
"parent_block",
"=",
"Block",
".",
"block_mapping",
"[",
"parent_block_func",
"]",
"parent_block",
".",
"append_context_block",
"(",
"context_name",
",",
"block",
")",
"return",
"block_func",
"return",
"decorator"
] | 53.307692 | 0.000709 | [
"def block(context_name, parent_block_func, view_func=None):\n",
" \"\"\"A decorator that is used for inserting the decorated block function in\n",
" the block template hierarchy.\n",
"\n",
" The :func:`block` decorator accepts the following arguments:\n",
"\n",
" :param context_name: key in the `g.blocks` dictionary in which the result\n",
" of the decorated block function will be stored for\n",
" further processing by the parent block function\n",
" `parent_block_func`.\n",
" :param parent_block_func: parent block function in the template hierarchy\n",
" which will use the stored result.\n",
" :param view_func: the decorated block will take an effect only in the\n",
" execution context of the specified view function. If the\n",
" default value of `None` is used, then the block will be\n",
" used as default for the specified `context_name`.\n",
" Internally this parameter is converted to a Werkzeug\n",
" endpoint in the same way Flask is doing that with the\n",
" `Flask.route` decorator.\n",
" \"\"\"\n",
" def decorator(block_func):\n",
" block = Block(block_func, view_func)\n",
" parent_block = Block.block_mapping[parent_block_func]\n",
" parent_block.append_context_block(context_name, block)\n",
" return block_func\n",
" return decorator"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 26 | 0.001923 |
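A hypothetical sketch of the block decorator above in use; page stands for some previously registered parent block function, and the template name is invented:

    from flask import render_template

    @block('sidebar', page)
    def sidebar_block(*args, **kwargs):
        # the return value lands in g.blocks['sidebar'] for `page` to consume
        return render_template('sidebar.html')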
def keys(self):
"""Create an ordered dict of the names and values of key fields."""
keys = OrderedDict()
def order_key(_):
(k, v) = _
cache_key = getattr(type(self), k)
return cache_key.order
items = [(k, getattr(type(self), k)) for k
in dir(type(self))
]
items = [(k, v) for (k, v)
in items
if isinstance(v, Key)
]
for k, v in sorted(items, key=order_key):
keys[k] = getattr(self, k)
return keys | [
"def",
"keys",
"(",
"self",
")",
":",
"keys",
"=",
"OrderedDict",
"(",
")",
"def",
"order_key",
"(",
"_",
")",
":",
"(",
"k",
",",
"v",
")",
"=",
"_",
"cache_key",
"=",
"getattr",
"(",
"type",
"(",
"self",
")",
",",
"k",
")",
"return",
"cache_key",
".",
"order",
"items",
"=",
"[",
"(",
"k",
",",
"getattr",
"(",
"type",
"(",
"self",
")",
",",
"k",
")",
")",
"for",
"k",
"in",
"dir",
"(",
"type",
"(",
"self",
")",
")",
"]",
"items",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"items",
"if",
"isinstance",
"(",
"v",
",",
"Key",
")",
"]",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"items",
",",
"key",
"=",
"order_key",
")",
":",
"keys",
"[",
"k",
"]",
"=",
"getattr",
"(",
"self",
",",
"k",
")",
"return",
"keys"
] | 25.52381 | 0.01259 | [
"def keys(self):\n",
" \"\"\"Create an ordered dict of the names and values of key fields.\"\"\"\n",
"\n",
" keys = OrderedDict()\n",
"\n",
" def order_key(_):\n",
" (k, v) = _\n",
" cache_key = getattr(type(self), k)\n",
" return cache_key.order\n",
"\n",
" items = [(k, getattr(type(self), k)) for k\n",
" in dir(type(self))\n",
" ]\n",
" items = [(k, v) for (k, v)\n",
" in items\n",
" if isinstance(v, Key)\n",
" ]\n",
"\n",
" for k, v in sorted(items, key=order_key):\n",
" keys[k] = getattr(self, k)\n",
" return keys"
] | [
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0.1,
0,
0.047619047619047616,
0.029411764705882353,
0.1,
0,
0,
0,
0.05263157894736842
] | 21 | 0.017861 |
def update(self, points, pointvol=0., vol_dec=0.5, vol_check=2.,
rstate=None, bootstrap=0, pool=None, mc_integrate=False):
"""
Update the set of ellipsoids to bound the collection of points.
Parameters
----------
points : `~numpy.ndarray` with shape (npoints, ndim)
The set of points to bound.
pointvol : float, optional
The minimum volume associated with each point. Default is `0.`.
vol_dec : float, optional
The required fractional reduction in volume after splitting
            an ellipsoid in order to accept the split.
Default is `0.5`.
vol_check : float, optional
The factor used when checking if the volume of the original
bounding ellipsoid is large enough to warrant `> 2` splits
via `ell.vol > vol_check * nlive * pointvol`.
Default is `2.0`.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
bootstrap : int, optional
The number of bootstrapped realizations of the ellipsoids. The
maximum distance to the set of points "left out" during each
iteration is used to enlarge the resulting volumes.
Default is `0`.
pool : user-provided pool, optional
Use this pool of workers to execute operations in parallel.
mc_integrate : bool, optional
Whether to use Monte Carlo methods to compute the effective
volume and fractional overlap of the final union of ellipsoids
with the unit cube. Default is `False`.
"""
if rstate is None:
rstate = np.random
if not HAVE_KMEANS:
raise ValueError("scipy.cluster.vq.kmeans2 is required "
"to compute ellipsoid decompositions.")
npoints, ndim = points.shape
# Calculate the bounding ellipsoid for the points, possibly
# enlarged to a minimum volume.
firstell = bounding_ellipsoid(points, pointvol=pointvol)
# Recursively split the bounding ellipsoid using `vol_check`
# until the volume of each split no longer decreases by a
# factor of `vol_dec`.
ells = _bounding_ellipsoids(points, firstell, pointvol=pointvol,
vol_dec=vol_dec, vol_check=vol_check)
# Update the set of ellipsoids.
self.nells = len(ells)
self.ells = ells
self.ctrs = np.array([ell.ctr for ell in self.ells])
self.covs = np.array([ell.cov for ell in self.ells])
self.ams = np.array([ell.am for ell in self.ells])
self.vols = np.array([ell.vol for ell in self.ells])
self.vol_tot = sum(self.vols)
# Compute expansion factor.
expands = np.array([ell.expand for ell in self.ells])
vols_orig = self.vols / expands
vol_tot_orig = sum(vols_orig)
self.expand_tot = self.vol_tot / vol_tot_orig
# Use bootstrapping to determine the volume expansion factor.
if bootstrap > 0:
# If provided, compute bootstraps in parallel using a pool.
if pool is None:
M = map
else:
M = pool.map
ps = [points for it in range(bootstrap)]
pvs = [pointvol for it in range(bootstrap)]
vds = [vol_dec for it in range(bootstrap)]
vcs = [vol_check for it in range(bootstrap)]
args = zip(ps, pvs, vds, vcs)
expands = list(M(_ellipsoids_bootstrap_expand, args))
# Conservatively set the expansion factor to be the maximum
# factor derived from our set of bootstraps.
expand = max(expands)
# If our ellipsoids are overly constrained, expand them.
if expand > 1.:
vs = self.vols * expand**ndim
self.scale_to_vols(vs)
# Estimate the volume and fractional overlap with the unit cube
# using Monte Carlo integration.
if mc_integrate:
self.vol, self.funit = self.monte_carlo_vol(return_overlap=True) | [
"def",
"update",
"(",
"self",
",",
"points",
",",
"pointvol",
"=",
"0.",
",",
"vol_dec",
"=",
"0.5",
",",
"vol_check",
"=",
"2.",
",",
"rstate",
"=",
"None",
",",
"bootstrap",
"=",
"0",
",",
"pool",
"=",
"None",
",",
"mc_integrate",
"=",
"False",
")",
":",
"if",
"rstate",
"is",
"None",
":",
"rstate",
"=",
"np",
".",
"random",
"if",
"not",
"HAVE_KMEANS",
":",
"raise",
"ValueError",
"(",
"\"scipy.cluster.vq.kmeans2 is required \"",
"\"to compute ellipsoid decompositions.\"",
")",
"npoints",
",",
"ndim",
"=",
"points",
".",
"shape",
"# Calculate the bounding ellipsoid for the points, possibly",
"# enlarged to a minimum volume.",
"firstell",
"=",
"bounding_ellipsoid",
"(",
"points",
",",
"pointvol",
"=",
"pointvol",
")",
"# Recursively split the bounding ellipsoid using `vol_check`",
"# until the volume of each split no longer decreases by a",
"# factor of `vol_dec`.",
"ells",
"=",
"_bounding_ellipsoids",
"(",
"points",
",",
"firstell",
",",
"pointvol",
"=",
"pointvol",
",",
"vol_dec",
"=",
"vol_dec",
",",
"vol_check",
"=",
"vol_check",
")",
"# Update the set of ellipsoids.",
"self",
".",
"nells",
"=",
"len",
"(",
"ells",
")",
"self",
".",
"ells",
"=",
"ells",
"self",
".",
"ctrs",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"ctr",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"covs",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"cov",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"ams",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"am",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"vols",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"vol",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"self",
".",
"vol_tot",
"=",
"sum",
"(",
"self",
".",
"vols",
")",
"# Compute expansion factor.",
"expands",
"=",
"np",
".",
"array",
"(",
"[",
"ell",
".",
"expand",
"for",
"ell",
"in",
"self",
".",
"ells",
"]",
")",
"vols_orig",
"=",
"self",
".",
"vols",
"/",
"expands",
"vol_tot_orig",
"=",
"sum",
"(",
"vols_orig",
")",
"self",
".",
"expand_tot",
"=",
"self",
".",
"vol_tot",
"/",
"vol_tot_orig",
"# Use bootstrapping to determine the volume expansion factor.",
"if",
"bootstrap",
">",
"0",
":",
"# If provided, compute bootstraps in parallel using a pool.",
"if",
"pool",
"is",
"None",
":",
"M",
"=",
"map",
"else",
":",
"M",
"=",
"pool",
".",
"map",
"ps",
"=",
"[",
"points",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"pvs",
"=",
"[",
"pointvol",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"vds",
"=",
"[",
"vol_dec",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"vcs",
"=",
"[",
"vol_check",
"for",
"it",
"in",
"range",
"(",
"bootstrap",
")",
"]",
"args",
"=",
"zip",
"(",
"ps",
",",
"pvs",
",",
"vds",
",",
"vcs",
")",
"expands",
"=",
"list",
"(",
"M",
"(",
"_ellipsoids_bootstrap_expand",
",",
"args",
")",
")",
"# Conservatively set the expansion factor to be the maximum",
"# factor derived from our set of bootstraps.",
"expand",
"=",
"max",
"(",
"expands",
")",
"# If our ellipsoids are overly constrained, expand them.",
"if",
"expand",
">",
"1.",
":",
"vs",
"=",
"self",
".",
"vols",
"*",
"expand",
"**",
"ndim",
"self",
".",
"scale_to_vols",
"(",
"vs",
")",
"# Estimate the volume and fractional overlap with the unit cube",
"# using Monte Carlo integration.",
"if",
"mc_integrate",
":",
"self",
".",
"vol",
",",
"self",
".",
"funit",
"=",
"self",
".",
"monte_carlo_vol",
"(",
"return_overlap",
"=",
"True",
")"
] | 39.07619 | 0.000713 | [
"def update(self, points, pointvol=0., vol_dec=0.5, vol_check=2.,\n",
" rstate=None, bootstrap=0, pool=None, mc_integrate=False):\n",
" \"\"\"\n",
" Update the set of ellipsoids to bound the collection of points.\n",
"\n",
" Parameters\n",
" ----------\n",
" points : `~numpy.ndarray` with shape (npoints, ndim)\n",
" The set of points to bound.\n",
"\n",
" pointvol : float, optional\n",
" The minimum volume associated with each point. Default is `0.`.\n",
"\n",
" vol_dec : float, optional\n",
" The required fractional reduction in volume after splitting\n",
" an ellipsoid in order to to accept the split.\n",
" Default is `0.5`.\n",
"\n",
" vol_check : float, optional\n",
" The factor used when checking if the volume of the original\n",
" bounding ellipsoid is large enough to warrant `> 2` splits\n",
" via `ell.vol > vol_check * nlive * pointvol`.\n",
" Default is `2.0`.\n",
"\n",
" rstate : `~numpy.random.RandomState`, optional\n",
" `~numpy.random.RandomState` instance.\n",
"\n",
" bootstrap : int, optional\n",
" The number of bootstrapped realizations of the ellipsoids. The\n",
" maximum distance to the set of points \"left out\" during each\n",
" iteration is used to enlarge the resulting volumes.\n",
" Default is `0`.\n",
"\n",
" pool : user-provided pool, optional\n",
" Use this pool of workers to execute operations in parallel.\n",
"\n",
" mc_integrate : bool, optional\n",
" Whether to use Monte Carlo methods to compute the effective\n",
" volume and fractional overlap of the final union of ellipsoids\n",
" with the unit cube. Default is `False`.\n",
"\n",
" \"\"\"\n",
"\n",
" if rstate is None:\n",
" rstate = np.random\n",
"\n",
" if not HAVE_KMEANS:\n",
" raise ValueError(\"scipy.cluster.vq.kmeans2 is required \"\n",
" \"to compute ellipsoid decompositions.\")\n",
"\n",
" npoints, ndim = points.shape\n",
"\n",
" # Calculate the bounding ellipsoid for the points, possibly\n",
" # enlarged to a minimum volume.\n",
" firstell = bounding_ellipsoid(points, pointvol=pointvol)\n",
"\n",
" # Recursively split the bounding ellipsoid using `vol_check`\n",
" # until the volume of each split no longer decreases by a\n",
" # factor of `vol_dec`.\n",
" ells = _bounding_ellipsoids(points, firstell, pointvol=pointvol,\n",
" vol_dec=vol_dec, vol_check=vol_check)\n",
"\n",
" # Update the set of ellipsoids.\n",
" self.nells = len(ells)\n",
" self.ells = ells\n",
" self.ctrs = np.array([ell.ctr for ell in self.ells])\n",
" self.covs = np.array([ell.cov for ell in self.ells])\n",
" self.ams = np.array([ell.am for ell in self.ells])\n",
" self.vols = np.array([ell.vol for ell in self.ells])\n",
" self.vol_tot = sum(self.vols)\n",
"\n",
" # Compute expansion factor.\n",
" expands = np.array([ell.expand for ell in self.ells])\n",
" vols_orig = self.vols / expands\n",
" vol_tot_orig = sum(vols_orig)\n",
" self.expand_tot = self.vol_tot / vol_tot_orig\n",
"\n",
" # Use bootstrapping to determine the volume expansion factor.\n",
" if bootstrap > 0:\n",
"\n",
" # If provided, compute bootstraps in parallel using a pool.\n",
" if pool is None:\n",
" M = map\n",
" else:\n",
" M = pool.map\n",
" ps = [points for it in range(bootstrap)]\n",
" pvs = [pointvol for it in range(bootstrap)]\n",
" vds = [vol_dec for it in range(bootstrap)]\n",
" vcs = [vol_check for it in range(bootstrap)]\n",
" args = zip(ps, pvs, vds, vcs)\n",
" expands = list(M(_ellipsoids_bootstrap_expand, args))\n",
"\n",
" # Conservatively set the expansion factor to be the maximum\n",
" # factor derived from our set of bootstraps.\n",
" expand = max(expands)\n",
"\n",
" # If our ellipsoids are overly constrained, expand them.\n",
" if expand > 1.:\n",
" vs = self.vols * expand**ndim\n",
" self.scale_to_vols(vs)\n",
"\n",
" # Estimate the volume and fractional overlap with the unit cube\n",
" # using Monte Carlo integration.\n",
" if mc_integrate:\n",
" self.vol, self.funit = self.monte_carlo_vol(return_overlap=True)"
] | [
0,
0.0136986301369863,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105
] | 105 | 0.001049 |
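The bootstrap step of the update record above reduces to taking the maximum per-realization expansion factor and scaling volumes by expand**ndim; a numeric sketch with invented factors:

    expands = [1.00, 1.08, 1.15]   # one factor per bootstrap realization
    expand = max(expands)          # conservative choice: 1.15
    ndim = 3
    vols = [v * expand**ndim for v in [0.2, 0.5]]   # each volume grows by 1.15**3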
def get(self, name, return_json=False, quiet=False):
    '''get is a listing for a single instance. It is assumed to be running,
and we need to look up the PID, etc.
'''
from spython.utils import check_install
check_install()
# Ensure compatible for singularity prior to 3.0, and after 3.0
subgroup = "instance.list"
if 'version 3' in self.version():
subgroup = ["instance", "list"]
cmd = self._init_command(subgroup)
cmd.append(name)
output = run_command(cmd, quiet=True)
# Success, we have instances
if output['return_code'] == 0:
        # Only print the table if quiet is False
if quiet is False:
print(''.join(output['message']))
# Prepare json result from table
header = ['daemon_name','pid','container_image']
instances = parse_table(output['message'][0], header)
# Does the user want instance objects instead?
listing = []
if return_json is False:
for i in instances:
new_instance = Instance(pid=i['pid'],
name=i['daemon_name'],
image=i['container_image'],
start=False)
listing.append(new_instance)
instances = listing
# Couldn't get UID
elif output['return_code'] == 255:
bot.error("Couldn't get UID")
# Return code of 0
else:
bot.info('No instances found.')
# If we are given a name, return just one
if name is not None and len(instances) == 1:
instances = instances[0]
return instances | [
"def",
"get",
"(",
"self",
",",
"name",
",",
"return_json",
"=",
"False",
",",
"quiet",
"=",
"False",
")",
":",
"from",
"spython",
".",
"utils",
"import",
"check_install",
"check_install",
"(",
")",
"# Ensure compatible for singularity prior to 3.0, and after 3.0",
"subgroup",
"=",
"\"instance.list\"",
"if",
"'version 3'",
"in",
"self",
".",
"version",
"(",
")",
":",
"subgroup",
"=",
"[",
"\"instance\"",
",",
"\"list\"",
"]",
"cmd",
"=",
"self",
".",
"_init_command",
"(",
"subgroup",
")",
"cmd",
".",
"append",
"(",
"name",
")",
"output",
"=",
"run_command",
"(",
"cmd",
",",
"quiet",
"=",
"True",
")",
"# Success, we have instances",
"if",
"output",
"[",
"'return_code'",
"]",
"==",
"0",
":",
"# Only print the table if we are returning json",
"if",
"quiet",
"is",
"False",
":",
"print",
"(",
"''",
".",
"join",
"(",
"output",
"[",
"'message'",
"]",
")",
")",
"# Prepare json result from table",
"header",
"=",
"[",
"'daemon_name'",
",",
"'pid'",
",",
"'container_image'",
"]",
"instances",
"=",
"parse_table",
"(",
"output",
"[",
"'message'",
"]",
"[",
"0",
"]",
",",
"header",
")",
"# Does the user want instance objects instead?",
"listing",
"=",
"[",
"]",
"if",
"return_json",
"is",
"False",
":",
"for",
"i",
"in",
"instances",
":",
"new_instance",
"=",
"Instance",
"(",
"pid",
"=",
"i",
"[",
"'pid'",
"]",
",",
"name",
"=",
"i",
"[",
"'daemon_name'",
"]",
",",
"image",
"=",
"i",
"[",
"'container_image'",
"]",
",",
"start",
"=",
"False",
")",
"listing",
".",
"append",
"(",
"new_instance",
")",
"instances",
"=",
"listing",
"# Couldn't get UID",
"elif",
"output",
"[",
"'return_code'",
"]",
"==",
"255",
":",
"bot",
".",
"error",
"(",
"\"Couldn't get UID\"",
")",
"# Return code of 0",
"else",
":",
"bot",
".",
"info",
"(",
"'No instances found.'",
")",
"# If we are given a name, return just one",
"if",
"name",
"is",
"not",
"None",
"and",
"len",
"(",
"instances",
")",
"==",
"1",
":",
"instances",
"=",
"instances",
"[",
"0",
"]",
"return",
"instances"
] | 28.473684 | 0.002382 | [
"def get(self, name, return_json=False, quiet=False):\n",
" '''get is a list for a single instance. It is assumed to be running,\n",
" and we need to look up the PID, etc.\n",
" '''\n",
" from spython.utils import check_install\n",
" check_install()\n",
"\n",
" # Ensure compatible for singularity prior to 3.0, and after 3.0\n",
" subgroup = \"instance.list\"\n",
"\n",
" if 'version 3' in self.version():\n",
" subgroup = [\"instance\", \"list\"]\n",
"\n",
" cmd = self._init_command(subgroup)\n",
"\n",
" cmd.append(name)\n",
" output = run_command(cmd, quiet=True)\n",
"\n",
" # Success, we have instances\n",
"\n",
" if output['return_code'] == 0:\n",
"\n",
" # Only print the table if we are returning json\n",
" if quiet is False:\n",
" print(''.join(output['message']))\n",
"\n",
" # Prepare json result from table\n",
"\n",
" header = ['daemon_name','pid','container_image']\n",
" instances = parse_table(output['message'][0], header)\n",
"\n",
" # Does the user want instance objects instead?\n",
" listing = []\n",
" if return_json is False:\n",
" for i in instances:\n",
" new_instance = Instance(pid=i['pid'],\n",
" name=i['daemon_name'],\n",
" image=i['container_image'],\n",
" start=False)\n",
"\n",
" listing.append(new_instance)\n",
" instances = listing\n",
"\n",
" # Couldn't get UID\n",
"\n",
" elif output['return_code'] == 255:\n",
" bot.error(\"Couldn't get UID\")\n",
" \n",
" # Return code of 0\n",
" else:\n",
" bot.info('No instances found.')\n",
"\n",
" # If we are given a name, return just one\n",
" if name is not None and len(instances) == 1:\n",
" instances = instances[0]\n",
"\n",
" return instances"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03508771929824561,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 57 | 0.003442 |
def removed(name, updates=None):
'''
Ensure Microsoft Updates are uninstalled.
Args:
name (str):
The identifier of a single update to uninstall.
updates (list):
A list of identifiers for updates to be removed. Overrides ``name``.
Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
to ensure you're uninstalling the correct update.
.. warning:: Using a partial KB number or a partial Title could result in
more than one update being removed.
Returns:
dict: A dictionary containing the results of the removal
CLI Example:
.. code-block:: yaml
# using a GUID
uninstall_update:
wua.removed:
- name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211
# using a KB
uninstall_update:
wua.removed:
- name: KB3194343
# using the full Title
uninstall_update:
wua.removed:
- name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)
        # Remove multiple updates
uninstall_updates:
wua.removed:
- updates:
- KB3194343
- 28cf1b09-2b1a-458c-9bd1-971d1b26b211
'''
if isinstance(updates, six.string_types):
updates = [updates]
if not updates:
updates = name
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
wua = salt.utils.win_update.WindowsUpdateAgent()
# Search for updates
updates = wua.search(updates)
# No updates found
if updates.count() == 0:
ret['comment'] = 'No updates found'
return ret
# List of updates to uninstall
uninstall = salt.utils.win_update.Updates()
removed_updates = []
for item in updates.updates:
if salt.utils.data.is_true(item.IsInstalled):
uninstall.updates.Add(item)
else:
removed_updates.extend('KB' + kb for kb in item.KBArticleIDs)
if uninstall.count() == 0:
ret['comment'] = 'Updates already removed: '
ret['comment'] += '\n - '.join(removed_updates)
return ret
# Return comment of changes if test.
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Updates will be removed:'
for update in uninstall.updates:
ret['comment'] += '\n'
ret['comment'] += ': '.join(
[update.Identity.UpdateID, update.Title])
return ret
    # Uninstall updates
wua.uninstall(uninstall)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
    # Verify the removal
for item in uninstall.list():
if salt.utils.data.is_true(post_info[item]['Installed']):
ret['changes']['failed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'KBs': post_info[item]['KBs']}
}
ret['result'] = False
else:
ret['changes']['removed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'NeedsReboot': post_info[item]['NeedsReboot'],
'KBs': post_info[item]['KBs']}
}
if ret['changes'].get('failed', False):
ret['comment'] = 'Updates failed'
else:
ret['comment'] = 'Updates removed successfully'
return ret | [
"def",
"removed",
"(",
"name",
",",
"updates",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"updates",
",",
"six",
".",
"string_types",
")",
":",
"updates",
"=",
"[",
"updates",
"]",
"if",
"not",
"updates",
":",
"updates",
"=",
"name",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
"}",
"wua",
"=",
"salt",
".",
"utils",
".",
"win_update",
".",
"WindowsUpdateAgent",
"(",
")",
"# Search for updates",
"updates",
"=",
"wua",
".",
"search",
"(",
"updates",
")",
"# No updates found",
"if",
"updates",
".",
"count",
"(",
")",
"==",
"0",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'No updates found'",
"return",
"ret",
"# List of updates to uninstall",
"uninstall",
"=",
"salt",
".",
"utils",
".",
"win_update",
".",
"Updates",
"(",
")",
"removed_updates",
"=",
"[",
"]",
"for",
"item",
"in",
"updates",
".",
"updates",
":",
"if",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"item",
".",
"IsInstalled",
")",
":",
"uninstall",
".",
"updates",
".",
"Add",
"(",
"item",
")",
"else",
":",
"removed_updates",
".",
"extend",
"(",
"'KB'",
"+",
"kb",
"for",
"kb",
"in",
"item",
".",
"KBArticleIDs",
")",
"if",
"uninstall",
".",
"count",
"(",
")",
"==",
"0",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Updates already removed: '",
"ret",
"[",
"'comment'",
"]",
"+=",
"'\\n - '",
".",
"join",
"(",
"removed_updates",
")",
"return",
"ret",
"# Return comment of changes if test.",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Updates will be removed:'",
"for",
"update",
"in",
"uninstall",
".",
"updates",
":",
"ret",
"[",
"'comment'",
"]",
"+=",
"'\\n'",
"ret",
"[",
"'comment'",
"]",
"+=",
"': '",
".",
"join",
"(",
"[",
"update",
".",
"Identity",
".",
"UpdateID",
",",
"update",
".",
"Title",
"]",
")",
"return",
"ret",
"# Install updates",
"wua",
".",
"uninstall",
"(",
"uninstall",
")",
"# Refresh windows update info",
"wua",
".",
"refresh",
"(",
")",
"post_info",
"=",
"wua",
".",
"updates",
"(",
")",
".",
"list",
"(",
")",
"# Verify the installation",
"for",
"item",
"in",
"uninstall",
".",
"list",
"(",
")",
":",
"if",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"post_info",
"[",
"item",
"]",
"[",
"'Installed'",
"]",
")",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'failed'",
"]",
"=",
"{",
"item",
":",
"{",
"'Title'",
":",
"post_info",
"[",
"item",
"]",
"[",
"'Title'",
"]",
"[",
":",
"40",
"]",
"+",
"'...'",
",",
"'KBs'",
":",
"post_info",
"[",
"item",
"]",
"[",
"'KBs'",
"]",
"}",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'removed'",
"]",
"=",
"{",
"item",
":",
"{",
"'Title'",
":",
"post_info",
"[",
"item",
"]",
"[",
"'Title'",
"]",
"[",
":",
"40",
"]",
"+",
"'...'",
",",
"'NeedsReboot'",
":",
"post_info",
"[",
"item",
"]",
"[",
"'NeedsReboot'",
"]",
",",
"'KBs'",
":",
"post_info",
"[",
"item",
"]",
"[",
"'KBs'",
"]",
"}",
"}",
"if",
"ret",
"[",
"'changes'",
"]",
".",
"get",
"(",
"'failed'",
",",
"False",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Updates failed'",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Updates removed successfully'",
"return",
"ret"
] | 28.52459 | 0.000833 | [
"def removed(name, updates=None):\n",
" '''\n",
" Ensure Microsoft Updates are uninstalled.\n",
"\n",
" Args:\n",
"\n",
" name (str):\n",
" The identifier of a single update to uninstall.\n",
"\n",
" updates (list):\n",
" A list of identifiers for updates to be removed. Overrides ``name``.\n",
" Default is None.\n",
"\n",
" .. note:: Identifiers can be the GUID, the KB number, or any part of the\n",
" Title of the Microsoft update. GUIDs and KBs are the preferred method\n",
" to ensure you're uninstalling the correct update.\n",
"\n",
" .. warning:: Using a partial KB number or a partial Title could result in\n",
" more than one update being removed.\n",
"\n",
" Returns:\n",
" dict: A dictionary containing the results of the removal\n",
"\n",
" CLI Example:\n",
"\n",
" .. code-block:: yaml\n",
"\n",
" # using a GUID\n",
" uninstall_update:\n",
" wua.removed:\n",
" - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n",
"\n",
" # using a KB\n",
" uninstall_update:\n",
" wua.removed:\n",
" - name: KB3194343\n",
"\n",
" # using the full Title\n",
" uninstall_update:\n",
" wua.removed:\n",
" - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)\n",
"\n",
" # Install multiple updates\n",
" uninstall_updates:\n",
" wua.removed:\n",
" - updates:\n",
" - KB3194343\n",
" - 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n",
" '''\n",
" if isinstance(updates, six.string_types):\n",
" updates = [updates]\n",
"\n",
" if not updates:\n",
" updates = name\n",
"\n",
" ret = {'name': name,\n",
" 'changes': {},\n",
" 'result': True,\n",
" 'comment': ''}\n",
"\n",
" wua = salt.utils.win_update.WindowsUpdateAgent()\n",
"\n",
" # Search for updates\n",
" updates = wua.search(updates)\n",
"\n",
" # No updates found\n",
" if updates.count() == 0:\n",
" ret['comment'] = 'No updates found'\n",
" return ret\n",
"\n",
" # List of updates to uninstall\n",
" uninstall = salt.utils.win_update.Updates()\n",
" removed_updates = []\n",
" for item in updates.updates:\n",
" if salt.utils.data.is_true(item.IsInstalled):\n",
" uninstall.updates.Add(item)\n",
" else:\n",
" removed_updates.extend('KB' + kb for kb in item.KBArticleIDs)\n",
"\n",
" if uninstall.count() == 0:\n",
" ret['comment'] = 'Updates already removed: '\n",
" ret['comment'] += '\\n - '.join(removed_updates)\n",
" return ret\n",
"\n",
" # Return comment of changes if test.\n",
" if __opts__['test']:\n",
" ret['result'] = None\n",
" ret['comment'] = 'Updates will be removed:'\n",
" for update in uninstall.updates:\n",
" ret['comment'] += '\\n'\n",
" ret['comment'] += ': '.join(\n",
" [update.Identity.UpdateID, update.Title])\n",
" return ret\n",
"\n",
" # Install updates\n",
" wua.uninstall(uninstall)\n",
"\n",
" # Refresh windows update info\n",
" wua.refresh()\n",
" post_info = wua.updates().list()\n",
"\n",
" # Verify the installation\n",
" for item in uninstall.list():\n",
" if salt.utils.data.is_true(post_info[item]['Installed']):\n",
" ret['changes']['failed'] = {\n",
" item: {'Title': post_info[item]['Title'][:40] + '...',\n",
" 'KBs': post_info[item]['KBs']}\n",
" }\n",
" ret['result'] = False\n",
" else:\n",
" ret['changes']['removed'] = {\n",
" item: {'Title': post_info[item]['Title'][:40] + '...',\n",
" 'NeedsReboot': post_info[item]['NeedsReboot'],\n",
" 'KBs': post_info[item]['KBs']}\n",
" }\n",
"\n",
" if ret['changes'].get('failed', False):\n",
" ret['comment'] = 'Updates failed'\n",
" else:\n",
" ret['comment'] = 'Updates removed successfully'\n",
"\n",
" return ret"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008130081300813009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 122 | 0.000753 |
def alter_edge(self, from_index, to_index, to_jimage=None,
new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError("Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
))
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]['weight'] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop] | [
"def",
"alter_edge",
"(",
"self",
",",
"from_index",
",",
"to_index",
",",
"to_jimage",
"=",
"None",
",",
"new_weight",
"=",
"None",
",",
"new_edge_properties",
"=",
"None",
")",
":",
"existing_edges",
"=",
"self",
".",
"graph",
".",
"get_edge_data",
"(",
"from_index",
",",
"to_index",
")",
"# ensure that edge exists before attempting to change it",
"if",
"not",
"existing_edges",
":",
"raise",
"ValueError",
"(",
"\"Edge between {} and {} cannot be altered;\\\n no edge exists between those sites.\"",
".",
"format",
"(",
"from_index",
",",
"to_index",
")",
")",
"if",
"to_jimage",
"is",
"None",
":",
"edge_index",
"=",
"0",
"else",
":",
"for",
"i",
",",
"properties",
"in",
"existing_edges",
".",
"items",
"(",
")",
":",
"if",
"properties",
"[",
"\"to_jimage\"",
"]",
"==",
"to_jimage",
":",
"edge_index",
"=",
"i",
"if",
"new_weight",
"is",
"not",
"None",
":",
"self",
".",
"graph",
"[",
"from_index",
"]",
"[",
"to_index",
"]",
"[",
"edge_index",
"]",
"[",
"'weight'",
"]",
"=",
"new_weight",
"if",
"new_edge_properties",
"is",
"not",
"None",
":",
"for",
"prop",
"in",
"list",
"(",
"new_edge_properties",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"graph",
"[",
"from_index",
"]",
"[",
"to_index",
"]",
"[",
"edge_index",
"]",
"[",
"prop",
"]",
"=",
"new_edge_properties",
"[",
"prop",
"]"
] | 40.5 | 0.002296 | [
"def alter_edge(self, from_index, to_index, to_jimage=None,\n",
" new_weight=None, new_edge_properties=None):\n",
" \"\"\"\n",
" Alters either the weight or the edge_properties of\n",
" an edge in the StructureGraph.\n",
"\n",
" :param from_index: int\n",
" :param to_index: int\n",
" :param to_jimage: tuple\n",
" :param new_weight: alter_edge does not require\n",
" that weight be altered. As such, by default, this\n",
" is None. If weight is to be changed, it should be a\n",
" float.\n",
" :param new_edge_properties: alter_edge does not require\n",
" that edge_properties be altered. As such, by default,\n",
" this is None. If any edge properties are to be changed,\n",
" it should be a dictionary of edge properties to be changed.\n",
" :return:\n",
" \"\"\"\n",
"\n",
" existing_edges = self.graph.get_edge_data(from_index, to_index)\n",
"\n",
" # ensure that edge exists before attempting to change it\n",
" if not existing_edges:\n",
" raise ValueError(\"Edge between {} and {} cannot be altered;\\\n",
" no edge exists between those sites.\".format(\n",
" from_index, to_index\n",
" ))\n",
"\n",
" if to_jimage is None:\n",
" edge_index = 0\n",
" else:\n",
" for i, properties in existing_edges.items():\n",
" if properties[\"to_jimage\"] == to_jimage:\n",
" edge_index = i\n",
"\n",
" if new_weight is not None:\n",
" self.graph[from_index][to_index][edge_index]['weight'] = new_weight\n",
"\n",
" if new_edge_properties is not None:\n",
" for prop in list(new_edge_properties.keys()):\n",
" self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]"
] | [
0,
0.015873015873015872,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085
] | 42 | 0.002869 |
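The same mutation as the alter_edge record above, sketched on a bare networkx MultiGraph with invented contents:

    import networkx as nx

    g = nx.MultiGraph()
    g.add_edge(0, 1, to_jimage=(0, 0, 0), weight=1.0)
    g.add_edge(0, 1, to_jimage=(1, 0, 0), weight=1.0)

    # pick the parallel edge whose to_jimage matches, then overwrite its weight
    for i, props in g.get_edge_data(0, 1).items():
        if props['to_jimage'] == (1, 0, 0):
            g[0][1][i]['weight'] = 2.5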
def render_registration(self):
'''
        Render pinned points on the video frame as a red rectangle.
'''
surface = self.get_surface()
if self.canvas is None or self.df_canvas_corners.shape[0] == 0:
return surface
corners = self.df_canvas_corners.copy()
corners['w'] = 1
transform = self.canvas.shapes_to_canvas_transform
canvas_corners = corners.values.dot(transform.T.values).T
points_x = canvas_corners[0]
points_y = canvas_corners[1]
cairo_context = cairo.Context(surface)
cairo_context.move_to(points_x[0], points_y[0])
for x, y in zip(points_x[1:], points_y[1:]):
cairo_context.line_to(x, y)
cairo_context.line_to(points_x[0], points_y[0])
cairo_context.set_source_rgb(1, 0, 0)
cairo_context.stroke()
return surface | [
"def",
"render_registration",
"(",
"self",
")",
":",
"surface",
"=",
"self",
".",
"get_surface",
"(",
")",
"if",
"self",
".",
"canvas",
"is",
"None",
"or",
"self",
".",
"df_canvas_corners",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"return",
"surface",
"corners",
"=",
"self",
".",
"df_canvas_corners",
".",
"copy",
"(",
")",
"corners",
"[",
"'w'",
"]",
"=",
"1",
"transform",
"=",
"self",
".",
"canvas",
".",
"shapes_to_canvas_transform",
"canvas_corners",
"=",
"corners",
".",
"values",
".",
"dot",
"(",
"transform",
".",
"T",
".",
"values",
")",
".",
"T",
"points_x",
"=",
"canvas_corners",
"[",
"0",
"]",
"points_y",
"=",
"canvas_corners",
"[",
"1",
"]",
"cairo_context",
"=",
"cairo",
".",
"Context",
"(",
"surface",
")",
"cairo_context",
".",
"move_to",
"(",
"points_x",
"[",
"0",
"]",
",",
"points_y",
"[",
"0",
"]",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"points_x",
"[",
"1",
":",
"]",
",",
"points_y",
"[",
"1",
":",
"]",
")",
":",
"cairo_context",
".",
"line_to",
"(",
"x",
",",
"y",
")",
"cairo_context",
".",
"line_to",
"(",
"points_x",
"[",
"0",
"]",
",",
"points_y",
"[",
"0",
"]",
")",
"cairo_context",
".",
"set_source_rgb",
"(",
"1",
",",
"0",
",",
"0",
")",
"cairo_context",
".",
"stroke",
"(",
")",
"return",
"surface"
] | 34.24 | 0.002273 | [
"def render_registration(self):\n",
" '''\n",
" Render pinned points on video frame as red rectangle.\n",
" '''\n",
" surface = self.get_surface()\n",
" if self.canvas is None or self.df_canvas_corners.shape[0] == 0:\n",
" return surface\n",
"\n",
" corners = self.df_canvas_corners.copy()\n",
" corners['w'] = 1\n",
"\n",
" transform = self.canvas.shapes_to_canvas_transform\n",
" canvas_corners = corners.values.dot(transform.T.values).T\n",
"\n",
" points_x = canvas_corners[0]\n",
" points_y = canvas_corners[1]\n",
"\n",
" cairo_context = cairo.Context(surface)\n",
" cairo_context.move_to(points_x[0], points_y[0])\n",
" for x, y in zip(points_x[1:], points_y[1:]):\n",
" cairo_context.line_to(x, y)\n",
" cairo_context.line_to(points_x[0], points_y[0])\n",
" cairo_context.set_source_rgb(1, 0, 0)\n",
" cairo_context.stroke()\n",
" return surface"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 25 | 0.005152 |
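render_registration above combines a homogeneous-coordinate transform (the extra w = 1 column lets one matrix multiply apply rotation, scale, and translation together) with a cairo path traced corner to corner and stroked in red. A self-contained sketch of the drawing half, with a made-up surface size and corner points:

import cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 320, 240)
context = cairo.Context(surface)

points_x = [20, 300, 300, 20]                 # hypothetical corner columns
points_y = [20, 20, 220, 220]

context.move_to(points_x[0], points_y[0])
for x, y in zip(points_x[1:], points_y[1:]):
    context.line_to(x, y)
context.line_to(points_x[0], points_y[0])     # close back to the first corner
context.set_source_rgb(1, 0, 0)               # red, as in the original
context.stroke()

surface.write_to_png("registration.png")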
def completed_number(prefix, length):
"""
'prefix' is the start of the CC number as a string, any number of digits.
'length' is the length of the CC number to generate. Typically 13 or 16
"""
ccnumber = prefix
# generate digits
while len(ccnumber) < (length - 1):
digit = random.choice(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
ccnumber.append(digit)
# Calculate sum
sum = 0
pos = 0
reversedCCnumber = []
reversedCCnumber.extend(ccnumber)
reversedCCnumber.reverse()
while pos < length - 1:
odd = int( reversedCCnumber[pos] ) * 2
if odd > 9:
odd -= 9
sum += odd
if pos != (length - 2):
sum += int( reversedCCnumber[pos+1] )
pos += 2
# Calculate check digit
checkdigit = ((sum / 10 + 1) * 10 - sum) % 10
ccnumber.append( str(int(checkdigit)) )
return ''.join(ccnumber) | [
"def",
"completed_number",
"(",
"prefix",
",",
"length",
")",
":",
"ccnumber",
"=",
"prefix",
"# generate digits",
"while",
"len",
"(",
"ccnumber",
")",
"<",
"(",
"length",
"-",
"1",
")",
":",
"digit",
"=",
"random",
".",
"choice",
"(",
"[",
"'0'",
",",
"'1'",
",",
"'2'",
",",
"'3'",
",",
"'4'",
",",
"'5'",
",",
"'6'",
",",
"'7'",
",",
"'8'",
",",
"'9'",
"]",
")",
"ccnumber",
".",
"append",
"(",
"digit",
")",
"# Calculate sum ",
"sum",
"=",
"0",
"pos",
"=",
"0",
"reversedCCnumber",
"=",
"[",
"]",
"reversedCCnumber",
".",
"extend",
"(",
"ccnumber",
")",
"reversedCCnumber",
".",
"reverse",
"(",
")",
"while",
"pos",
"<",
"length",
"-",
"1",
":",
"odd",
"=",
"int",
"(",
"reversedCCnumber",
"[",
"pos",
"]",
")",
"*",
"2",
"if",
"odd",
">",
"9",
":",
"odd",
"-=",
"9",
"sum",
"+=",
"odd",
"if",
"pos",
"!=",
"(",
"length",
"-",
"2",
")",
":",
"sum",
"+=",
"int",
"(",
"reversedCCnumber",
"[",
"pos",
"+",
"1",
"]",
")",
"pos",
"+=",
"2",
"# Calculate check digit",
"checkdigit",
"=",
"(",
"(",
"sum",
"/",
"10",
"+",
"1",
")",
"*",
"10",
"-",
"sum",
")",
"%",
"10",
"ccnumber",
".",
"append",
"(",
"str",
"(",
"int",
"(",
"checkdigit",
")",
")",
")",
"return",
"''",
".",
"join",
"(",
"ccnumber",
")"
] | 32.25 | 0.009677 | [
"def completed_number(prefix, length):\n",
" \"\"\"\n",
" 'prefix' is the start of the CC number as a string, any number of digits.\n",
" 'length' is the length of the CC number to generate. Typically 13 or 16\n",
" \"\"\"\n",
" ccnumber = prefix\n",
" # generate digits\n",
" while len(ccnumber) < (length - 1):\n",
" digit = random.choice(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n",
" ccnumber.append(digit)\n",
" # Calculate sum \n",
" sum = 0\n",
" pos = 0\n",
" reversedCCnumber = []\n",
" reversedCCnumber.extend(ccnumber)\n",
" reversedCCnumber.reverse()\n",
" while pos < length - 1:\n",
" odd = int( reversedCCnumber[pos] ) * 2\n",
" if odd > 9:\n",
" odd -= 9\n",
" sum += odd\n",
" if pos != (length - 2):\n",
" sum += int( reversedCCnumber[pos+1] )\n",
" pos += 2\n",
" # Calculate check digit\n",
" checkdigit = ((sum / 10 + 1) * 10 - sum) % 10\n",
" ccnumber.append( str(int(checkdigit)) )\n",
" return ''.join(ccnumber)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0.0425531914893617,
0,
0,
0,
0,
0.04,
0,
0,
0,
0.045454545454545456,
0.03571428571428571
] | 28 | 0.007978 |
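completed_number above is a Luhn check-digit generator, and two things are worth flagging. Despite the docstring, prefix must already be a list of digit characters, since the body calls ccnumber.append. Also, sum / 10 assumes Python 2 integer division; under Python 3 true division makes ((sum / 10 + 1) * 10 - sum) collapse to roughly 10.0 no matter what sum is, so the computed check digit becomes a rounding artifact, and // is needed to restore the intended behavior. A small validator for sanity-checking the generator's output, assuming the standard Luhn formula:

def luhn_valid(number):
    """Return True if `number` (a string of digits) passes the Luhn check."""
    digits = [int(d) for d in reversed(number)]
    total = 0
    for pos, d in enumerate(digits):
        if pos % 2 == 1:        # double every second digit from the check digit
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

assert luhn_valid("4111111111111111")       # a standard Visa test number
assert not luhn_valid("4111111111111112")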
def get_field_info(wrapper,entity_type):
'type: wrapper :atws.Wrapper'
fields = wrapper.new('GetFieldInfo')
fields.psObjectType = entity_type
return wrapper.GetFieldInfo(fields) | [
"def",
"get_field_info",
"(",
"wrapper",
",",
"entity_type",
")",
":",
"fields",
"=",
"wrapper",
".",
"new",
"(",
"'GetFieldInfo'",
")",
"fields",
".",
"psObjectType",
"=",
"entity_type",
"return",
"wrapper",
".",
"GetFieldInfo",
"(",
"fields",
")"
] | 37.8 | 0.010363 | [
"def get_field_info(wrapper,entity_type):\n",
" 'type: wrapper :atws.Wrapper'\n",
" fields = wrapper.new('GetFieldInfo')\n",
" fields.psObjectType = entity_type\n",
" return wrapper.GetFieldInfo(fields)"
] | [
0.024390243902439025,
0,
0,
0,
0.02564102564102564
] | 5 | 0.010006 |
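get_field_info above is a thin wrapper over the Autotask Web Services (atws) GetFieldInfo call. A hypothetical usage sketch, kept as comments because the connection call, entity name, and field attributes below are assumptions rather than anything shown in the record:

# import atws
#
# wrapper = atws.connect(username="user@example.com", password="...")
# fields = get_field_info(wrapper, "Ticket")
# for field in fields:
#     print(field.Name, field.Type, field.IsRequired)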
def graph_from_seeds(seeds, cell_source):
"""
This creates/updates a networkx graph from a list of cells.
The graph is created when the cell_source is an instance of ExcelCompiler
The graph is updated when the cell_source is an instance of Spreadsheet
"""
# when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
cellmap = cell_source.cellmap
cells = cellmap
G = cell_source.G
for c in seeds:
G.add_node(c)
cellmap[c.address()] = c
# when called from ExcelCompiler instance, construct cellmap and graph from seeds
else: # ~ cell_source is a ExcelCompiler
cellmap = dict([(x.address(),x) for x in seeds])
cells = cell_source.cells
# directed graph
G = networkx.DiGraph()
# match the info in cellmap
for c in cellmap.values(): G.add_node(c)
# cells to analyze: only formulas
todo = [s for s in seeds if s.formula]
steps = [i for i,s in enumerate(todo)]
names = cell_source.named_ranges
while todo:
c1 = todo.pop()
step = steps.pop()
cursheet = c1.sheet
###### 1) looking for cell c1 dependencies ####################
# print 'C1', c1.address()
# in case a formula, get all cells that are arguments
pystr, ast = cell2code(c1, names)
# set the code & compile it (will flag problems sooner rather than later)
c1.python_expression = pystr.replace('"', "'") # compilation is done later
if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
cell_source.pointers.add(c1.address())
# get all the cells/ranges this formula refers to
deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
# remove dupes
deps = uniqueify(deps)
###### 2) connect dependencies in cells in graph ####################
# ### LOG
# tmp = []
# for dep in deps:
# if dep not in names:
# if "!" not in dep and cursheet != None:
# dep = cursheet + "!" + dep
# if dep not in cellmap:
# tmp.append(dep)
# #deps = tmp
# logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
# print logStep
# if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
# print logStep, "[%s...%s]" % (deps[0], deps[-1])
# elif len(deps) > 0:
# print logStep, "->", deps
# else:
# print logStep, "done"
for dep in deps:
dep_name = dep.tvalue.replace('$','')
# this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
if dep_name.startswith(':') or dep_name.endswith(':'):
dep_name = dep_name.replace(':', '')
# if not pointer, we need an absolute address
if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
dep_name = cursheet + "!" + dep_name
# Named_ranges + ranges already parsed (previous iterations)
if dep_name in cellmap:
origins = [cellmap[dep_name]]
target = cellmap[c1.address()]
# if the dep_name is a multi-cell range, create a range object
elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
if dep_name in names:
reference = names[dep_name]
else:
reference = dep_name
if 'OFFSET' in reference or 'INDEX' in reference:
start_end = prepare_pointer(reference, names, ref_cell = c1)
rng = cell_source.Range(start_end)
if dep_name in names: # dep is a pointer range
address = dep_name
else:
if c1.address() in names: # c1 holds is a pointer range
address = c1.address()
else: # a pointer range with no name, its address will be its name
address = '%s:%s' % (start_end["start"], start_end["end"])
cell_source.pointers.add(address)
else:
address = dep_name
# get a list of the addresses in this range that are not yet in the graph
range_addresses = list(resolve_range(reference, should_flatten=True)[0])
cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]
if len(cellmap_add_addresses) > 0:
# this means there are cells to be added
# get row and col dimensions for the sheet, assuming the whole range is in one sheet
sheet_initial = split_address(cellmap_add_addresses[0])[0]
max_rows, max_cols = max_dimension(cellmap, sheet_initial)
# create empty cells that aren't in the cellmap
for addr in cellmap_add_addresses:
sheet_new, col_new, row_new = split_address(addr)
# if somehow a new sheet comes up in the range, get the new dimensions
if sheet_new != sheet_initial:
sheet_initial = sheet_new
max_rows, max_cols = max_dimension(cellmap, sheet_new)
# add the empty cells
if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
# only add cells within the maximum bounds of the sheet to avoid too many evaluations
# for A:A or 1:1 ranges
cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
cellmap[addr] = cell_new # add it to the cellmap
G.add_node(cell_new) # add it to the graph
cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function
rng = cell_source.Range(reference)
if address in cellmap:
virtual_cell = cellmap[address]
else:
virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
# save the range
cellmap[address] = virtual_cell
# add an edge from the range to the parent
G.add_node(virtual_cell)
# Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
G.add_edge(virtual_cell, c1)
# cells in the range should point to the range as their parent
target = virtual_cell
origins = []
if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
for child in rng.addresses:
if child not in cellmap:
origins.append(cells[child])
else:
origins.append(cellmap[child])
else:
# not a range
if dep_name in names:
reference = names[dep_name]
else:
reference = dep_name
if reference in cells:
if dep_name in names:
virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )
G.add_node(virtual_cell)
G.add_edge(cells[reference], virtual_cell)
origins = [virtual_cell]
else:
cell = cells[reference]
origins = [cell]
cell = origins[0]
if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):
cell_source.pointers.add(cell.address())
else:
virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
origins = [virtual_cell]
target = c1
# process each cell
for c2 in flatten(origins):
# if we havent treated this cell allready
if c2.address() not in cellmap:
if c2.formula:
# cell with a formula, needs to be added to the todo list
todo.append(c2)
steps.append(step+1)
else:
# constant cell, no need for further processing, just remember to set the code
pystr,ast = cell2code(c2, names)
c2.python_expression = pystr
c2.compile()
# save in the cellmap
cellmap[c2.address()] = c2
# add to the graph
G.add_node(c2)
# add an edge from the cell to the parent (range or cell)
if(target != []):
# print "Adding edge %s --> %s" % (c2.address(), target.address())
G.add_edge(c2,target)
c1.compile() # cell compilation is done here because pointer ranges might update python_expressions
return (cellmap, G) | [
"def",
"graph_from_seeds",
"(",
"seeds",
",",
"cell_source",
")",
":",
"# when called from Spreadsheet instance, use the Spreadsheet cellmap and graph",
"if",
"hasattr",
"(",
"cell_source",
",",
"'G'",
")",
":",
"# ~ cell_source is a Spreadsheet",
"cellmap",
"=",
"cell_source",
".",
"cellmap",
"cells",
"=",
"cellmap",
"G",
"=",
"cell_source",
".",
"G",
"for",
"c",
"in",
"seeds",
":",
"G",
".",
"add_node",
"(",
"c",
")",
"cellmap",
"[",
"c",
".",
"address",
"(",
")",
"]",
"=",
"c",
"# when called from ExcelCompiler instance, construct cellmap and graph from seeds",
"else",
":",
"# ~ cell_source is a ExcelCompiler",
"cellmap",
"=",
"dict",
"(",
"[",
"(",
"x",
".",
"address",
"(",
")",
",",
"x",
")",
"for",
"x",
"in",
"seeds",
"]",
")",
"cells",
"=",
"cell_source",
".",
"cells",
"# directed graph",
"G",
"=",
"networkx",
".",
"DiGraph",
"(",
")",
"# match the info in cellmap",
"for",
"c",
"in",
"cellmap",
".",
"values",
"(",
")",
":",
"G",
".",
"add_node",
"(",
"c",
")",
"# cells to analyze: only formulas",
"todo",
"=",
"[",
"s",
"for",
"s",
"in",
"seeds",
"if",
"s",
".",
"formula",
"]",
"steps",
"=",
"[",
"i",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"todo",
")",
"]",
"names",
"=",
"cell_source",
".",
"named_ranges",
"while",
"todo",
":",
"c1",
"=",
"todo",
".",
"pop",
"(",
")",
"step",
"=",
"steps",
".",
"pop",
"(",
")",
"cursheet",
"=",
"c1",
".",
"sheet",
"###### 1) looking for cell c1 dependencies ####################",
"# print 'C1', c1.address()",
"# in case a formula, get all cells that are arguments",
"pystr",
",",
"ast",
"=",
"cell2code",
"(",
"c1",
",",
"names",
")",
"# set the code & compile it (will flag problems sooner rather than later)",
"c1",
".",
"python_expression",
"=",
"pystr",
".",
"replace",
"(",
"'\"'",
",",
"\"'\"",
")",
"# compilation is done later",
"if",
"'OFFSET'",
"in",
"c1",
".",
"formula",
"or",
"'INDEX'",
"in",
"c1",
".",
"formula",
":",
"if",
"c1",
".",
"address",
"(",
")",
"not",
"in",
"cell_source",
".",
"named_ranges",
":",
"# pointers names already treated in ExcelCompiler",
"cell_source",
".",
"pointers",
".",
"add",
"(",
"c1",
".",
"address",
"(",
")",
")",
"# get all the cells/ranges this formula refers to",
"deps",
"=",
"[",
"x",
"for",
"x",
"in",
"ast",
".",
"nodes",
"(",
")",
"if",
"isinstance",
"(",
"x",
",",
"RangeNode",
")",
"]",
"# remove dupes",
"deps",
"=",
"uniqueify",
"(",
"deps",
")",
"###### 2) connect dependencies in cells in graph ####################",
"# ### LOG",
"# tmp = []",
"# for dep in deps:",
"# if dep not in names:",
"# if \"!\" not in dep and cursheet != None:",
"# dep = cursheet + \"!\" + dep",
"# if dep not in cellmap:",
"# tmp.append(dep)",
"# #deps = tmp",
"# logStep = \"%s %s = %s \" % ('|'*step, c1.address(), '',)",
"# print logStep",
"# if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):",
"# print logStep, \"[%s...%s]\" % (deps[0], deps[-1])",
"# elif len(deps) > 0:",
"# print logStep, \"->\", deps",
"# else:",
"# print logStep, \"done\"",
"for",
"dep",
"in",
"deps",
":",
"dep_name",
"=",
"dep",
".",
"tvalue",
".",
"replace",
"(",
"'$'",
",",
"''",
")",
"# this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError",
"if",
"dep_name",
".",
"startswith",
"(",
"':'",
")",
"or",
"dep_name",
".",
"endswith",
"(",
"':'",
")",
":",
"dep_name",
"=",
"dep_name",
".",
"replace",
"(",
"':'",
",",
"''",
")",
"# if not pointer, we need an absolute address",
"if",
"dep",
".",
"tsubtype",
"!=",
"'pointer'",
"and",
"dep_name",
"not",
"in",
"names",
"and",
"\"!\"",
"not",
"in",
"dep_name",
"and",
"cursheet",
"!=",
"None",
":",
"dep_name",
"=",
"cursheet",
"+",
"\"!\"",
"+",
"dep_name",
"# Named_ranges + ranges already parsed (previous iterations)",
"if",
"dep_name",
"in",
"cellmap",
":",
"origins",
"=",
"[",
"cellmap",
"[",
"dep_name",
"]",
"]",
"target",
"=",
"cellmap",
"[",
"c1",
".",
"address",
"(",
")",
"]",
"# if the dep_name is a multi-cell range, create a range object",
"elif",
"is_range",
"(",
"dep_name",
")",
"or",
"(",
"dep_name",
"in",
"names",
"and",
"is_range",
"(",
"names",
"[",
"dep_name",
"]",
")",
")",
":",
"if",
"dep_name",
"in",
"names",
":",
"reference",
"=",
"names",
"[",
"dep_name",
"]",
"else",
":",
"reference",
"=",
"dep_name",
"if",
"'OFFSET'",
"in",
"reference",
"or",
"'INDEX'",
"in",
"reference",
":",
"start_end",
"=",
"prepare_pointer",
"(",
"reference",
",",
"names",
",",
"ref_cell",
"=",
"c1",
")",
"rng",
"=",
"cell_source",
".",
"Range",
"(",
"start_end",
")",
"if",
"dep_name",
"in",
"names",
":",
"# dep is a pointer range",
"address",
"=",
"dep_name",
"else",
":",
"if",
"c1",
".",
"address",
"(",
")",
"in",
"names",
":",
"# c1 holds is a pointer range",
"address",
"=",
"c1",
".",
"address",
"(",
")",
"else",
":",
"# a pointer range with no name, its address will be its name",
"address",
"=",
"'%s:%s'",
"%",
"(",
"start_end",
"[",
"\"start\"",
"]",
",",
"start_end",
"[",
"\"end\"",
"]",
")",
"cell_source",
".",
"pointers",
".",
"add",
"(",
"address",
")",
"else",
":",
"address",
"=",
"dep_name",
"# get a list of the addresses in this range that are not yet in the graph",
"range_addresses",
"=",
"list",
"(",
"resolve_range",
"(",
"reference",
",",
"should_flatten",
"=",
"True",
")",
"[",
"0",
"]",
")",
"cellmap_add_addresses",
"=",
"[",
"addr",
"for",
"addr",
"in",
"range_addresses",
"if",
"addr",
"not",
"in",
"cellmap",
".",
"keys",
"(",
")",
"]",
"if",
"len",
"(",
"cellmap_add_addresses",
")",
">",
"0",
":",
"# this means there are cells to be added",
"# get row and col dimensions for the sheet, assuming the whole range is in one sheet",
"sheet_initial",
"=",
"split_address",
"(",
"cellmap_add_addresses",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"max_rows",
",",
"max_cols",
"=",
"max_dimension",
"(",
"cellmap",
",",
"sheet_initial",
")",
"# create empty cells that aren't in the cellmap",
"for",
"addr",
"in",
"cellmap_add_addresses",
":",
"sheet_new",
",",
"col_new",
",",
"row_new",
"=",
"split_address",
"(",
"addr",
")",
"# if somehow a new sheet comes up in the range, get the new dimensions",
"if",
"sheet_new",
"!=",
"sheet_initial",
":",
"sheet_initial",
"=",
"sheet_new",
"max_rows",
",",
"max_cols",
"=",
"max_dimension",
"(",
"cellmap",
",",
"sheet_new",
")",
"# add the empty cells",
"if",
"int",
"(",
"row_new",
")",
"<=",
"max_rows",
"and",
"int",
"(",
"col2num",
"(",
"col_new",
")",
")",
"<=",
"max_cols",
":",
"# only add cells within the maximum bounds of the sheet to avoid too many evaluations",
"# for A:A or 1:1 ranges",
"cell_new",
"=",
"Cell",
"(",
"addr",
",",
"sheet_new",
",",
"value",
"=",
"\"\"",
",",
"should_eval",
"=",
"'False'",
")",
"# create new cell object",
"cellmap",
"[",
"addr",
"]",
"=",
"cell_new",
"# add it to the cellmap",
"G",
".",
"add_node",
"(",
"cell_new",
")",
"# add it to the graph",
"cell_source",
".",
"cells",
"[",
"addr",
"]",
"=",
"cell_new",
"# add it to the cell_source, used in this function",
"rng",
"=",
"cell_source",
".",
"Range",
"(",
"reference",
")",
"if",
"address",
"in",
"cellmap",
":",
"virtual_cell",
"=",
"cellmap",
"[",
"address",
"]",
"else",
":",
"virtual_cell",
"=",
"Cell",
"(",
"address",
",",
"None",
",",
"value",
"=",
"rng",
",",
"formula",
"=",
"reference",
",",
"is_range",
"=",
"True",
",",
"is_named_range",
"=",
"True",
")",
"# save the range",
"cellmap",
"[",
"address",
"]",
"=",
"virtual_cell",
"# add an edge from the range to the parent",
"G",
".",
"add_node",
"(",
"virtual_cell",
")",
"# Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1",
"G",
".",
"add_edge",
"(",
"virtual_cell",
",",
"c1",
")",
"# cells in the range should point to the range as their parent",
"target",
"=",
"virtual_cell",
"origins",
"=",
"[",
"]",
"if",
"len",
"(",
"list",
"(",
"rng",
".",
"keys",
"(",
")",
")",
")",
"!=",
"0",
":",
"# could be better, but can't check on Exception types here...",
"for",
"child",
"in",
"rng",
".",
"addresses",
":",
"if",
"child",
"not",
"in",
"cellmap",
":",
"origins",
".",
"append",
"(",
"cells",
"[",
"child",
"]",
")",
"else",
":",
"origins",
".",
"append",
"(",
"cellmap",
"[",
"child",
"]",
")",
"else",
":",
"# not a range",
"if",
"dep_name",
"in",
"names",
":",
"reference",
"=",
"names",
"[",
"dep_name",
"]",
"else",
":",
"reference",
"=",
"dep_name",
"if",
"reference",
"in",
"cells",
":",
"if",
"dep_name",
"in",
"names",
":",
"virtual_cell",
"=",
"Cell",
"(",
"dep_name",
",",
"None",
",",
"value",
"=",
"cells",
"[",
"reference",
"]",
".",
"value",
",",
"formula",
"=",
"reference",
",",
"is_range",
"=",
"False",
",",
"is_named_range",
"=",
"True",
")",
"G",
".",
"add_node",
"(",
"virtual_cell",
")",
"G",
".",
"add_edge",
"(",
"cells",
"[",
"reference",
"]",
",",
"virtual_cell",
")",
"origins",
"=",
"[",
"virtual_cell",
"]",
"else",
":",
"cell",
"=",
"cells",
"[",
"reference",
"]",
"origins",
"=",
"[",
"cell",
"]",
"cell",
"=",
"origins",
"[",
"0",
"]",
"if",
"cell",
".",
"formula",
"is",
"not",
"None",
"and",
"(",
"'OFFSET'",
"in",
"cell",
".",
"formula",
"or",
"'INDEX'",
"in",
"cell",
".",
"formula",
")",
":",
"cell_source",
".",
"pointers",
".",
"add",
"(",
"cell",
".",
"address",
"(",
")",
")",
"else",
":",
"virtual_cell",
"=",
"Cell",
"(",
"dep_name",
",",
"None",
",",
"value",
"=",
"None",
",",
"formula",
"=",
"None",
",",
"is_range",
"=",
"False",
",",
"is_named_range",
"=",
"True",
")",
"origins",
"=",
"[",
"virtual_cell",
"]",
"target",
"=",
"c1",
"# process each cell",
"for",
"c2",
"in",
"flatten",
"(",
"origins",
")",
":",
"# if we havent treated this cell allready",
"if",
"c2",
".",
"address",
"(",
")",
"not",
"in",
"cellmap",
":",
"if",
"c2",
".",
"formula",
":",
"# cell with a formula, needs to be added to the todo list",
"todo",
".",
"append",
"(",
"c2",
")",
"steps",
".",
"append",
"(",
"step",
"+",
"1",
")",
"else",
":",
"# constant cell, no need for further processing, just remember to set the code",
"pystr",
",",
"ast",
"=",
"cell2code",
"(",
"c2",
",",
"names",
")",
"c2",
".",
"python_expression",
"=",
"pystr",
"c2",
".",
"compile",
"(",
")",
"# save in the cellmap",
"cellmap",
"[",
"c2",
".",
"address",
"(",
")",
"]",
"=",
"c2",
"# add to the graph",
"G",
".",
"add_node",
"(",
"c2",
")",
"# add an edge from the cell to the parent (range or cell)",
"if",
"(",
"target",
"!=",
"[",
"]",
")",
":",
"# print \"Adding edge %s --> %s\" % (c2.address(), target.address())",
"G",
".",
"add_edge",
"(",
"c2",
",",
"target",
")",
"c1",
".",
"compile",
"(",
")",
"# cell compilation is done here because pointer ranges might update python_expressions",
"return",
"(",
"cellmap",
",",
"G",
")"
] | 43.918552 | 0.008966 | [
"def graph_from_seeds(seeds, cell_source):\n",
" \"\"\"\n",
" This creates/updates a networkx graph from a list of cells.\n",
"\n",
" The graph is created when the cell_source is an instance of ExcelCompiler\n",
" The graph is updated when the cell_source is an instance of Spreadsheet\n",
" \"\"\"\n",
"\n",
" # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph\n",
" if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet\n",
" cellmap = cell_source.cellmap\n",
" cells = cellmap\n",
" G = cell_source.G\n",
" for c in seeds:\n",
" G.add_node(c)\n",
" cellmap[c.address()] = c\n",
" # when called from ExcelCompiler instance, construct cellmap and graph from seeds\n",
" else: # ~ cell_source is a ExcelCompiler\n",
" cellmap = dict([(x.address(),x) for x in seeds])\n",
" cells = cell_source.cells\n",
" # directed graph\n",
" G = networkx.DiGraph()\n",
" # match the info in cellmap\n",
" for c in cellmap.values(): G.add_node(c)\n",
"\n",
" # cells to analyze: only formulas\n",
" todo = [s for s in seeds if s.formula]\n",
" steps = [i for i,s in enumerate(todo)]\n",
" names = cell_source.named_ranges\n",
"\n",
" while todo:\n",
" c1 = todo.pop()\n",
" step = steps.pop()\n",
" cursheet = c1.sheet\n",
"\n",
" ###### 1) looking for cell c1 dependencies ####################\n",
" # print 'C1', c1.address()\n",
" # in case a formula, get all cells that are arguments\n",
" pystr, ast = cell2code(c1, names)\n",
" # set the code & compile it (will flag problems sooner rather than later)\n",
" c1.python_expression = pystr.replace('\"', \"'\") # compilation is done later\n",
"\n",
" if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:\n",
" if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler\n",
" cell_source.pointers.add(c1.address())\n",
"\n",
" # get all the cells/ranges this formula refers to\n",
" deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]\n",
" # remove dupes\n",
" deps = uniqueify(deps)\n",
"\n",
" ###### 2) connect dependencies in cells in graph ####################\n",
"\n",
" # ### LOG\n",
" # tmp = []\n",
" # for dep in deps:\n",
" # if dep not in names:\n",
" # if \"!\" not in dep and cursheet != None:\n",
" # dep = cursheet + \"!\" + dep\n",
" # if dep not in cellmap:\n",
" # tmp.append(dep)\n",
" # #deps = tmp\n",
" # logStep = \"%s %s = %s \" % ('|'*step, c1.address(), '',)\n",
" # print logStep\n",
"\n",
" # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):\n",
" # print logStep, \"[%s...%s]\" % (deps[0], deps[-1])\n",
" # elif len(deps) > 0:\n",
" # print logStep, \"->\", deps\n",
" # else:\n",
" # print logStep, \"done\"\n",
"\n",
" for dep in deps:\n",
" dep_name = dep.tvalue.replace('$','')\n",
"\n",
" # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError\n",
" if dep_name.startswith(':') or dep_name.endswith(':'):\n",
" dep_name = dep_name.replace(':', '')\n",
"\n",
" # if not pointer, we need an absolute address\n",
" if dep.tsubtype != 'pointer' and dep_name not in names and \"!\" not in dep_name and cursheet != None:\n",
" dep_name = cursheet + \"!\" + dep_name\n",
"\n",
" # Named_ranges + ranges already parsed (previous iterations)\n",
" if dep_name in cellmap:\n",
" origins = [cellmap[dep_name]]\n",
" target = cellmap[c1.address()]\n",
" # if the dep_name is a multi-cell range, create a range object\n",
" elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):\n",
" if dep_name in names:\n",
" reference = names[dep_name]\n",
" else:\n",
" reference = dep_name\n",
"\n",
" if 'OFFSET' in reference or 'INDEX' in reference:\n",
" start_end = prepare_pointer(reference, names, ref_cell = c1)\n",
" rng = cell_source.Range(start_end)\n",
"\n",
" if dep_name in names: # dep is a pointer range\n",
" address = dep_name\n",
" else:\n",
" if c1.address() in names: # c1 holds is a pointer range\n",
" address = c1.address()\n",
" else: # a pointer range with no name, its address will be its name\n",
" address = '%s:%s' % (start_end[\"start\"], start_end[\"end\"])\n",
" cell_source.pointers.add(address)\n",
" else:\n",
" address = dep_name\n",
"\n",
" # get a list of the addresses in this range that are not yet in the graph\n",
" range_addresses = list(resolve_range(reference, should_flatten=True)[0])\n",
" cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]\n",
"\n",
" if len(cellmap_add_addresses) > 0:\n",
" # this means there are cells to be added\n",
"\n",
" # get row and col dimensions for the sheet, assuming the whole range is in one sheet\n",
" sheet_initial = split_address(cellmap_add_addresses[0])[0]\n",
" max_rows, max_cols = max_dimension(cellmap, sheet_initial)\n",
"\n",
" # create empty cells that aren't in the cellmap\n",
" for addr in cellmap_add_addresses:\n",
" sheet_new, col_new, row_new = split_address(addr)\n",
"\n",
" # if somehow a new sheet comes up in the range, get the new dimensions\n",
" if sheet_new != sheet_initial:\n",
" sheet_initial = sheet_new\n",
" max_rows, max_cols = max_dimension(cellmap, sheet_new)\n",
"\n",
" # add the empty cells\n",
" if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:\n",
" # only add cells within the maximum bounds of the sheet to avoid too many evaluations\n",
" # for A:A or 1:1 ranges\n",
"\n",
" cell_new = Cell(addr, sheet_new, value=\"\", should_eval='False') # create new cell object\n",
" cellmap[addr] = cell_new # add it to the cellmap\n",
" G.add_node(cell_new) # add it to the graph\n",
" cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function\n",
"\n",
" rng = cell_source.Range(reference)\n",
"\n",
" if address in cellmap:\n",
" virtual_cell = cellmap[address]\n",
" else:\n",
" virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )\n",
" # save the range\n",
" cellmap[address] = virtual_cell\n",
"\n",
" # add an edge from the range to the parent\n",
" G.add_node(virtual_cell)\n",
" # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1\n",
" G.add_edge(virtual_cell, c1)\n",
" # cells in the range should point to the range as their parent\n",
" target = virtual_cell\n",
" origins = []\n",
"\n",
" if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...\n",
" for child in rng.addresses:\n",
" if child not in cellmap:\n",
" origins.append(cells[child])\n",
" else:\n",
" origins.append(cellmap[child])\n",
" else:\n",
" # not a range\n",
" if dep_name in names:\n",
" reference = names[dep_name]\n",
" else:\n",
" reference = dep_name\n",
"\n",
" if reference in cells:\n",
" if dep_name in names:\n",
" virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )\n",
"\n",
" G.add_node(virtual_cell)\n",
" G.add_edge(cells[reference], virtual_cell)\n",
"\n",
" origins = [virtual_cell]\n",
" else:\n",
" cell = cells[reference]\n",
" origins = [cell]\n",
"\n",
" cell = origins[0]\n",
"\n",
" if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):\n",
" cell_source.pointers.add(cell.address())\n",
" else:\n",
" virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )\n",
" origins = [virtual_cell]\n",
"\n",
" target = c1\n",
"\n",
"\n",
" # process each cell\n",
" for c2 in flatten(origins):\n",
"\n",
" # if we havent treated this cell allready\n",
" if c2.address() not in cellmap:\n",
" if c2.formula:\n",
" # cell with a formula, needs to be added to the todo list\n",
" todo.append(c2)\n",
" steps.append(step+1)\n",
" else:\n",
" # constant cell, no need for further processing, just remember to set the code\n",
" pystr,ast = cell2code(c2, names)\n",
" c2.python_expression = pystr\n",
" c2.compile()\n",
"\n",
" # save in the cellmap\n",
" cellmap[c2.address()] = c2\n",
" # add to the graph\n",
" G.add_node(c2)\n",
"\n",
" # add an edge from the cell to the parent (range or cell)\n",
" if(target != []):\n",
" # print \"Adding edge %s --> %s\" % (c2.address(), target.address())\n",
" G.add_edge(c2,target)\n",
"\n",
" c1.compile() # cell compilation is done here because pointer ranges might update python_expressions\n",
"\n",
"\n",
" return (cellmap, G)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.014925373134328358,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.022222222222222223,
0.017543859649122806,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0.013888888888888888,
0,
0,
0,
0.012195121951219513,
0.024096385542168676,
0,
0,
0.018018018018018018,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0.010638297872340425,
0,
0,
0,
0,
0.017699115044247787,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0.014925373134328358,
0,
0,
0.0125,
0,
0.02197802197802198,
0.011494252873563218,
0,
0,
0,
0,
0.010638297872340425,
0.010752688172043012,
0.009174311926605505,
0,
0,
0,
0,
0.009174311926605505,
0.012048192771084338,
0.012048192771084338,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0.011494252873563218,
0,
0,
0.010526315789473684,
0.00847457627118644,
0,
0,
0.01652892561983471,
0.024691358024691357,
0.013333333333333334,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01834862385321101,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06451612903225806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0.078125,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0.009708737864077669,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.023809523809523808,
0,
0.018518518518518517,
0,
0,
0.08695652173913043
] | 221 | 0.004621 |
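graph_from_seeds above is dense, but the structure it builds is simple: a networkx DiGraph whose edges run from each dependency (cell or range) to the cell whose formula consumes it, so the workbook can later be evaluated in dependency order. A toy sketch of that shape with made-up addresses:

import networkx as nx

G = nx.DiGraph()
G.add_edge("Sheet1!A1", "Sheet1!B1")   # B1's formula reads A1
G.add_edge("Sheet1!A2", "Sheet1!B1")   # ... and A2
G.add_edge("Sheet1!B1", "Sheet1!C1")   # C1's formula reads B1

# Evaluating in topological order guarantees inputs are computed first,
# e.g. ['Sheet1!A1', 'Sheet1!A2', 'Sheet1!B1', 'Sheet1!C1'].
print(list(nx.topological_sort(G)))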
def port(alias_name, default=None, allow_none=False):
"""Get the port from the docker link alias or return the default.
Args:
alias_name: The docker link alias
default: The default value if the link isn't available
allow_none: If the return value can be `None` (i.e. optional)
Examples:
Assuming a Docker link was created with ``docker --link postgres:db``
and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.
>>> envitro.docker.port('DB')
5432
"""
warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
try:
return int(_split_docker_link(alias_name)[2])
except KeyError as err:
if default or allow_none:
return default
else:
raise err | [
"def",
"port",
"(",
"alias_name",
",",
"default",
"=",
"None",
",",
"allow_none",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"'Will be removed in v1.0'",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"try",
":",
"return",
"int",
"(",
"_split_docker_link",
"(",
"alias_name",
")",
"[",
"2",
"]",
")",
"except",
"KeyError",
"as",
"err",
":",
"if",
"default",
"or",
"allow_none",
":",
"return",
"default",
"else",
":",
"raise",
"err"
] | 34.478261 | 0.002454 | [
"def port(alias_name, default=None, allow_none=False):\n",
" \"\"\"Get the port from the docker link alias or return the default.\n",
"\n",
" Args:\n",
" alias_name: The docker link alias\n",
" default: The default value if the link isn't available\n",
" allow_none: If the return value can be `None` (i.e. optional)\n",
"\n",
" Examples:\n",
" Assuming a Docker link was created with ``docker --link postgres:db``\n",
" and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.\n",
"\n",
" >>> envitro.docker.port('DB')\n",
" 5432\n",
" \"\"\"\n",
" warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n",
" try:\n",
" return int(_split_docker_link(alias_name)[2])\n",
" except KeyError as err:\n",
" if default or allow_none:\n",
" return default\n",
" else:\n",
" raise err"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 23 | 0.002576 |
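port above relies on a helper _split_docker_link whose body isn't shown; judging by the docstring's DB_PORT=tcp://172.17.0.82:5432 example, index [2] of its result is the port. A standalone sketch of the same parse (the helper's actual return shape is an assumption):

import os
from urllib.parse import urlparse

os.environ["DB_PORT"] = "tcp://172.17.0.82:5432"    # what docker --link sets

parsed = urlparse(os.environ["DB_PORT"])
print(parsed.scheme, parsed.hostname, parsed.port)  # tcp 172.17.0.82 5432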
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
assert isinstance(value, six.integer_types)
return str(value).encode("utf_8") | [
"def",
"value_to_db",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"six",
".",
"integer_types",
")",
"return",
"str",
"(",
"value",
")",
".",
"encode",
"(",
"\"utf_8\"",
")"
] | 50.5 | 0.014634 | [
"def value_to_db(self, value):\n",
" \"\"\" Returns field's single value prepared for saving into a database. \"\"\"\n",
" assert isinstance(value, six.integer_types)\n",
" return str(value).encode(\"utf_8\")"
] | [
0,
0.024390243902439025,
0,
0.024390243902439025
] | 4 | 0.012195 |
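value_to_db above stores an integer as its decimal string in UTF-8 bytes, which keeps the persisted form human-readable and trivially reversible:

value = 12345
stored = str(value).encode("utf_8")
print(stored)       # b'12345'
print(int(stored))  # 12345, since int() accepts ASCII digit bytes directly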
def get(self, sid):
"""
Constructs a OriginationUrlContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
"""
return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, ) | [
"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"OriginationUrlContext",
"(",
"self",
".",
"_version",
",",
"trunk_sid",
"=",
"self",
".",
"_solution",
"[",
"'trunk_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
] | 41.8 | 0.01171 | [
"def get(self, sid):\n",
" \"\"\"\n",
" Constructs a OriginationUrlContext\n",
"\n",
" :param sid: The unique string that identifies the resource\n",
"\n",
" :returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext\n",
" :rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext\n",
" \"\"\"\n",
" return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, )"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0.019801980198019802
] | 10 | 0.012667 |
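The get above returns a lazy OriginationUrlContext; no HTTP request happens until the context is fetched. A hypothetical call chain, kept as comments since the client setup and the placeholder SIDs are assumptions:

# from twilio.rest import Client
#
# client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "auth_token")
# trunk = client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
# context = trunk.origination_urls.get("OUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
# origination_url = context.fetch()     # the request happens here
# print(origination_url.sip_url)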
def from_header(self, header):
"""Generate a SpanContext object using the trace context header.
The value of enabled parsed from header is int. Need to convert to
bool.
:type header: str
:param header: Trace context header which was extracted from the HTTP
request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
"""
if header is None:
return SpanContext()
try:
match = re.search(_TRACE_CONTEXT_HEADER_RE, header)
except TypeError:
logging.warning(
'Header should be str, got {}. Cannot parse the header.'
.format(header.__class__.__name__))
raise
if match:
trace_id = match.group(1)
span_id = match.group(3)
trace_options = match.group(5)
if trace_options is None:
trace_options = 1
span_context = SpanContext(
trace_id=trace_id,
span_id=span_id,
trace_options=TraceOptions(trace_options),
from_header=True)
return span_context
else:
logging.warning(
'Cannot parse the header {}, generate a new context instead.'
.format(header))
return SpanContext() | [
"def",
"from_header",
"(",
"self",
",",
"header",
")",
":",
"if",
"header",
"is",
"None",
":",
"return",
"SpanContext",
"(",
")",
"try",
":",
"match",
"=",
"re",
".",
"search",
"(",
"_TRACE_CONTEXT_HEADER_RE",
",",
"header",
")",
"except",
"TypeError",
":",
"logging",
".",
"warning",
"(",
"'Header should be str, got {}. Cannot parse the header.'",
".",
"format",
"(",
"header",
".",
"__class__",
".",
"__name__",
")",
")",
"raise",
"if",
"match",
":",
"trace_id",
"=",
"match",
".",
"group",
"(",
"1",
")",
"span_id",
"=",
"match",
".",
"group",
"(",
"3",
")",
"trace_options",
"=",
"match",
".",
"group",
"(",
"5",
")",
"if",
"trace_options",
"is",
"None",
":",
"trace_options",
"=",
"1",
"span_context",
"=",
"SpanContext",
"(",
"trace_id",
"=",
"trace_id",
",",
"span_id",
"=",
"span_id",
",",
"trace_options",
"=",
"TraceOptions",
"(",
"trace_options",
")",
",",
"from_header",
"=",
"True",
")",
"return",
"span_context",
"else",
":",
"logging",
".",
"warning",
"(",
"'Cannot parse the header {}, generate a new context instead.'",
".",
"format",
"(",
"header",
")",
")",
"return",
"SpanContext",
"(",
")"
] | 33.690476 | 0.001374 | [
"def from_header(self, header):\n",
" \"\"\"Generate a SpanContext object using the trace context header.\n",
" The value of enabled parsed from header is int. Need to convert to\n",
" bool.\n",
"\n",
" :type header: str\n",
" :param header: Trace context header which was extracted from the HTTP\n",
" request headers.\n",
"\n",
" :rtype: :class:`~opencensus.trace.span_context.SpanContext`\n",
" :returns: SpanContext generated from the trace context header.\n",
" \"\"\"\n",
" if header is None:\n",
" return SpanContext()\n",
"\n",
" try:\n",
" match = re.search(_TRACE_CONTEXT_HEADER_RE, header)\n",
" except TypeError:\n",
" logging.warning(\n",
" 'Header should be str, got {}. Cannot parse the header.'\n",
" .format(header.__class__.__name__))\n",
" raise\n",
"\n",
" if match:\n",
" trace_id = match.group(1)\n",
" span_id = match.group(3)\n",
" trace_options = match.group(5)\n",
"\n",
" if trace_options is None:\n",
" trace_options = 1\n",
"\n",
" span_context = SpanContext(\n",
" trace_id=trace_id,\n",
" span_id=span_id,\n",
" trace_options=TraceOptions(trace_options),\n",
" from_header=True)\n",
" return span_context\n",
" else:\n",
" logging.warning(\n",
" 'Cannot parse the header {}, generate a new context instead.'\n",
" .format(header))\n",
" return SpanContext()"
] | [
0,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125
] | 42 | 0.00107 |
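from_header above parses what looks like Google's X-Cloud-Trace-Context layout, TRACE_ID/SPAN_ID;o=OPTIONS, reading the trace id from group 1, the span id from group 3, and the options flag from group 5. The module-level _TRACE_CONTEXT_HEADER_RE isn't shown, so the pattern below is an illustrative stand-in with the same group numbering:

import re

TRACE_CONTEXT_HEADER_RE = re.compile(r"([0-9a-f]{32})(/(\d+))?(;o=(\d+))?")

header = "6e0c63257de34c92bf9efcd03927272e/1234567890;o=1"
match = TRACE_CONTEXT_HEADER_RE.search(header)

print(match.group(1))   # trace id: 6e0c63257de34c92bf9efcd03927272e
print(match.group(3))   # span id:  1234567890
print(match.group(5))   # trace options: 1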
def apply_multicolor_transit(self,band,depth):
"""
Applies constraint corresponding to measuring transit in different band
This is not implemented yet.
"""
if '{} band transit'.format(band) not in self.constraints:
self.constraints.append('{} band transit'.format(band))
for pop in self.poplist:
pop.apply_multicolor_transit(band,depth) | [
"def",
"apply_multicolor_transit",
"(",
"self",
",",
"band",
",",
"depth",
")",
":",
"if",
"'{} band transit'",
".",
"format",
"(",
"band",
")",
"not",
"in",
"self",
".",
"constraints",
":",
"self",
".",
"constraints",
".",
"append",
"(",
"'{} band transit'",
".",
"format",
"(",
"band",
")",
")",
"for",
"pop",
"in",
"self",
".",
"poplist",
":",
"pop",
".",
"apply_multicolor_transit",
"(",
"band",
",",
"depth",
")"
] | 40 | 0.012225 | [
"def apply_multicolor_transit(self,band,depth):\n",
" \"\"\"\n",
" Applies constraint corresponding to measuring transit in different band\n",
"\n",
" This is not implemented yet.\n",
" \"\"\"\n",
" if '{} band transit'.format(band) not in self.constraints:\n",
" self.constraints.append('{} band transit'.format(band))\n",
" for pop in self.poplist:\n",
" pop.apply_multicolor_transit(band,depth)"
] | [
0.0425531914893617,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464
] | 10 | 0.016435 |
def add_view(
self,
baseview,
name,
href="",
icon="",
label="",
category="",
category_icon="",
category_label="",
):
"""
Add your views associated with menus using this method.
:param baseview:
A BaseView type class instantiated or not.
This method will instantiate the class for you if needed.
:param name:
The string name that identifies the menu.
:param href:
Override the generated href for the menu.
You can use an url string or an endpoint name
if non provided default_view from view will be set as href.
:param icon:
Font-Awesome icon name, optional.
:param label:
The label that will be displayed on the menu,
if absent param name will be used
:param category:
The menu category where the menu will be included,
if non provided the view will be acessible as a top menu.
:param category_icon:
Font-Awesome icon name for the category, optional.
:param category_label:
The label that will be displayed on the menu,
if absent param name will be used
Examples::
appbuilder = AppBuilder(app, db)
# Register a view, rendering a top menu without icon.
appbuilder.add_view(MyModelView(), "My View")
# or not instantiated
appbuilder.add_view(MyModelView, "My View")
# Register a view, a submenu "Other View" from "Other" with a phone icon.
appbuilder.add_view(
MyOtherModelView,
"Other View",
icon='fa-phone',
category="Others"
)
# Register a view, with category icon and translation.
appbuilder.add_view(
YetOtherModelView,
"Other View",
icon='fa-phone',
label=_('Other View'),
category="Others",
category_icon='fa-envelop',
category_label=_('Other View')
)
# Add a link
appbuilder.add_link("google", href="www.google.com", icon = "fa-google-plus")
"""
baseview = self._check_and_init(baseview)
log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, name))
if not self._view_exists(baseview):
baseview.appbuilder = self
self.baseviews.append(baseview)
self._process_inner_views()
if self.app:
self.register_blueprint(baseview)
self._add_permission(baseview)
self.add_link(
name=name,
href=href,
icon=icon,
label=label,
category=category,
category_icon=category_icon,
category_label=category_label,
baseview=baseview,
)
return baseview | [
"def",
"add_view",
"(",
"self",
",",
"baseview",
",",
"name",
",",
"href",
"=",
"\"\"",
",",
"icon",
"=",
"\"\"",
",",
"label",
"=",
"\"\"",
",",
"category",
"=",
"\"\"",
",",
"category_icon",
"=",
"\"\"",
",",
"category_label",
"=",
"\"\"",
",",
")",
":",
"baseview",
"=",
"self",
".",
"_check_and_init",
"(",
"baseview",
")",
"log",
".",
"info",
"(",
"LOGMSG_INF_FAB_ADD_VIEW",
".",
"format",
"(",
"baseview",
".",
"__class__",
".",
"__name__",
",",
"name",
")",
")",
"if",
"not",
"self",
".",
"_view_exists",
"(",
"baseview",
")",
":",
"baseview",
".",
"appbuilder",
"=",
"self",
"self",
".",
"baseviews",
".",
"append",
"(",
"baseview",
")",
"self",
".",
"_process_inner_views",
"(",
")",
"if",
"self",
".",
"app",
":",
"self",
".",
"register_blueprint",
"(",
"baseview",
")",
"self",
".",
"_add_permission",
"(",
"baseview",
")",
"self",
".",
"add_link",
"(",
"name",
"=",
"name",
",",
"href",
"=",
"href",
",",
"icon",
"=",
"icon",
",",
"label",
"=",
"label",
",",
"category",
"=",
"category",
",",
"category_icon",
"=",
"category_icon",
",",
"category_label",
"=",
"category_label",
",",
"baseview",
"=",
"baseview",
",",
")",
"return",
"baseview"
] | 34.858824 | 0.001969 | [
"def add_view(\n",
" self,\n",
" baseview,\n",
" name,\n",
" href=\"\",\n",
" icon=\"\",\n",
" label=\"\",\n",
" category=\"\",\n",
" category_icon=\"\",\n",
" category_label=\"\",\n",
" ):\n",
" \"\"\"\n",
" Add your views associated with menus using this method.\n",
"\n",
" :param baseview:\n",
" A BaseView type class instantiated or not.\n",
" This method will instantiate the class for you if needed.\n",
" :param name:\n",
" The string name that identifies the menu.\n",
" :param href:\n",
" Override the generated href for the menu.\n",
" You can use an url string or an endpoint name\n",
" if non provided default_view from view will be set as href.\n",
" :param icon:\n",
" Font-Awesome icon name, optional.\n",
" :param label:\n",
" The label that will be displayed on the menu,\n",
" if absent param name will be used\n",
" :param category:\n",
" The menu category where the menu will be included,\n",
" if non provided the view will be acessible as a top menu.\n",
" :param category_icon:\n",
" Font-Awesome icon name for the category, optional.\n",
" :param category_label:\n",
" The label that will be displayed on the menu,\n",
" if absent param name will be used\n",
"\n",
" Examples::\n",
"\n",
" appbuilder = AppBuilder(app, db)\n",
" # Register a view, rendering a top menu without icon.\n",
" appbuilder.add_view(MyModelView(), \"My View\")\n",
" # or not instantiated\n",
" appbuilder.add_view(MyModelView, \"My View\")\n",
" # Register a view, a submenu \"Other View\" from \"Other\" with a phone icon.\n",
" appbuilder.add_view(\n",
" MyOtherModelView,\n",
" \"Other View\",\n",
" icon='fa-phone',\n",
" category=\"Others\"\n",
" )\n",
" # Register a view, with category icon and translation.\n",
" appbuilder.add_view(\n",
" YetOtherModelView,\n",
" \"Other View\",\n",
" icon='fa-phone',\n",
" label=_('Other View'),\n",
" category=\"Others\",\n",
" category_icon='fa-envelop',\n",
" category_label=_('Other View')\n",
" )\n",
" # Add a link\n",
" appbuilder.add_link(\"google\", href=\"www.google.com\", icon = \"fa-google-plus\")\n",
" \"\"\"\n",
" baseview = self._check_and_init(baseview)\n",
" log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, name))\n",
"\n",
" if not self._view_exists(baseview):\n",
" baseview.appbuilder = self\n",
" self.baseviews.append(baseview)\n",
" self._process_inner_views()\n",
" if self.app:\n",
" self.register_blueprint(baseview)\n",
" self._add_permission(baseview)\n",
" self.add_link(\n",
" name=name,\n",
" href=href,\n",
" icon=icon,\n",
" label=label,\n",
" category=category,\n",
" category_icon=category_icon,\n",
" category_label=category_label,\n",
" baseview=baseview,\n",
" )\n",
" return baseview"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.14285714285714285,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216
] | 85 | 0.00358 |
def do_some_work(
self,
work_dict):
"""do_some_work
:param work_dict: dictionary for key/values
"""
label = "do_some_work"
log.info(("task - {} - start "
"work_dict={}")
.format(label,
work_dict))
ret_data = {
"job_results": ("some response key={}").format(
str(uuid.uuid4()))
}
log.info(("task - {} - result={} done")
.format(
ret_data,
label))
return ret_data | [
"def",
"do_some_work",
"(",
"self",
",",
"work_dict",
")",
":",
"label",
"=",
"\"do_some_work\"",
"log",
".",
"info",
"(",
"(",
"\"task - {} - start \"",
"\"work_dict={}\"",
")",
".",
"format",
"(",
"label",
",",
"work_dict",
")",
")",
"ret_data",
"=",
"{",
"\"job_results\"",
":",
"(",
"\"some response key={}\"",
")",
".",
"format",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"}",
"log",
".",
"info",
"(",
"(",
"\"task - {} - result={} done\"",
")",
".",
"format",
"(",
"ret_data",
",",
"label",
")",
")",
"return",
"ret_data"
] | 20.192308 | 0.001818 | [
"def do_some_work(\n",
" self,\n",
" work_dict):\n",
" \"\"\"do_some_work\n",
"\n",
" :param work_dict: dictionary for key/values\n",
" \"\"\"\n",
"\n",
" label = \"do_some_work\"\n",
"\n",
" log.info((\"task - {} - start \"\n",
" \"work_dict={}\")\n",
" .format(label,\n",
" work_dict))\n",
"\n",
" ret_data = {\n",
" \"job_results\": (\"some response key={}\").format(\n",
" str(uuid.uuid4()))\n",
" }\n",
"\n",
" log.info((\"task - {} - result={} done\")\n",
" .format(\n",
" ret_data,\n",
" label))\n",
"\n",
" return ret_data"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 26 | 0.002024 |
def active_vectors_info(self):
"""Return the active scalar's field and name: [field, name]"""
if not hasattr(self, '_active_vectors_info'):
self._active_vectors_info = [POINT_DATA_FIELD, None] # field and name
_, name = self._active_vectors_info
# rare error where scalar name isn't a valid scalar
if name not in self.point_arrays:
if name not in self.cell_arrays:
name = None
return self._active_vectors_info | [
"def",
"active_vectors_info",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_active_vectors_info'",
")",
":",
"self",
".",
"_active_vectors_info",
"=",
"[",
"POINT_DATA_FIELD",
",",
"None",
"]",
"# field and name",
"_",
",",
"name",
"=",
"self",
".",
"_active_vectors_info",
"# rare error where scalar name isn't a valid scalar",
"if",
"name",
"not",
"in",
"self",
".",
"point_arrays",
":",
"if",
"name",
"not",
"in",
"self",
".",
"cell_arrays",
":",
"name",
"=",
"None",
"return",
"self",
".",
"_active_vectors_info"
] | 40.666667 | 0.008016 | [
"def active_vectors_info(self):\n",
" \"\"\"Return the active scalar's field and name: [field, name]\"\"\"\n",
" if not hasattr(self, '_active_vectors_info'):\n",
" self._active_vectors_info = [POINT_DATA_FIELD, None] # field and name\n",
" _, name = self._active_vectors_info\n",
"\n",
" # rare error where scalar name isn't a valid scalar\n",
" if name not in self.point_arrays:\n",
" if name not in self.cell_arrays:\n",
" name = None\n",
"\n",
" return self._active_vectors_info"
] | [
0,
0.014084507042253521,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0.025
] | 12 | 0.00529 |