text (stringlengths, 75 to 104k) | code_tokens (sequence) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 0.18) | texts (sequence) | scores (sequence) | num_lines (int64, 3 to 2.77k) | avg_score (float64, 0 to 0.37) |
---|---|---|---|---|---|---|---|
def heatmap(n_x=5,n_y=10):
	"""
	Returns a DataFrame with the required format for
	a heatmap plot

	Parameters:
	-----------
		n_x : int
			Number of x categories
		n_y : int
			Number of y categories
	"""
	x=['x_'+str(_) for _ in range(n_x)]
	y=['y_'+str(_) for _ in range(n_y)]
	return pd.DataFrame(surface(n_x-1,n_y-1).values,index=x,columns=y) | [
"def",
"heatmap",
"(",
"n_x",
"=",
"5",
",",
"n_y",
"=",
"10",
")",
":",
"x",
"=",
"[",
"'x_'",
"+",
"str",
"(",
"_",
")",
"for",
"_",
"in",
"range",
"(",
"n_x",
")",
"]",
"y",
"=",
"[",
"'y_'",
"+",
"str",
"(",
"_",
")",
"for",
"_",
"in",
"range",
"(",
"n_y",
")",
"]",
"return",
"pd",
".",
"DataFrame",
"(",
"surface",
"(",
"n_x",
"-",
"1",
",",
"n_y",
"-",
"1",
")",
".",
"values",
",",
"index",
"=",
"x",
",",
"columns",
"=",
"y",
")"
] | 22.333333 | 0.063037 | [
"def heatmap(n_x=5,n_y=10):\n",
"\t\"\"\"\n",
"\tReturns a DataFrame with the required format for \n",
"\ta heatmap plot\n",
"\n",
"\tParameters:\n",
"\t-----------\n",
"\t\tn_x : int\n",
"\t\t\tNumber of x categories\n",
"\t\tn_y : int\n",
"\t\t\tNumber of y categories\n",
"\t\"\"\"\t\n",
"\tx=['x_'+str(_) for _ in range(n_x)]\n",
"\ty=['y_'+str(_) for _ in range(n_y)]\n",
"\treturn pd.DataFrame(surface(n_x-1,n_y-1).values,index=x,columns=y)"
] | [
0.037037037037037035,
0.2,
0.0392156862745098,
0.0625,
0,
0.07692307692307693,
0.07692307692307693,
0.08333333333333333,
0.038461538461538464,
0.08333333333333333,
0.038461538461538464,
0.3333333333333333,
0.05405405405405406,
0.05405405405405406,
0.07462686567164178
] | 15 | 0.083484 |
def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):
    """
    Parameters
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """

    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])

    # Currently CoreML supports only per-channel batch-norm
    if keras_layer.mode != 0:
        raise NotImplementedError(
            'Currently supports only per-feature normalization')

    axis = keras_layer.axis
    nb_channels = keras_layer.input_shape[axis]

    # Set parameters
    # Parameter arrangement in Keras: gamma, beta, mean, variance
    gamma = keras_layer.get_weights()[0]
    beta = keras_layer.get_weights()[1]
    mean = keras_layer.get_weights()[2]
    std = keras_layer.get_weights()[3]
    # compute adjusted parameters
    variance = std * std
    f = 1.0 / np.sqrt(std + keras_layer.epsilon)
    gamma1 = gamma*f
    beta1 = beta - gamma*mean*f
    mean[:] = 0.0 #mean
    variance[:] = 1.0 - .00001 #stddev

    builder.add_batchnorm(
        name = layer,
        channels = nb_channels,
        gamma = gamma1,
        beta = beta1,
        mean = mean,
        variance = variance,
        input_name = input_name,
        output_name = output_name) | [
"def",
"convert_batchnorm",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"# Currently CoreML supports only per-channel batch-norm",
"if",
"keras_layer",
".",
"mode",
"!=",
"0",
":",
"raise",
"NotImplementedError",
"(",
"'Currently supports only per-feature normalization'",
")",
"axis",
"=",
"keras_layer",
".",
"axis",
"nb_channels",
"=",
"keras_layer",
".",
"input_shape",
"[",
"axis",
"]",
"# Set parameters",
"# Parameter arrangement in Keras: gamma, beta, mean, variance",
"gamma",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"0",
"]",
"beta",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"1",
"]",
"mean",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"2",
"]",
"std",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"3",
"]",
"# compute adjusted parameters",
"variance",
"=",
"std",
"*",
"std",
"f",
"=",
"1.0",
"/",
"np",
".",
"sqrt",
"(",
"std",
"+",
"keras_layer",
".",
"epsilon",
")",
"gamma1",
"=",
"gamma",
"*",
"f",
"beta1",
"=",
"beta",
"-",
"gamma",
"*",
"mean",
"*",
"f",
"mean",
"[",
":",
"]",
"=",
"0.0",
"#mean",
"variance",
"[",
":",
"]",
"=",
"1.0",
"-",
".00001",
"#stddev",
"builder",
".",
"add_batchnorm",
"(",
"name",
"=",
"layer",
",",
"channels",
"=",
"nb_channels",
",",
"gamma",
"=",
"gamma1",
",",
"beta",
"=",
"beta1",
",",
"mean",
"=",
"mean",
",",
"variance",
"=",
"variance",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | 28.422222 | 0.016629 | [
"def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):\n",
" \"\"\"\n",
" Parameters\n",
" keras_layer: layer\n",
" A keras layer object.\n",
"\n",
" builder: NeuralNetworkBuilder\n",
" A neural network builder object.\n",
" \"\"\"\n",
"\n",
" # Get input and output names\n",
" input_name, output_name = (input_names[0], output_names[0])\n",
"\n",
" # Currently CoreML supports only per-channel batch-norm\n",
" if keras_layer.mode != 0:\n",
" raise NotImplementedError(\n",
" 'Currently supports only per-feature normalization')\n",
"\n",
" axis = keras_layer.axis\n",
" nb_channels = keras_layer.input_shape[axis]\n",
"\n",
"\n",
" # Set parameters\n",
" # Parameter arrangement in Keras: gamma, beta, mean, variance\n",
" gamma = keras_layer.get_weights()[0]\n",
" beta = keras_layer.get_weights()[1]\n",
" mean = keras_layer.get_weights()[2]\n",
" std = keras_layer.get_weights()[3]\n",
" # compute adjusted parameters\n",
" variance = std * std\n",
" f = 1.0 / np.sqrt(std + keras_layer.epsilon)\n",
" gamma1 = gamma*f\n",
" beta1 = beta - gamma*mean*f\n",
" mean[:] = 0.0 #mean\n",
" variance[:] = 1.0 - .00001 #stddev\n",
"\n",
" builder.add_batchnorm(\n",
" name = layer,\n",
" channels = nb_channels,\n",
" gamma = gamma1,\n",
" beta = beta1,\n",
" mean = mean,\n",
" variance = variance,\n",
" input_name = input_name,\n",
" output_name = output_name)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0.05128205128205128,
0,
0,
0.09090909090909091,
0.0625,
0.08333333333333333,
0.09090909090909091,
0.09523809523809523,
0.06896551724137931,
0.06060606060606061,
0.08823529411764706
] | 45 | 0.018287 |
def update_offset(self, offset):
    # type: (int) -> None
    '''
    Update the offset for this CE record.

    Parameters:
     extent - The new offset for this CE record.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('CE record not yet initialized!')

    self.offset_cont_area = offset | [
"def",
"update_offset",
"(",
"self",
",",
"offset",
")",
":",
"# type: (int) -> None",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'CE record not yet initialized!'",
")",
"self",
".",
"offset_cont_area",
"=",
"offset"
] | 28 | 0.009877 | [
"def update_offset(self, offset):\n",
" # type: (int) -> None\n",
" '''\n",
" Update the offset for this CE record.\n",
"\n",
" Parameters:\n",
" extent - The new offset for this CE record.\n",
" Returns:\n",
" Nothing.\n",
" '''\n",
" if not self._initialized:\n",
" raise pycdlibexception.PyCdlibInternalError('CE record not yet initialized!')\n",
"\n",
" self.offset_cont_area = offset"
] | [
0,
0.03333333333333333,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0.02631578947368421
] | 14 | 0.011007 |
def get_param(self):
    """Method to get current optimizer's parameter value
    """
    lr_list = self.lr_scheduler.get_lr()
    if len(lr_list) > 1:
        raise ValueError("Optimizer passed to lr_scheduler should have a single param group, "
                         "but currently there are {} param groups".format(len(lr_list)))
    return lr_list[0] | [
"def",
"get_param",
"(",
"self",
")",
":",
"lr_list",
"=",
"self",
".",
"lr_scheduler",
".",
"get_lr",
"(",
")",
"if",
"len",
"(",
"lr_list",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Optimizer passed to lr_scheduler should have a single param group, \"",
"\"but currently there are {} param groups\"",
".",
"format",
"(",
"len",
"(",
"lr_list",
")",
")",
")",
"return",
"lr_list",
"[",
"0",
"]"
] | 47.25 | 0.01039 | [
"def get_param(self):\n",
" \"\"\"Method to get current optimizer's parameter value\n",
" \"\"\"\n",
" lr_list = self.lr_scheduler.get_lr()\n",
" if len(lr_list) > 1:\n",
" raise ValueError(\"Optimizer passed to lr_scheduler should have a single param group, \"\n",
" \"but currently there are {} param groups\".format(len(lr_list)))\n",
" return lr_list[0]"
] | [
0,
0.01639344262295082,
0,
0,
0,
0.010101010101010102,
0.010752688172043012,
0.04
] | 8 | 0.009656 |
def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None):
    '''
    .. versionchanged:: 2018.3.0
        ``dest`` can now be a directory

    Used to get a single file from a URL.

    path
        A URL to download a file from. Supported URL schemes are: ``salt://``,
        ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and
        ``file://`` (local filesystem). If no scheme was specified, this is
        equivalent of using ``file://``.
        If a ``file://`` URL is given, the function just returns absolute path
        to that file on a local filesystem.
        The function returns ``False`` if Salt was unable to fetch a file from
        a ``salt://`` URL.

    dest
        The default behaviour is to write the fetched file to the given
        destination path. If this parameter is omitted or set as empty string
        (``''``), the function places the remote file on the local filesystem
        inside the Minion cache directory and returns the path to that file.

        .. note::

            To simply return the file contents instead, set destination to
            ``None``. This works with ``salt://``, ``http://``, ``https://``
            and ``file://`` URLs. The files fetched by ``http://`` and
            ``https://`` will not be cached.

    saltenv : base
        Salt fileserver envrionment from which to retrieve the file. Ignored if
        ``path`` is not a ``salt://`` URL.

    source_hash
        If ``path`` is an http(s) or ftp URL and the file exists in the
        minion's file cache, this option can be passed to keep the minion from
        re-downloading the file if the cached copy matches the specified hash.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine
        salt '*' cp.get_url http://www.slashdot.org /tmp/index.html
    '''
    if isinstance(dest, six.string_types):
        result = _client().get_url(
            path, dest, makedirs, saltenv, source_hash=source_hash)
    else:
        result = _client().get_url(
            path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash)
    if not result:
        log.error('Unable to fetch file %s from saltenv %s.',
                  salt.utils.url.redact_http_basic_auth(path),
                  saltenv)
    return result | [
"def",
"get_url",
"(",
"path",
",",
"dest",
"=",
"''",
",",
"saltenv",
"=",
"'base'",
",",
"makedirs",
"=",
"False",
",",
"source_hash",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"dest",
",",
"six",
".",
"string_types",
")",
":",
"result",
"=",
"_client",
"(",
")",
".",
"get_url",
"(",
"path",
",",
"dest",
",",
"makedirs",
",",
"saltenv",
",",
"source_hash",
"=",
"source_hash",
")",
"else",
":",
"result",
"=",
"_client",
"(",
")",
".",
"get_url",
"(",
"path",
",",
"None",
",",
"makedirs",
",",
"saltenv",
",",
"no_cache",
"=",
"True",
",",
"source_hash",
"=",
"source_hash",
")",
"if",
"not",
"result",
":",
"log",
".",
"error",
"(",
"'Unable to fetch file %s from saltenv %s.'",
",",
"salt",
".",
"utils",
".",
"url",
".",
"redact_http_basic_auth",
"(",
"path",
")",
",",
"saltenv",
")",
"return",
"result"
] | 39.559322 | 0.000836 | [
"def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None):\n",
" '''\n",
" .. versionchanged:: 2018.3.0\n",
" ``dest`` can now be a directory\n",
"\n",
" Used to get a single file from a URL.\n",
"\n",
" path\n",
" A URL to download a file from. Supported URL schemes are: ``salt://``,\n",
" ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and\n",
" ``file://`` (local filesystem). If no scheme was specified, this is\n",
" equivalent of using ``file://``.\n",
" If a ``file://`` URL is given, the function just returns absolute path\n",
" to that file on a local filesystem.\n",
" The function returns ``False`` if Salt was unable to fetch a file from\n",
" a ``salt://`` URL.\n",
"\n",
" dest\n",
" The default behaviour is to write the fetched file to the given\n",
" destination path. If this parameter is omitted or set as empty string\n",
" (``''``), the function places the remote file on the local filesystem\n",
" inside the Minion cache directory and returns the path to that file.\n",
"\n",
" .. note::\n",
"\n",
" To simply return the file contents instead, set destination to\n",
" ``None``. This works with ``salt://``, ``http://``, ``https://``\n",
" and ``file://`` URLs. The files fetched by ``http://`` and\n",
" ``https://`` will not be cached.\n",
"\n",
" saltenv : base\n",
" Salt fileserver envrionment from which to retrieve the file. Ignored if\n",
" ``path`` is not a ``salt://`` URL.\n",
"\n",
" source_hash\n",
" If ``path`` is an http(s) or ftp URL and the file exists in the\n",
" minion's file cache, this option can be passed to keep the minion from\n",
" re-downloading the file if the cached copy matches the specified hash.\n",
"\n",
" .. versionadded:: 2018.3.0\n",
"\n",
" CLI Example:\n",
"\n",
" .. code-block:: bash\n",
"\n",
" salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine\n",
" salt '*' cp.get_url http://www.slashdot.org /tmp/index.html\n",
" '''\n",
" if isinstance(dest, six.string_types):\n",
" result = _client().get_url(\n",
" path, dest, makedirs, saltenv, source_hash=source_hash)\n",
" else:\n",
" result = _client().get_url(\n",
" path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash)\n",
" if not result:\n",
" log.error('Unable to fetch file %s from saltenv %s.',\n",
" salt.utils.url.redact_http_basic_auth(path),\n",
" saltenv)\n",
" return result"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.058823529411764705
] | 59 | 0.001201 |
def write_aims(filename, atoms):
    """Method to write FHI-aims geometry files in phonopy context."""

    lines = ""
    lines += "# geometry.in for FHI-aims \n"
    lines += "# | generated by phonopy.FHIaims.write_aims() \n"

    lattice_vector_line = "lattice_vector " + "%16.16f "*3 + "\n"
    for vec in atoms.get_cell():
        lines += lattice_vector_line % tuple(vec)

    N = atoms.get_number_of_atoms()

    atom_line = "atom " + "%16.16f "*3 + "%s \n"
    positions = atoms.get_positions()
    symbols = atoms.get_chemical_symbols()

    initial_moment_line = "initial_moment %16.6f\n"
    magmoms = atoms.get_magnetic_moments()

    for n in range(N):
        lines += atom_line % (tuple(positions[n]) + (symbols[n],))
        if magmoms is not None:
            lines += initial_moment_line % magmoms[n]

    with open(filename, 'w') as f:
        f.write(lines) | [
"def",
"write_aims",
"(",
"filename",
",",
"atoms",
")",
":",
"lines",
"=",
"\"\"",
"lines",
"+=",
"\"# geometry.in for FHI-aims \\n\"",
"lines",
"+=",
"\"# | generated by phonopy.FHIaims.write_aims() \\n\"",
"lattice_vector_line",
"=",
"\"lattice_vector \"",
"+",
"\"%16.16f \"",
"*",
"3",
"+",
"\"\\n\"",
"for",
"vec",
"in",
"atoms",
".",
"get_cell",
"(",
")",
":",
"lines",
"+=",
"lattice_vector_line",
"%",
"tuple",
"(",
"vec",
")",
"N",
"=",
"atoms",
".",
"get_number_of_atoms",
"(",
")",
"atom_line",
"=",
"\"atom \"",
"+",
"\"%16.16f \"",
"*",
"3",
"+",
"\"%s \\n\"",
"positions",
"=",
"atoms",
".",
"get_positions",
"(",
")",
"symbols",
"=",
"atoms",
".",
"get_chemical_symbols",
"(",
")",
"initial_moment_line",
"=",
"\"initial_moment %16.6f\\n\"",
"magmoms",
"=",
"atoms",
".",
"get_magnetic_moments",
"(",
")",
"for",
"n",
"in",
"range",
"(",
"N",
")",
":",
"lines",
"+=",
"atom_line",
"%",
"(",
"tuple",
"(",
"positions",
"[",
"n",
"]",
")",
"+",
"(",
"symbols",
"[",
"n",
"]",
",",
")",
")",
"if",
"magmoms",
"is",
"not",
"None",
":",
"lines",
"+=",
"initial_moment_line",
"%",
"magmoms",
"[",
"n",
"]",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"lines",
")"
] | 31.518519 | 0.00114 | [
"def write_aims(filename, atoms):\n",
" \"\"\"Method to write FHI-aims geometry files in phonopy context.\"\"\"\n",
"\n",
" lines = \"\"\n",
" lines += \"# geometry.in for FHI-aims \\n\"\n",
" lines += \"# | generated by phonopy.FHIaims.write_aims() \\n\"\n",
"\n",
" lattice_vector_line = \"lattice_vector \" + \"%16.16f \"*3 + \"\\n\"\n",
" for vec in atoms.get_cell():\n",
" lines += lattice_vector_line % tuple(vec)\n",
"\n",
" N = atoms.get_number_of_atoms()\n",
"\n",
" atom_line = \"atom \" + \"%16.16f \"*3 + \"%s \\n\"\n",
" positions = atoms.get_positions()\n",
" symbols = atoms.get_chemical_symbols()\n",
"\n",
" initial_moment_line = \"initial_moment %16.6f\\n\"\n",
" magmoms = atoms.get_magnetic_moments()\n",
"\n",
" for n in range(N):\n",
" lines += atom_line % (tuple(positions[n]) + (symbols[n],))\n",
" if magmoms is not None:\n",
" lines += initial_moment_line % magmoms[n]\n",
"\n",
" with open(filename, 'w') as f:\n",
" f.write(lines)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 27 | 0.001684 |
def set_client_ca_list(self, certificate_authorities):
    """
    Set the list of preferred client certificate signers for this server
    context.

    This list of certificate authorities will be sent to the client when
    the server requests a client certificate.

    :param certificate_authorities: a sequence of X509Names.
    :return: None

    .. versionadded:: 0.10
    """
    name_stack = _lib.sk_X509_NAME_new_null()
    _openssl_assert(name_stack != _ffi.NULL)

    try:
        for ca_name in certificate_authorities:
            if not isinstance(ca_name, X509Name):
                raise TypeError(
                    "client CAs must be X509Name objects, not %s "
                    "objects" % (
                        type(ca_name).__name__,
                    )
                )
            copy = _lib.X509_NAME_dup(ca_name._name)
            _openssl_assert(copy != _ffi.NULL)
            push_result = _lib.sk_X509_NAME_push(name_stack, copy)
            if not push_result:
                _lib.X509_NAME_free(copy)
                _raise_current_error()
    except Exception:
        _lib.sk_X509_NAME_free(name_stack)
        raise

    _lib.SSL_CTX_set_client_CA_list(self._context, name_stack) | [
"def",
"set_client_ca_list",
"(",
"self",
",",
"certificate_authorities",
")",
":",
"name_stack",
"=",
"_lib",
".",
"sk_X509_NAME_new_null",
"(",
")",
"_openssl_assert",
"(",
"name_stack",
"!=",
"_ffi",
".",
"NULL",
")",
"try",
":",
"for",
"ca_name",
"in",
"certificate_authorities",
":",
"if",
"not",
"isinstance",
"(",
"ca_name",
",",
"X509Name",
")",
":",
"raise",
"TypeError",
"(",
"\"client CAs must be X509Name objects, not %s \"",
"\"objects\"",
"%",
"(",
"type",
"(",
"ca_name",
")",
".",
"__name__",
",",
")",
")",
"copy",
"=",
"_lib",
".",
"X509_NAME_dup",
"(",
"ca_name",
".",
"_name",
")",
"_openssl_assert",
"(",
"copy",
"!=",
"_ffi",
".",
"NULL",
")",
"push_result",
"=",
"_lib",
".",
"sk_X509_NAME_push",
"(",
"name_stack",
",",
"copy",
")",
"if",
"not",
"push_result",
":",
"_lib",
".",
"X509_NAME_free",
"(",
"copy",
")",
"_raise_current_error",
"(",
")",
"except",
"Exception",
":",
"_lib",
".",
"sk_X509_NAME_free",
"(",
"name_stack",
")",
"raise",
"_lib",
".",
"SSL_CTX_set_client_CA_list",
"(",
"self",
".",
"_context",
",",
"name_stack",
")"
] | 36.472222 | 0.001484 | [
"def set_client_ca_list(self, certificate_authorities):\n",
" \"\"\"\n",
" Set the list of preferred client certificate signers for this server\n",
" context.\n",
"\n",
" This list of certificate authorities will be sent to the client when\n",
" the server requests a client certificate.\n",
"\n",
" :param certificate_authorities: a sequence of X509Names.\n",
" :return: None\n",
"\n",
" .. versionadded:: 0.10\n",
" \"\"\"\n",
" name_stack = _lib.sk_X509_NAME_new_null()\n",
" _openssl_assert(name_stack != _ffi.NULL)\n",
"\n",
" try:\n",
" for ca_name in certificate_authorities:\n",
" if not isinstance(ca_name, X509Name):\n",
" raise TypeError(\n",
" \"client CAs must be X509Name objects, not %s \"\n",
" \"objects\" % (\n",
" type(ca_name).__name__,\n",
" )\n",
" )\n",
" copy = _lib.X509_NAME_dup(ca_name._name)\n",
" _openssl_assert(copy != _ffi.NULL)\n",
" push_result = _lib.sk_X509_NAME_push(name_stack, copy)\n",
" if not push_result:\n",
" _lib.X509_NAME_free(copy)\n",
" _raise_current_error()\n",
" except Exception:\n",
" _lib.sk_X509_NAME_free(name_stack)\n",
" raise\n",
"\n",
" _lib.SSL_CTX_set_client_CA_list(self._context, name_stack)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152
] | 36 | 0.002736 |
def mean_if_greater_than_zero(vals):
    """ Calculate mean over numerical values, ignoring values less than zero.
    E.g. used for mean time over coincident triggers when timestamps are set
    to -1 for ifos not included in the coincidence.

    Parameters
    ----------
    vals: iterator of numerical values
        values to be mean averaged

    Returns
    -------
    mean: float
        The mean of the values in the original vector which are
        greater than zero
    num_above_zero: int
        The number of entries in the vector which are above zero
    """
    vals = numpy.array(vals)
    above_zero = vals > 0
    return vals[above_zero].mean(), above_zero.sum() | [
"def",
"mean_if_greater_than_zero",
"(",
"vals",
")",
":",
"vals",
"=",
"numpy",
".",
"array",
"(",
"vals",
")",
"above_zero",
"=",
"vals",
">",
"0",
"return",
"vals",
"[",
"above_zero",
"]",
".",
"mean",
"(",
")",
",",
"above_zero",
".",
"sum",
"(",
")"
] | 31.619048 | 0.001462 | [
"def mean_if_greater_than_zero(vals):\n",
" \"\"\" Calculate mean over numerical values, ignoring values less than zero.\n",
" E.g. used for mean time over coincident triggers when timestamps are set\n",
" to -1 for ifos not included in the coincidence.\n",
"\n",
" Parameters\n",
" ----------\n",
" vals: iterator of numerical values\n",
" values to be mean averaged\n",
"\n",
" Returns\n",
" -------\n",
" mean: float\n",
" The mean of the values in the original vector which are\n",
" greater than zero\n",
" num_above_zero: int\n",
" The number of entries in the vector which are above zero\n",
" \"\"\"\n",
" vals = numpy.array(vals)\n",
" above_zero = vals > 0\n",
" return vals[above_zero].mean(), above_zero.sum()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232
] | 21 | 0.000916 |
def normalizeBoolean(value):
    """
    Normalizes a boolean.

    * **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
    * Returned value will be a boolean.
    """
    if isinstance(value, int) and value in (0, 1):
        value = bool(value)
    if not isinstance(value, bool):
        raise ValueError("Boolean values must be True or False, not '%s'."
                         % value)
    return value | [
"def",
"normalizeBoolean",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
"and",
"value",
"in",
"(",
"0",
",",
"1",
")",
":",
"value",
"=",
"bool",
"(",
"value",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"\"Boolean values must be True or False, not '%s'.\"",
"%",
"value",
")",
"return",
"value"
] | 31.692308 | 0.002358 | [
"def normalizeBoolean(value):\n",
" \"\"\"\n",
" Normalizes a boolean.\n",
"\n",
" * **value** must be an ``int`` with value of 0 or 1, or a ``bool``.\n",
" * Returned value will be a boolean.\n",
" \"\"\"\n",
" if isinstance(value, int) and value in (0, 1):\n",
" value = bool(value)\n",
" if not isinstance(value, bool):\n",
" raise ValueError(\"Boolean values must be True or False, not '%s'.\"\n",
" % value)\n",
" return value"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 13 | 0.004808 |
def which(cmd, safe=False):
    """https://github.com/jc0n/python-which"""
    from autopaths.file_path import FilePath
    def is_executable(path):
        return os.path.exists(path) and os.access(path, os.X_OK) and not os.path.isdir(path)
    path, name = os.path.split(cmd)
    if path:
        if is_executable(cmd): return FilePath(cmd)
    else:
        for path in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(path, cmd)
            if is_executable(candidate): return FilePath(candidate)
    if not safe: raise Exception('which failed to locate a proper command path "%s"' % cmd) | [
"def",
"which",
"(",
"cmd",
",",
"safe",
"=",
"False",
")",
":",
"from",
"autopaths",
".",
"file_path",
"import",
"FilePath",
"def",
"is_executable",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"and",
"os",
".",
"access",
"(",
"path",
",",
"os",
".",
"X_OK",
")",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"path",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"cmd",
")",
"if",
"path",
":",
"if",
"is_executable",
"(",
"cmd",
")",
":",
"return",
"FilePath",
"(",
"cmd",
")",
"else",
":",
"for",
"path",
"in",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"candidate",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"cmd",
")",
"if",
"is_executable",
"(",
"candidate",
")",
":",
"return",
"FilePath",
"(",
"candidate",
")",
"if",
"not",
"safe",
":",
"raise",
"Exception",
"(",
"'which failed to locate a proper command path \"%s\"'",
"%",
"cmd",
")"
] | 46.615385 | 0.011327 | [
"def which(cmd, safe=False):\n",
" \"\"\"https://github.com/jc0n/python-which\"\"\"\n",
" from autopaths.file_path import FilePath\n",
" def is_executable(path):\n",
" return os.path.exists(path) and os.access(path, os.X_OK) and not os.path.isdir(path)\n",
" path, name = os.path.split(cmd)\n",
" if path:\n",
" if is_executable(cmd): return FilePath(cmd)\n",
" else:\n",
" for path in os.environ['PATH'].split(os.pathsep):\n",
" candidate = os.path.join(path, cmd)\n",
" if is_executable(candidate): return FilePath(candidate)\n",
" if not safe: raise Exception('which failed to locate a proper command path \"%s\"' % cmd)"
] | [
0,
0,
0,
0.034482758620689655,
0.010752688172043012,
0,
0,
0.019230769230769232,
0,
0,
0,
0.014705882352941176,
0.03296703296703297
] | 13 | 0.008626 |
def set_power(self, state):
    """Sets the power state of the smart plug."""
    packet = bytearray(16)
    packet[0] = 2
    if self.check_nightlight():
        packet[4] = 3 if state else 2
    else:
        packet[4] = 1 if state else 0
    self.send_packet(0x6a, packet) | [
"def",
"set_power",
"(",
"self",
",",
"state",
")",
":",
"packet",
"=",
"bytearray",
"(",
"16",
")",
"packet",
"[",
"0",
"]",
"=",
"2",
"if",
"self",
".",
"check_nightlight",
"(",
")",
":",
"packet",
"[",
"4",
"]",
"=",
"3",
"if",
"state",
"else",
"2",
"else",
":",
"packet",
"[",
"4",
"]",
"=",
"1",
"if",
"state",
"else",
"0",
"self",
".",
"send_packet",
"(",
"0x6a",
",",
"packet",
")"
] | 29.222222 | 0.01107 | [
"def set_power(self, state):\n",
" \"\"\"Sets the power state of the smart plug.\"\"\"\n",
" packet = bytearray(16)\n",
" packet[0] = 2\n",
" if self.check_nightlight():\n",
" packet[4] = 3 if state else 2\n",
" else:\n",
" packet[4] = 1 if state else 0\n",
" self.send_packet(0x6a, packet)"
] | [
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0.027777777777777776,
0.029411764705882353
] | 9 | 0.009441 |
def compare_schemas(one, two):
    """Compare two structures that represents JSON schemas.

    For comparison you can't use normal comparison, because in JSON schema
    lists DO NOT keep order (and Python lists do), so this must be taken into
    account during comparison.

    Note this wont check all configurations, only first one that seems to
    match, which can lead to wrong results.

    :param one: First schema to compare.
    :param two: Second schema to compare.
    :rtype: `bool`

    """
    one = _normalize_string_type(one)
    two = _normalize_string_type(two)

    _assert_same_types(one, two)

    if isinstance(one, list):
        return _compare_lists(one, two)
    elif isinstance(one, dict):
        return _compare_dicts(one, two)
    elif isinstance(one, SCALAR_TYPES):
        return one == two
    elif one is None:
        return one is two
    else:
        raise RuntimeError('Not allowed type "{type}"'.format(
            type=type(one).__name__)) | [
"def",
"compare_schemas",
"(",
"one",
",",
"two",
")",
":",
"one",
"=",
"_normalize_string_type",
"(",
"one",
")",
"two",
"=",
"_normalize_string_type",
"(",
"two",
")",
"_assert_same_types",
"(",
"one",
",",
"two",
")",
"if",
"isinstance",
"(",
"one",
",",
"list",
")",
":",
"return",
"_compare_lists",
"(",
"one",
",",
"two",
")",
"elif",
"isinstance",
"(",
"one",
",",
"dict",
")",
":",
"return",
"_compare_dicts",
"(",
"one",
",",
"two",
")",
"elif",
"isinstance",
"(",
"one",
",",
"SCALAR_TYPES",
")",
":",
"return",
"one",
"==",
"two",
"elif",
"one",
"is",
"None",
":",
"return",
"one",
"is",
"two",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Not allowed type \"{type}\"'",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"one",
")",
".",
"__name__",
")",
")"
] | 30.774194 | 0.001016 | [
"def compare_schemas(one, two):\n",
" \"\"\"Compare two structures that represents JSON schemas.\n",
"\n",
" For comparison you can't use normal comparison, because in JSON schema\n",
" lists DO NOT keep order (and Python lists do), so this must be taken into\n",
" account during comparison.\n",
"\n",
" Note this wont check all configurations, only first one that seems to\n",
" match, which can lead to wrong results.\n",
"\n",
" :param one: First schema to compare.\n",
" :param two: Second schema to compare.\n",
" :rtype: `bool`\n",
"\n",
" \"\"\"\n",
" one = _normalize_string_type(one)\n",
" two = _normalize_string_type(two)\n",
"\n",
" _assert_same_types(one, two)\n",
"\n",
" if isinstance(one, list):\n",
" return _compare_lists(one, two)\n",
" elif isinstance(one, dict):\n",
" return _compare_dicts(one, two)\n",
" elif isinstance(one, SCALAR_TYPES):\n",
" return one == two\n",
" elif one is None:\n",
" return one is two\n",
" else:\n",
" raise RuntimeError('Not allowed type \"{type}\"'.format(\n",
" type=type(one).__name__))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703
] | 31 | 0.000872 |
def _handle_timeout(self) -> None:
    """Called by IOLoop when the requested timeout has passed."""
    self._timeout = None
    while True:
        try:
            ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0)
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()

    # In theory, we shouldn't have to do this because curl will
    # call _set_timeout whenever the timeout changes. However,
    # sometimes after _handle_timeout we will need to reschedule
    # immediately even though nothing has changed from curl's
    # perspective. This is because when socket_action is
    # called with SOCKET_TIMEOUT, libcurl decides internally which
    # timeouts need to be processed by using a monotonic clock
    # (where available) while tornado uses python's time.time()
    # to decide when timeouts have occurred. When those clocks
    # disagree on elapsed time (as they will whenever there is an
    # NTP adjustment), tornado might call _handle_timeout before
    # libcurl is ready. After each timeout, resync the scheduled
    # timeout with libcurl's current state.
    new_timeout = self._multi.timeout()
    if new_timeout >= 0:
        self._set_timeout(new_timeout) | [
"def",
"_handle_timeout",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"_timeout",
"=",
"None",
"while",
"True",
":",
"try",
":",
"ret",
",",
"num_handles",
"=",
"self",
".",
"_multi",
".",
"socket_action",
"(",
"pycurl",
".",
"SOCKET_TIMEOUT",
",",
"0",
")",
"except",
"pycurl",
".",
"error",
"as",
"e",
":",
"ret",
"=",
"e",
".",
"args",
"[",
"0",
"]",
"if",
"ret",
"!=",
"pycurl",
".",
"E_CALL_MULTI_PERFORM",
":",
"break",
"self",
".",
"_finish_pending_requests",
"(",
")",
"# In theory, we shouldn't have to do this because curl will",
"# call _set_timeout whenever the timeout changes. However,",
"# sometimes after _handle_timeout we will need to reschedule",
"# immediately even though nothing has changed from curl's",
"# perspective. This is because when socket_action is",
"# called with SOCKET_TIMEOUT, libcurl decides internally which",
"# timeouts need to be processed by using a monotonic clock",
"# (where available) while tornado uses python's time.time()",
"# to decide when timeouts have occurred. When those clocks",
"# disagree on elapsed time (as they will whenever there is an",
"# NTP adjustment), tornado might call _handle_timeout before",
"# libcurl is ready. After each timeout, resync the scheduled",
"# timeout with libcurl's current state.",
"new_timeout",
"=",
"self",
".",
"_multi",
".",
"timeout",
"(",
")",
"if",
"new_timeout",
">=",
"0",
":",
"self",
".",
"_set_timeout",
"(",
"new_timeout",
")"
] | 49.785714 | 0.002111 | [
"def _handle_timeout(self) -> None:\n",
" \"\"\"Called by IOLoop when the requested timeout has passed.\"\"\"\n",
" self._timeout = None\n",
" while True:\n",
" try:\n",
" ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0)\n",
" except pycurl.error as e:\n",
" ret = e.args[0]\n",
" if ret != pycurl.E_CALL_MULTI_PERFORM:\n",
" break\n",
" self._finish_pending_requests()\n",
"\n",
" # In theory, we shouldn't have to do this because curl will\n",
" # call _set_timeout whenever the timeout changes. However,\n",
" # sometimes after _handle_timeout we will need to reschedule\n",
" # immediately even though nothing has changed from curl's\n",
" # perspective. This is because when socket_action is\n",
" # called with SOCKET_TIMEOUT, libcurl decides internally which\n",
" # timeouts need to be processed by using a monotonic clock\n",
" # (where available) while tornado uses python's time.time()\n",
" # to decide when timeouts have occurred. When those clocks\n",
" # disagree on elapsed time (as they will whenever there is an\n",
" # NTP adjustment), tornado might call _handle_timeout before\n",
" # libcurl is ready. After each timeout, resync the scheduled\n",
" # timeout with libcurl's current state.\n",
" new_timeout = self._multi.timeout()\n",
" if new_timeout >= 0:\n",
" self._set_timeout(new_timeout)"
] | [
0,
0.014285714285714285,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808
] | 28 | 0.001771 |
def _get_context(center_idx, sentence_boundaries, window_size,
                 random_window_size, seed):
    """Compute the context with respect to a center word in a sentence.

    Takes an numpy array of sentences boundaries.

    """
    random.seed(seed + center_idx)

    sentence_index = np.searchsorted(sentence_boundaries, center_idx)
    sentence_start, sentence_end = _get_sentence_start_end(
        sentence_boundaries, sentence_index)

    if random_window_size:
        window_size = random.randint(1, window_size)
    start_idx = max(sentence_start, center_idx - window_size)
    end_idx = min(sentence_end, center_idx + window_size + 1)

    if start_idx != center_idx and center_idx + 1 != end_idx:
        context = np.concatenate((np.arange(start_idx, center_idx),
                                  np.arange(center_idx + 1, end_idx)))
    elif start_idx != center_idx:
        context = np.arange(start_idx, center_idx)
    elif center_idx + 1 != end_idx:
        context = np.arange(center_idx + 1, end_idx)
    else:
        context = None

    return context | [
"def",
"_get_context",
"(",
"center_idx",
",",
"sentence_boundaries",
",",
"window_size",
",",
"random_window_size",
",",
"seed",
")",
":",
"random",
".",
"seed",
"(",
"seed",
"+",
"center_idx",
")",
"sentence_index",
"=",
"np",
".",
"searchsorted",
"(",
"sentence_boundaries",
",",
"center_idx",
")",
"sentence_start",
",",
"sentence_end",
"=",
"_get_sentence_start_end",
"(",
"sentence_boundaries",
",",
"sentence_index",
")",
"if",
"random_window_size",
":",
"window_size",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"window_size",
")",
"start_idx",
"=",
"max",
"(",
"sentence_start",
",",
"center_idx",
"-",
"window_size",
")",
"end_idx",
"=",
"min",
"(",
"sentence_end",
",",
"center_idx",
"+",
"window_size",
"+",
"1",
")",
"if",
"start_idx",
"!=",
"center_idx",
"and",
"center_idx",
"+",
"1",
"!=",
"end_idx",
":",
"context",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"arange",
"(",
"start_idx",
",",
"center_idx",
")",
",",
"np",
".",
"arange",
"(",
"center_idx",
"+",
"1",
",",
"end_idx",
")",
")",
")",
"elif",
"start_idx",
"!=",
"center_idx",
":",
"context",
"=",
"np",
".",
"arange",
"(",
"start_idx",
",",
"center_idx",
")",
"elif",
"center_idx",
"+",
"1",
"!=",
"end_idx",
":",
"context",
"=",
"np",
".",
"arange",
"(",
"center_idx",
"+",
"1",
",",
"end_idx",
")",
"else",
":",
"context",
"=",
"None",
"return",
"context"
] | 36.37931 | 0.000923 | [
"def _get_context(center_idx, sentence_boundaries, window_size,\n",
" random_window_size, seed):\n",
" \"\"\"Compute the context with respect to a center word in a sentence.\n",
"\n",
" Takes an numpy array of sentences boundaries.\n",
"\n",
" \"\"\"\n",
" random.seed(seed + center_idx)\n",
"\n",
" sentence_index = np.searchsorted(sentence_boundaries, center_idx)\n",
" sentence_start, sentence_end = _get_sentence_start_end(\n",
" sentence_boundaries, sentence_index)\n",
"\n",
" if random_window_size:\n",
" window_size = random.randint(1, window_size)\n",
" start_idx = max(sentence_start, center_idx - window_size)\n",
" end_idx = min(sentence_end, center_idx + window_size + 1)\n",
"\n",
" if start_idx != center_idx and center_idx + 1 != end_idx:\n",
" context = np.concatenate((np.arange(start_idx, center_idx),\n",
" np.arange(center_idx + 1, end_idx)))\n",
" elif start_idx != center_idx:\n",
" context = np.arange(start_idx, center_idx)\n",
" elif center_idx + 1 != end_idx:\n",
" context = np.arange(center_idx + 1, end_idx)\n",
" else:\n",
" context = None\n",
"\n",
" return context"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 29 | 0.001916 |
def character_embedding_network(char_placeholder: tf.Tensor,
                                n_characters: int = None,
                                emb_mat: np.array = None,
                                char_embedding_dim: int = None,
                                filter_widths=(3, 4, 5, 7),
                                highway_on_top=False):
    """ Characters to vector. Every sequence of characters (token)
    is embedded to vector space with dimensionality char_embedding_dim
    Convolution plus max_pooling is used to obtain vector representations
    of words.

    Args:
        char_placeholder: placeholder of int32 type with dimensionality [B, T, C]
            B - batch size (can be None)
            T - Number of tokens (can be None)
            C - number of characters (can be None)
        n_characters: total number of unique characters
        emb_mat: if n_characters is not provided the emb_mat should be provided
            it is a numpy array with dimensions [V, E], where V - vocabulary size
            and E - embeddings dimension
        char_embedding_dim: dimensionality of characters embeddings
        filter_widths: array of width of kernel in convolutional embedding network
            used in parallel

    Returns:
        embeddings: tf.Tensor with dimensionality [B, T, F],
            where F is dimensionality of embeddings
    """
    if emb_mat is None:
        emb_mat = np.random.randn(n_characters, char_embedding_dim).astype(np.float32) / np.sqrt(char_embedding_dim)
    else:
        char_embedding_dim = emb_mat.shape[1]
    char_emb_var = tf.Variable(emb_mat, trainable=True)
    with tf.variable_scope('Char_Emb_Network'):
        # Character embedding layer
        c_emb = tf.nn.embedding_lookup(char_emb_var, char_placeholder)

        # Character embedding network
        conv_results_list = []
        for filter_width in filter_widths:
            conv_results_list.append(tf.layers.conv2d(c_emb,
                                                      char_embedding_dim,
                                                      (1, filter_width),
                                                      padding='same',
                                                      kernel_initializer=INITIALIZER))
        units = tf.concat(conv_results_list, axis=3)
        units = tf.reduce_max(units, axis=2)
        if highway_on_top:
            sigmoid_gate = tf.layers.dense(units,
                                           1,
                                           activation=tf.sigmoid,
                                           kernel_initializer=INITIALIZER,
                                           kernel_regularizer=tf.nn.l2_loss)
            deeper_units = tf.layers.dense(units,
                                           tf.shape(units)[-1],
                                           kernel_initializer=INITIALIZER,
                                           kernel_regularizer=tf.nn.l2_loss)
            units = sigmoid_gate * units + (1 - sigmoid_gate) * deeper_units
            units = tf.nn.relu(units)
    return units | [
"def",
"character_embedding_network",
"(",
"char_placeholder",
":",
"tf",
".",
"Tensor",
",",
"n_characters",
":",
"int",
"=",
"None",
",",
"emb_mat",
":",
"np",
".",
"array",
"=",
"None",
",",
"char_embedding_dim",
":",
"int",
"=",
"None",
",",
"filter_widths",
"=",
"(",
"3",
",",
"4",
",",
"5",
",",
"7",
")",
",",
"highway_on_top",
"=",
"False",
")",
":",
"if",
"emb_mat",
"is",
"None",
":",
"emb_mat",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"n_characters",
",",
"char_embedding_dim",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"np",
".",
"sqrt",
"(",
"char_embedding_dim",
")",
"else",
":",
"char_embedding_dim",
"=",
"emb_mat",
".",
"shape",
"[",
"1",
"]",
"char_emb_var",
"=",
"tf",
".",
"Variable",
"(",
"emb_mat",
",",
"trainable",
"=",
"True",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"'Char_Emb_Network'",
")",
":",
"# Character embedding layer",
"c_emb",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"char_emb_var",
",",
"char_placeholder",
")",
"# Character embedding network",
"conv_results_list",
"=",
"[",
"]",
"for",
"filter_width",
"in",
"filter_widths",
":",
"conv_results_list",
".",
"append",
"(",
"tf",
".",
"layers",
".",
"conv2d",
"(",
"c_emb",
",",
"char_embedding_dim",
",",
"(",
"1",
",",
"filter_width",
")",
",",
"padding",
"=",
"'same'",
",",
"kernel_initializer",
"=",
"INITIALIZER",
")",
")",
"units",
"=",
"tf",
".",
"concat",
"(",
"conv_results_list",
",",
"axis",
"=",
"3",
")",
"units",
"=",
"tf",
".",
"reduce_max",
"(",
"units",
",",
"axis",
"=",
"2",
")",
"if",
"highway_on_top",
":",
"sigmoid_gate",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"units",
",",
"1",
",",
"activation",
"=",
"tf",
".",
"sigmoid",
",",
"kernel_initializer",
"=",
"INITIALIZER",
",",
"kernel_regularizer",
"=",
"tf",
".",
"nn",
".",
"l2_loss",
")",
"deeper_units",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"units",
",",
"tf",
".",
"shape",
"(",
"units",
")",
"[",
"-",
"1",
"]",
",",
"kernel_initializer",
"=",
"INITIALIZER",
",",
"kernel_regularizer",
"=",
"tf",
".",
"nn",
".",
"l2_loss",
")",
"units",
"=",
"sigmoid_gate",
"*",
"units",
"+",
"(",
"1",
"-",
"sigmoid_gate",
")",
"*",
"deeper_units",
"units",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"units",
")",
"return",
"units"
] | 51.05 | 0.001922 | [
"def character_embedding_network(char_placeholder: tf.Tensor,\n",
" n_characters: int = None,\n",
" emb_mat: np.array = None,\n",
" char_embedding_dim: int = None,\n",
" filter_widths=(3, 4, 5, 7),\n",
" highway_on_top=False):\n",
" \"\"\" Characters to vector. Every sequence of characters (token)\n",
" is embedded to vector space with dimensionality char_embedding_dim\n",
" Convolution plus max_pooling is used to obtain vector representations\n",
" of words.\n",
"\n",
" Args:\n",
" char_placeholder: placeholder of int32 type with dimensionality [B, T, C]\n",
" B - batch size (can be None)\n",
" T - Number of tokens (can be None)\n",
" C - number of characters (can be None)\n",
" n_characters: total number of unique characters\n",
" emb_mat: if n_characters is not provided the emb_mat should be provided\n",
" it is a numpy array with dimensions [V, E], where V - vocabulary size\n",
" and E - embeddings dimension\n",
" char_embedding_dim: dimensionality of characters embeddings\n",
" filter_widths: array of width of kernel in convolutional embedding network\n",
" used in parallel\n",
"\n",
" Returns:\n",
" embeddings: tf.Tensor with dimensionality [B, T, F],\n",
" where F is dimensionality of embeddings\n",
" \"\"\"\n",
" if emb_mat is None:\n",
" emb_mat = np.random.randn(n_characters, char_embedding_dim).astype(np.float32) / np.sqrt(char_embedding_dim)\n",
" else:\n",
" char_embedding_dim = emb_mat.shape[1]\n",
" char_emb_var = tf.Variable(emb_mat, trainable=True)\n",
" with tf.variable_scope('Char_Emb_Network'):\n",
" # Character embedding layer\n",
" c_emb = tf.nn.embedding_lookup(char_emb_var, char_placeholder)\n",
"\n",
" # Character embedding network\n",
" conv_results_list = []\n",
" for filter_width in filter_widths:\n",
" conv_results_list.append(tf.layers.conv2d(c_emb,\n",
" char_embedding_dim,\n",
" (1, filter_width),\n",
" padding='same',\n",
" kernel_initializer=INITIALIZER))\n",
" units = tf.concat(conv_results_list, axis=3)\n",
" units = tf.reduce_max(units, axis=2)\n",
" if highway_on_top:\n",
" sigmoid_gate = tf.layers.dense(units,\n",
" 1,\n",
" activation=tf.sigmoid,\n",
" kernel_initializer=INITIALIZER,\n",
" kernel_regularizer=tf.nn.l2_loss)\n",
" deeper_units = tf.layers.dense(units,\n",
" tf.shape(units)[-1],\n",
" kernel_initializer=INITIALIZER,\n",
" kernel_regularizer=tf.nn.l2_loss)\n",
" units = sigmoid_gate * units + (1 - sigmoid_gate) * deeper_units\n",
" units = tf.nn.relu(units)\n",
" return units"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 60 | 0.001983 |
def analyze(self, mode=None, timesteps=None):
    """Analyzes the grid by power flow analysis

    Analyze the grid for violations of hosting capacity. Means, perform a
    power flow analysis and obtain voltages at nodes (load, generator,
    stations/transformers and branch tees) and active/reactive power at
    lines.

    The power flow analysis can currently only be performed for both grid
    levels MV and LV. See ToDos section for more information.

    A static `non-linear power flow analysis is performed using PyPSA
    <https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_.
    The high-voltage to medium-voltage transformer are not included in the
    analysis. The slack bus is defined at secondary side of these
    transformers assuming an ideal tap changer. Hence, potential
    overloading of the transformers is not studied here.

    Parameters
    ----------
    mode : str
        Allows to toggle between power flow analysis (PFA) on the whole
        grid topology (MV + LV), only MV or only LV. Defaults to None which
        equals power flow analysis for MV + LV which is the only
        implemented option at the moment. See ToDos section for
        more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies for which time steps to conduct the power flow
        analysis. It defaults to None in which case the time steps in
        timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are
        used.

    Notes
    -----
    The current implementation always translates the grid topology
    representation to the PyPSA format and stores it to
    :attr:`self.network.pypsa`.

    ToDos
    ------
    The option to export only the edisgo MV grid (mode = 'mv') to conduct
    a power flow analysis is implemented in
    :func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised
    since the rest of edisgo does not handle this option yet. The analyze
    function will throw an error since
    :func:`~.tools.pypsa_io.process_pfa_results`
    does not handle aggregated loads and generators in the LV grids. Also,
    grid reinforcement, pypsa update of time series, and probably other
    functionalities do not work when only the MV grid is analysed.

    Further ToDos are:
    * explain how power plants are modeled, if possible use a link
    * explain where to find and adjust power flow analysis defining
      parameters

    See Also
    --------
    :func:`~.tools.pypsa_io.to_pypsa`
        Translator to PyPSA data format

    """
    if timesteps is None:
        timesteps = self.network.timeseries.timeindex
    # check if timesteps is array-like, otherwise convert to list
    if not hasattr(timesteps, "__len__"):
        timesteps = [timesteps]

    if self.network.pypsa is None:
        # Translate eDisGo grid topology representation to PyPSA format
        self.network.pypsa = pypsa_io.to_pypsa(
            self.network, mode, timesteps)
    else:
        if self.network.pypsa.edisgo_mode is not mode:
            # Translate eDisGo grid topology representation to PyPSA format
            self.network.pypsa = pypsa_io.to_pypsa(
                self.network, mode, timesteps)

    # check if all timesteps are in pypsa.snapshots, if not update time
    # series
    if False in [True if _ in self.network.pypsa.snapshots else False
                 for _ in timesteps]:
        pypsa_io.update_pypsa_timeseries(self.network, timesteps=timesteps)
    # run power flow analysis
    pf_results = self.network.pypsa.pf(timesteps)

    if all(pf_results['converged']['0'].tolist()):
        pypsa_io.process_pfa_results(
            self.network, self.network.pypsa, timesteps)
    else:
        raise ValueError("Power flow analysis did not converge.") | [
"def",
"analyze",
"(",
"self",
",",
"mode",
"=",
"None",
",",
"timesteps",
"=",
"None",
")",
":",
"if",
"timesteps",
"is",
"None",
":",
"timesteps",
"=",
"self",
".",
"network",
".",
"timeseries",
".",
"timeindex",
"# check if timesteps is array-like, otherwise convert to list",
"if",
"not",
"hasattr",
"(",
"timesteps",
",",
"\"__len__\"",
")",
":",
"timesteps",
"=",
"[",
"timesteps",
"]",
"if",
"self",
".",
"network",
".",
"pypsa",
"is",
"None",
":",
"# Translate eDisGo grid topology representation to PyPSA format",
"self",
".",
"network",
".",
"pypsa",
"=",
"pypsa_io",
".",
"to_pypsa",
"(",
"self",
".",
"network",
",",
"mode",
",",
"timesteps",
")",
"else",
":",
"if",
"self",
".",
"network",
".",
"pypsa",
".",
"edisgo_mode",
"is",
"not",
"mode",
":",
"# Translate eDisGo grid topology representation to PyPSA format",
"self",
".",
"network",
".",
"pypsa",
"=",
"pypsa_io",
".",
"to_pypsa",
"(",
"self",
".",
"network",
",",
"mode",
",",
"timesteps",
")",
"# check if all timesteps are in pypsa.snapshots, if not update time",
"# series",
"if",
"False",
"in",
"[",
"True",
"if",
"_",
"in",
"self",
".",
"network",
".",
"pypsa",
".",
"snapshots",
"else",
"False",
"for",
"_",
"in",
"timesteps",
"]",
":",
"pypsa_io",
".",
"update_pypsa_timeseries",
"(",
"self",
".",
"network",
",",
"timesteps",
"=",
"timesteps",
")",
"# run power flow analysis",
"pf_results",
"=",
"self",
".",
"network",
".",
"pypsa",
".",
"pf",
"(",
"timesteps",
")",
"if",
"all",
"(",
"pf_results",
"[",
"'converged'",
"]",
"[",
"'0'",
"]",
".",
"tolist",
"(",
")",
")",
":",
"pypsa_io",
".",
"process_pfa_results",
"(",
"self",
".",
"network",
",",
"self",
".",
"network",
".",
"pypsa",
",",
"timesteps",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Power flow analysis did not converge.\"",
")"
] | 45.1 | 0.000723 | [
"def analyze(self, mode=None, timesteps=None):\n",
" \"\"\"Analyzes the grid by power flow analysis\n",
"\n",
" Analyze the grid for violations of hosting capacity. Means, perform a\n",
" power flow analysis and obtain voltages at nodes (load, generator,\n",
" stations/transformers and branch tees) and active/reactive power at\n",
" lines.\n",
"\n",
" The power flow analysis can currently only be performed for both grid\n",
" levels MV and LV. See ToDos section for more information.\n",
"\n",
" A static `non-linear power flow analysis is performed using PyPSA\n",
" <https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_.\n",
" The high-voltage to medium-voltage transformer are not included in the\n",
" analysis. The slack bus is defined at secondary side of these\n",
" transformers assuming an ideal tap changer. Hence, potential\n",
" overloading of the transformers is not studied here.\n",
"\n",
" Parameters\n",
" ----------\n",
" mode : str\n",
" Allows to toggle between power flow analysis (PFA) on the whole\n",
" grid topology (MV + LV), only MV or only LV. Defaults to None which\n",
" equals power flow analysis for MV + LV which is the only\n",
" implemented option at the moment. See ToDos section for\n",
" more information.\n",
" timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`\n",
" Timesteps specifies for which time steps to conduct the power flow\n",
" analysis. It defaults to None in which case the time steps in\n",
" timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are\n",
" used.\n",
"\n",
" Notes\n",
" -----\n",
" The current implementation always translates the grid topology\n",
" representation to the PyPSA format and stores it to\n",
" :attr:`self.network.pypsa`.\n",
"\n",
" ToDos\n",
" ------\n",
" The option to export only the edisgo MV grid (mode = 'mv') to conduct\n",
" a power flow analysis is implemented in\n",
" :func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised\n",
" since the rest of edisgo does not handle this option yet. The analyze\n",
" function will throw an error since\n",
" :func:`~.tools.pypsa_io.process_pfa_results`\n",
" does not handle aggregated loads and generators in the LV grids. Also,\n",
" grid reinforcement, pypsa update of time series, and probably other\n",
" functionalities do not work when only the MV grid is analysed.\n",
"\n",
" Further ToDos are:\n",
" * explain how power plants are modeled, if possible use a link\n",
" * explain where to find and adjust power flow analysis defining\n",
" parameters\n",
"\n",
" See Also\n",
" --------\n",
" :func:`~.tools.pypsa_io.to_pypsa`\n",
" Translator to PyPSA data format\n",
"\n",
" \"\"\"\n",
" if timesteps is None:\n",
" timesteps = self.network.timeseries.timeindex\n",
" # check if timesteps is array-like, otherwise convert to list\n",
" if not hasattr(timesteps, \"__len__\"):\n",
" timesteps = [timesteps]\n",
"\n",
" if self.network.pypsa is None:\n",
" # Translate eDisGo grid topology representation to PyPSA format\n",
" self.network.pypsa = pypsa_io.to_pypsa(\n",
" self.network, mode, timesteps)\n",
" else:\n",
" if self.network.pypsa.edisgo_mode is not mode:\n",
" # Translate eDisGo grid topology representation to PyPSA format\n",
" self.network.pypsa = pypsa_io.to_pypsa(\n",
" self.network, mode, timesteps)\n",
"\n",
" # check if all timesteps are in pypsa.snapshots, if not update time\n",
" # series\n",
" if False in [True if _ in self.network.pypsa.snapshots else False\n",
" for _ in timesteps]:\n",
" pypsa_io.update_pypsa_timeseries(self.network, timesteps=timesteps)\n",
" # run power flow analysis\n",
" pf_results = self.network.pypsa.pf(timesteps)\n",
"\n",
" if all(pf_results['converged']['0'].tolist()):\n",
" pypsa_io.process_pfa_results(\n",
" self.network, self.network.pypsa, timesteps)\n",
" else:\n",
" raise ValueError(\"Power flow analysis did not converge.\")"
] | [
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406
] | 90 | 0.000479 |
def set_trace(host=None, port=None, patch_stdstreams=False):
    """
    Opens a remote PDB on first available port.
    """
    if host is None:
        host = os.environ.get('REMOTE_PDB_HOST', '127.0.0.1')
    if port is None:
        port = int(os.environ.get('REMOTE_PDB_PORT', '0'))
    rdb = RemotePdb(host=host, port=port, patch_stdstreams=patch_stdstreams)
    rdb.set_trace(frame=sys._getframe().f_back) | [
"def",
"set_trace",
"(",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"patch_stdstreams",
"=",
"False",
")",
":",
"if",
"host",
"is",
"None",
":",
"host",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'REMOTE_PDB_HOST'",
",",
"'127.0.0.1'",
")",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"int",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'REMOTE_PDB_PORT'",
",",
"'0'",
")",
")",
"rdb",
"=",
"RemotePdb",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"patch_stdstreams",
"=",
"patch_stdstreams",
")",
"rdb",
".",
"set_trace",
"(",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
")",
".",
"f_back",
")"
] | 40.3 | 0.002427 | [
"def set_trace(host=None, port=None, patch_stdstreams=False):\n",
" \"\"\"\n",
" Opens a remote PDB on first available port.\n",
" \"\"\"\n",
" if host is None:\n",
" host = os.environ.get('REMOTE_PDB_HOST', '127.0.0.1')\n",
" if port is None:\n",
" port = int(os.environ.get('REMOTE_PDB_PORT', '0'))\n",
" rdb = RemotePdb(host=host, port=port, patch_stdstreams=patch_stdstreams)\n",
" rdb.set_trace(frame=sys._getframe().f_back)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085
] | 10 | 0.002128 |
def truncate_string(value, max_width=None):
    """Truncate string values."""
    if isinstance(value, text_type) and max_width is not None and len(value) > max_width:
        return value[:max_width]
    return value | [
"def",
"truncate_string",
"(",
"value",
",",
"max_width",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"text_type",
")",
"and",
"max_width",
"is",
"not",
"None",
"and",
"len",
"(",
"value",
")",
">",
"max_width",
":",
"return",
"value",
"[",
":",
"max_width",
"]",
"return",
"value"
] | 42.6 | 0.009217 | [
"def truncate_string(value, max_width=None):\n",
" \"\"\"Truncate string values.\"\"\"\n",
" if isinstance(value, text_type) and max_width is not None and len(value) > max_width:\n",
" return value[:max_width]\n",
" return value"
] | [
0,
0,
0.011111111111111112,
0,
0.0625
] | 5 | 0.014722 |
def plan_results(self, project_key, plan_key, expand=None, favourite=False, clover_enabled=False, label=None,
                 issue_key=None, start_index=0, max_results=25):
    """
    Get Plan results
    :param project_key:
    :param plan_key:
    :param expand:
    :param favourite:
    :param clover_enabled:
    :param label:
    :param issue_key:
    :param start_index:
    :param max_results:
    :return:
    """
    return self.results(project_key, plan_key, expand=expand, favourite=favourite, clover_enabled=clover_enabled,
                        label=label, issue_key=issue_key, start_index=start_index, max_results=max_results) | [
"def",
"plan_results",
"(",
"self",
",",
"project_key",
",",
"plan_key",
",",
"expand",
"=",
"None",
",",
"favourite",
"=",
"False",
",",
"clover_enabled",
"=",
"False",
",",
"label",
"=",
"None",
",",
"issue_key",
"=",
"None",
",",
"start_index",
"=",
"0",
",",
"max_results",
"=",
"25",
")",
":",
"return",
"self",
".",
"results",
"(",
"project_key",
",",
"plan_key",
",",
"expand",
"=",
"expand",
",",
"favourite",
"=",
"favourite",
",",
"clover_enabled",
"=",
"clover_enabled",
",",
"label",
"=",
"label",
",",
"issue_key",
"=",
"issue_key",
",",
"start_index",
"=",
"start_index",
",",
"max_results",
"=",
"max_results",
")"
] | 40.882353 | 0.008439 | [
"def plan_results(self, project_key, plan_key, expand=None, favourite=False, clover_enabled=False, label=None,\n",
" issue_key=None, start_index=0, max_results=25):\n",
" \"\"\"\n",
" Get Plan results\n",
" :param project_key:\n",
" :param plan_key:\n",
" :param expand:\n",
" :param favourite:\n",
" :param clover_enabled:\n",
" :param label:\n",
" :param issue_key:\n",
" :param start_index:\n",
" :param max_results:\n",
" :return:\n",
" \"\"\"\n",
" return self.results(project_key, plan_key, expand=expand, favourite=favourite, clover_enabled=clover_enabled,\n",
" label=label, issue_key=issue_key, start_index=start_index, max_results=max_results)"
] | [
0.00909090909090909,
0.014492753623188406,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0.018018018018018018
] | 17 | 0.007848 |
def cylinder(target, throat_length='throat.length',
throat_diameter='throat.diameter'):
r"""
    Calculate throat volume assuming a cylindrical shape
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_length and throat_diameter : strings
The dictionary keys containing the arrays with the throat diameter and
length values.
Notes
-----
    At present this model does NOT account for the volume represented by the
intersection of the throat with a spherical pore body.
"""
leng = target[throat_length]
diam = target[throat_diameter]
value = _sp.pi/4*leng*diam**2
return value | [
"def",
"cylinder",
"(",
"target",
",",
"throat_length",
"=",
"'throat.length'",
",",
"throat_diameter",
"=",
"'throat.diameter'",
")",
":",
"leng",
"=",
"target",
"[",
"throat_length",
"]",
"diam",
"=",
"target",
"[",
"throat_diameter",
"]",
"value",
"=",
"_sp",
".",
"pi",
"/",
"4",
"*",
"leng",
"*",
"diam",
"**",
"2",
"return",
"value"
] | 32.52 | 0.001195 | [
"def cylinder(target, throat_length='throat.length',\n",
" throat_diameter='throat.diameter'):\n",
" r\"\"\"\n",
" Calculate throat volume assuing a cylindrical shape\n",
"\n",
" Parameters\n",
" ----------\n",
" target : OpenPNM Object\n",
" The object which this model is associated with. This controls the\n",
" length of the calculated array, and also provides access to other\n",
" necessary properties.\n",
"\n",
" throat_length and throat_diameter : strings\n",
" The dictionary keys containing the arrays with the throat diameter and\n",
" length values.\n",
"\n",
" Notes\n",
" -----\n",
" At present this models does NOT account for the volume reprsented by the\n",
" intersection of the throat with a spherical pore body.\n",
" \"\"\"\n",
" leng = target[throat_length]\n",
" diam = target[throat_diameter]\n",
" value = _sp.pi/4*leng*diam**2\n",
" return value"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 25 | 0.0025 |
def merge_webhooks_runset(runset):
"""Make some statistics on the run set.
"""
min_started_at = min([w['started_at'] for w in runset])
max_ended_at = max([w['ended_at'] for w in runset])
ellapse = max_ended_at - min_started_at
errors_count = sum(1 for w in runset if 'error' in w)
total_count = len(runset)
data = dict(
ellapse=ellapse,
errors_count=errors_count,
total_count=total_count,
)
return data | [
"def",
"merge_webhooks_runset",
"(",
"runset",
")",
":",
"min_started_at",
"=",
"min",
"(",
"[",
"w",
"[",
"'started_at'",
"]",
"for",
"w",
"in",
"runset",
"]",
")",
"max_ended_at",
"=",
"max",
"(",
"[",
"w",
"[",
"'ended_at'",
"]",
"for",
"w",
"in",
"runset",
"]",
")",
"ellapse",
"=",
"max_ended_at",
"-",
"min_started_at",
"errors_count",
"=",
"sum",
"(",
"1",
"for",
"w",
"in",
"runset",
"if",
"'error'",
"in",
"w",
")",
"total_count",
"=",
"len",
"(",
"runset",
")",
"data",
"=",
"dict",
"(",
"ellapse",
"=",
"ellapse",
",",
"errors_count",
"=",
"errors_count",
",",
"total_count",
"=",
"total_count",
",",
")",
"return",
"data"
] | 26.647059 | 0.002132 | [
"def merge_webhooks_runset(runset):\n",
" \"\"\"Make some statistics on the run set.\n",
"\n",
" \"\"\"\n",
" min_started_at = min([w['started_at'] for w in runset])\n",
" max_ended_at = max([w['ended_at'] for w in runset])\n",
" ellapse = max_ended_at - min_started_at\n",
" errors_count = sum(1 for w in runset if 'error' in w)\n",
" total_count = len(runset)\n",
"\n",
" data = dict(\n",
" ellapse=ellapse,\n",
" errors_count=errors_count,\n",
" total_count=total_count,\n",
" )\n",
"\n",
" return data"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 17 | 0.003922 |
def _string(self):
""":return: the string from a :class:`io.StringIO`"""
file = StringIO()
self.__dump_to_file(file)
file.seek(0)
return file.read() | [
"def",
"_string",
"(",
"self",
")",
":",
"file",
"=",
"StringIO",
"(",
")",
"self",
".",
"__dump_to_file",
"(",
"file",
")",
"file",
".",
"seek",
"(",
"0",
")",
"return",
"file",
".",
"read",
"(",
")"
] | 30.5 | 0.010638 | [
"def _string(self):\n",
" \"\"\":return: the string from a :class:`io.StringIO`\"\"\"\n",
" file = StringIO()\n",
" self.__dump_to_file(file)\n",
" file.seek(0)\n",
" return file.read()"
] | [
0,
0.016129032258064516,
0,
0,
0,
0.038461538461538464
] | 6 | 0.009098 |
def idaunpack(buf):
"""
Special data packing format, used in struct definitions, and .id2 files
sdk functions: pack_dd etc.
"""
buf = bytearray(buf)
def nextval(o):
val = buf[o] ; o += 1
if val == 0xff: # 32 bit value
val, = struct.unpack_from(">L", buf, o)
o += 4
return val, o
if val < 0x80: # 7 bit value
return val, o
val <<= 8
val |= buf[o] ; o += 1
if val < 0xc000: # 14 bit value
return val & 0x3fff, o
# 29 bit value
val <<= 8
val |= buf[o] ; o += 1
val <<= 8
val |= buf[o] ; o += 1
return val & 0x1fffffff, o
values = []
o = 0
while o < len(buf):
val, o = nextval(o)
values.append(val)
return values | [
"def",
"idaunpack",
"(",
"buf",
")",
":",
"buf",
"=",
"bytearray",
"(",
"buf",
")",
"def",
"nextval",
"(",
"o",
")",
":",
"val",
"=",
"buf",
"[",
"o",
"]",
"o",
"+=",
"1",
"if",
"val",
"==",
"0xff",
":",
"# 32 bit value\r",
"val",
",",
"=",
"struct",
".",
"unpack_from",
"(",
"\">L\"",
",",
"buf",
",",
"o",
")",
"o",
"+=",
"4",
"return",
"val",
",",
"o",
"if",
"val",
"<",
"0x80",
":",
"# 7 bit value\r",
"return",
"val",
",",
"o",
"val",
"<<=",
"8",
"val",
"|=",
"buf",
"[",
"o",
"]",
"o",
"+=",
"1",
"if",
"val",
"<",
"0xc000",
":",
"# 14 bit value\r",
"return",
"val",
"&",
"0x3fff",
",",
"o",
"# 29 bit value\r",
"val",
"<<=",
"8",
"val",
"|=",
"buf",
"[",
"o",
"]",
"o",
"+=",
"1",
"val",
"<<=",
"8",
"val",
"|=",
"buf",
"[",
"o",
"]",
"o",
"+=",
"1",
"return",
"val",
"&",
"0x1fffffff",
",",
"o",
"values",
"=",
"[",
"]",
"o",
"=",
"0",
"while",
"o",
"<",
"len",
"(",
"buf",
")",
":",
"val",
",",
"o",
"=",
"nextval",
"(",
"o",
")",
"values",
".",
"append",
"(",
"val",
")",
"return",
"values"
] | 24.323529 | 0.010465 | [
"def idaunpack(buf):\r\n",
" \"\"\"\r\n",
" Special data packing format, used in struct definitions, and .id2 files\r\n",
"\r\n",
" sdk functions: pack_dd etc.\r\n",
" \"\"\"\r\n",
" buf = bytearray(buf)\r\n",
"\r\n",
" def nextval(o):\r\n",
" val = buf[o] ; o += 1\r\n",
" if val == 0xff: # 32 bit value\r\n",
" val, = struct.unpack_from(\">L\", buf, o)\r\n",
" o += 4\r\n",
" return val, o\r\n",
" if val < 0x80: # 7 bit value\r\n",
" return val, o\r\n",
" val <<= 8\r\n",
" val |= buf[o] ; o += 1\r\n",
" if val < 0xc000: # 14 bit value\r\n",
" return val & 0x3fff, o\r\n",
"\r\n",
" # 29 bit value\r\n",
" val <<= 8\r\n",
" val |= buf[o] ; o += 1\r\n",
" val <<= 8\r\n",
" val |= buf[o] ; o += 1\r\n",
" return val & 0x1fffffff, o\r\n",
"\r\n",
" values = []\r\n",
" o = 0\r\n",
" while o < len(buf):\r\n",
" val, o = nextval(o)\r\n",
" values.append(val)\r\n",
" return values"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06451612903225806,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0.0625,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 34 | 0.009142 |
def get_all_requisite_objectives(self, objective_id=None):
"""Gets a list of Objectives that are the requisites for the given
    Objective including the requisites of the requisites, and so on.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an Objective is not found or
inaccessible. Otherwise, inaccessible Objectives may be omitted
from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): Id of the Objective
return: (osid.learning.ObjectiveList) - the returned Objective
list
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
# This should be re-implemented if and when handcar supports
# getting all requisites directly
requisites = list()
requisite_ids = list()
all_requisites = self._get_requisites_recursively(objective_id, requisites, requisite_ids)
return objects.ObjectiveList(all_requisites) | [
"def",
"get_all_requisite_objectives",
"(",
"self",
",",
"objective_id",
"=",
"None",
")",
":",
"# This should be re-implemented if and when handcar supports",
"# getting all requisites directly",
"requisites",
"=",
"list",
"(",
")",
"requisite_ids",
"=",
"list",
"(",
")",
"all_requisites",
"=",
"self",
".",
"_get_requisites_recursively",
"(",
"objective_id",
",",
"requisites",
",",
"requisite_ids",
")",
"return",
"objects",
".",
"ObjectiveList",
"(",
"all_requisites",
")"
] | 49.230769 | 0.002299 | [
"def get_all_requisite_objectives(self, objective_id=None):\n",
" \"\"\"Gets a list of Objectives that are the requisites for the given\n",
" Objective including the requistes of the requisites, and so on.\n",
"\n",
" In plenary mode, the returned list contains all of the immediate\n",
" requisites, or an error results if an Objective is not found or\n",
" inaccessible. Otherwise, inaccessible Objectives may be omitted\n",
" from the list and may present the elements in any order\n",
" including returning a unique set.\n",
"\n",
" arg: objective_id (osid.id.Id): Id of the Objective\n",
" return: (osid.learning.ObjectiveList) - the returned Objective\n",
" list\n",
" raise: NotFound - objective_id not found\n",
" raise: NullArgument - objective_id is null\n",
" raise: OperationFailed - unable to complete request\n",
" raise: PermissionDenied - authorization failure\n",
" compliance: mandatory - This method must be implemented.\n",
"\n",
" \"\"\"\n",
" # This should be re-implemented if and when handcar supports\n",
" # getting all requisites directly\n",
" requisites = list()\n",
" requisite_ids = list()\n",
" all_requisites = self._get_requisites_recursively(objective_id, requisites, requisite_ids)\n",
" return objects.ObjectiveList(all_requisites)"
] | [
0,
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0.019230769230769232
] | 26 | 0.001641 |
def _updateParamsFrom(self, *args, **kwargs):
"""
:note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`
"""
for o in self:
o._updateParamsFrom(*args, **kwargs) | [
"def",
"_updateParamsFrom",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"o",
"in",
"self",
":",
"o",
".",
"_updateParamsFrom",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 39 | 0.012552 | [
"def _updateParamsFrom(self, *args, **kwargs):\n",
" \"\"\"\n",
" :note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`\n",
" \"\"\"\n",
" for o in self:\n",
" o._updateParamsFrom(*args, **kwargs)"
] | [
0,
0.08333333333333333,
0.01020408163265306,
0,
0,
0.020833333333333332
] | 6 | 0.019062 |
def compute_mem_overhead(self):
"""Returns memory overhead."""
self.mem_overhead = (self._process.memory_info().rss -
builtins.initial_rss_size) | [
"def",
"compute_mem_overhead",
"(",
"self",
")",
":",
"self",
".",
"mem_overhead",
"=",
"(",
"self",
".",
"_process",
".",
"memory_info",
"(",
")",
".",
"rss",
"-",
"builtins",
".",
"initial_rss_size",
")"
] | 46.5 | 0.010582 | [
"def compute_mem_overhead(self):\n",
" \"\"\"Returns memory overhead.\"\"\"\n",
" self.mem_overhead = (self._process.memory_info().rss -\n",
" builtins.initial_rss_size)"
] | [
0,
0.02564102564102564,
0,
0.01818181818181818
] | 4 | 0.010956 |
def colour_rgb(self):
"""Return colour as RGB value"""
hexvalue = self.status()[self.DPS][self.DPS_INDEX_COLOUR]
return BulbDevice._hexvalue_to_rgb(hexvalue) | [
"def",
"colour_rgb",
"(",
"self",
")",
":",
"hexvalue",
"=",
"self",
".",
"status",
"(",
")",
"[",
"self",
".",
"DPS",
"]",
"[",
"self",
".",
"DPS_INDEX_COLOUR",
"]",
"return",
"BulbDevice",
".",
"_hexvalue_to_rgb",
"(",
"hexvalue",
")"
] | 44.5 | 0.01105 | [
"def colour_rgb(self):\n",
" \"\"\"Return colour as RGB value\"\"\"\n",
" hexvalue = self.status()[self.DPS][self.DPS_INDEX_COLOUR]\n",
" return BulbDevice._hexvalue_to_rgb(hexvalue)"
] | [
0,
0.024390243902439025,
0,
0.019230769230769232
] | 4 | 0.010905 |
def EQ106(T, Tc, A, B, C=0, D=0, E=0):
r'''DIPPR Equation #106. Often used in calculating liquid surface tension,
and heat of vaporization.
    Only parameters A and B are required; many fits include no
further parameters. Critical temperature is also required.
.. math::
Y = A(1-T_r)^{B + C T_r + D T_r^2 + E T_r^3}
Tr = \frac{T}{Tc}
Parameters
----------
T : float
Temperature, [K]
Tc : float
Critical temperature, [K]
A-D : float
Parameter for the equation; chemical and property specific [-]
Returns
-------
Y : float
Property [constant-specific]
Notes
-----
The integral could not be found, but the integral over T actually could,
again in terms of hypergeometric functions.
Examples
--------
Water surface tension; DIPPR coefficients normally in Pa*s.
>>> EQ106(300, 647.096, 0.17766, 2.567, -3.3377, 1.9699)
0.07231499373541
References
----------
.. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
DIPPR/AIChE
'''
Tr = T/Tc
return A*(1. - Tr)**(B + Tr*(C + Tr*(D + E*Tr))) | [
"def",
"EQ106",
"(",
"T",
",",
"Tc",
",",
"A",
",",
"B",
",",
"C",
"=",
"0",
",",
"D",
"=",
"0",
",",
"E",
"=",
"0",
")",
":",
"Tr",
"=",
"T",
"/",
"Tc",
"return",
"A",
"*",
"(",
"1.",
"-",
"Tr",
")",
"**",
"(",
"B",
"+",
"Tr",
"*",
"(",
"C",
"+",
"Tr",
"*",
"(",
"D",
"+",
"E",
"*",
"Tr",
")",
")",
")"
] | 25.909091 | 0.000845 | [
"def EQ106(T, Tc, A, B, C=0, D=0, E=0):\n",
" r'''DIPPR Equation #106. Often used in calculating liquid surface tension,\n",
" and heat of vaporization.\n",
" Only parameters A and B parameters are required; many fits include no\n",
" further parameters. Critical temperature is also required.\n",
"\n",
" .. math::\n",
" Y = A(1-T_r)^{B + C T_r + D T_r^2 + E T_r^3}\n",
"\n",
" Tr = \\frac{T}{Tc}\n",
"\n",
" Parameters\n",
" ----------\n",
" T : float\n",
" Temperature, [K]\n",
" Tc : float\n",
" Critical temperature, [K]\n",
" A-D : float\n",
" Parameter for the equation; chemical and property specific [-]\n",
"\n",
" Returns\n",
" -------\n",
" Y : float\n",
" Property [constant-specific]\n",
"\n",
" Notes\n",
" -----\n",
" The integral could not be found, but the integral over T actually could,\n",
" again in terms of hypergeometric functions.\n",
"\n",
" Examples\n",
" --------\n",
" Water surface tension; DIPPR coefficients normally in Pa*s.\n",
"\n",
" >>> EQ106(300, 647.096, 0.17766, 2.567, -3.3377, 1.9699)\n",
" 0.07231499373541\n",
"\n",
" References\n",
" ----------\n",
" .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801\n",
" DIPPR/AIChE\n",
" '''\n",
" Tr = T/Tc\n",
" return A*(1. - Tr)**(B + Tr*(C + Tr*(D + E*Tr)))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232
] | 44 | 0.000437 |
def append_position_to_token_list(token_list):
"""Converts a list of Token into a list of Token, asuming size == 1"""
return [PositionToken(value.content, value.gd, index, index+1) for (index, value) in enumerate(token_list)] | [
"def",
"append_position_to_token_list",
"(",
"token_list",
")",
":",
"return",
"[",
"PositionToken",
"(",
"value",
".",
"content",
",",
"value",
".",
"gd",
",",
"index",
",",
"index",
"+",
"1",
")",
"for",
"(",
"index",
",",
"value",
")",
"in",
"enumerate",
"(",
"token_list",
")",
"]"
] | 77 | 0.008584 | [
"def append_position_to_token_list(token_list):\n",
" \"\"\"Converts a list of Token into a list of Token, asuming size == 1\"\"\"\n",
" return [PositionToken(value.content, value.gd, index, index+1) for (index, value) in enumerate(token_list)]"
] | [
0,
0,
0.018018018018018018
] | 3 | 0.006006 |
def delete_mappings_in_network(network_id, network_2_id=None, **kwargs):
"""
Delete all the resource attribute mappings in a network. If another network
is specified, only delete the mappings between the two networks.
"""
qry = db.DBSession.query(ResourceAttrMap).filter(or_(ResourceAttrMap.network_a_id == network_id, ResourceAttrMap.network_b_id == network_id))
if network_2_id is not None:
qry = qry.filter(or_(ResourceAttrMap.network_a_id==network_2_id, ResourceAttrMap.network_b_id==network_2_id))
mappings = qry.all()
for m in mappings:
db.DBSession.delete(m)
db.DBSession.flush()
return 'OK' | [
"def",
"delete_mappings_in_network",
"(",
"network_id",
",",
"network_2_id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"qry",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceAttrMap",
")",
".",
"filter",
"(",
"or_",
"(",
"ResourceAttrMap",
".",
"network_a_id",
"==",
"network_id",
",",
"ResourceAttrMap",
".",
"network_b_id",
"==",
"network_id",
")",
")",
"if",
"network_2_id",
"is",
"not",
"None",
":",
"qry",
"=",
"qry",
".",
"filter",
"(",
"or_",
"(",
"ResourceAttrMap",
".",
"network_a_id",
"==",
"network_2_id",
",",
"ResourceAttrMap",
".",
"network_b_id",
"==",
"network_2_id",
")",
")",
"mappings",
"=",
"qry",
".",
"all",
"(",
")",
"for",
"m",
"in",
"mappings",
":",
"db",
".",
"DBSession",
".",
"delete",
"(",
"m",
")",
"db",
".",
"DBSession",
".",
"flush",
"(",
")",
"return",
"'OK'"
] | 38.235294 | 0.009009 | [
"def delete_mappings_in_network(network_id, network_2_id=None, **kwargs):\n",
" \"\"\"\n",
" Delete all the resource attribute mappings in a network. If another network\n",
" is specified, only delete the mappings between the two networks.\n",
" \"\"\"\n",
" qry = db.DBSession.query(ResourceAttrMap).filter(or_(ResourceAttrMap.network_a_id == network_id, ResourceAttrMap.network_b_id == network_id))\n",
"\n",
" if network_2_id is not None:\n",
" qry = qry.filter(or_(ResourceAttrMap.network_a_id==network_2_id, ResourceAttrMap.network_b_id==network_2_id))\n",
"\n",
" mappings = qry.all()\n",
"\n",
" for m in mappings:\n",
" db.DBSession.delete(m)\n",
" db.DBSession.flush()\n",
"\n",
" return 'OK'"
] | [
0,
0,
0.011904761904761904,
0,
0,
0.00684931506849315,
0,
0,
0.025423728813559324,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 17 | 0.00652 |
def register_new_suffix_tree(case_insensitive=False):
"""Factory method, returns new suffix tree object.
"""
assert isinstance(case_insensitive, bool)
root_node = register_new_node()
suffix_tree_id = uuid4()
event = SuffixTree.Created(
originator_id=suffix_tree_id,
root_node_id=root_node.id,
case_insensitive=case_insensitive,
)
entity = SuffixTree.mutate(event=event)
assert isinstance(entity, SuffixTree)
entity.nodes[root_node.id] = root_node
publish(event)
return entity | [
"def",
"register_new_suffix_tree",
"(",
"case_insensitive",
"=",
"False",
")",
":",
"assert",
"isinstance",
"(",
"case_insensitive",
",",
"bool",
")",
"root_node",
"=",
"register_new_node",
"(",
")",
"suffix_tree_id",
"=",
"uuid4",
"(",
")",
"event",
"=",
"SuffixTree",
".",
"Created",
"(",
"originator_id",
"=",
"suffix_tree_id",
",",
"root_node_id",
"=",
"root_node",
".",
"id",
",",
"case_insensitive",
"=",
"case_insensitive",
",",
")",
"entity",
"=",
"SuffixTree",
".",
"mutate",
"(",
"event",
"=",
"event",
")",
"assert",
"isinstance",
"(",
"entity",
",",
"SuffixTree",
")",
"entity",
".",
"nodes",
"[",
"root_node",
".",
"id",
"]",
"=",
"root_node",
"publish",
"(",
"event",
")",
"return",
"entity"
] | 25.333333 | 0.001812 | [
"def register_new_suffix_tree(case_insensitive=False):\n",
" \"\"\"Factory method, returns new suffix tree object.\n",
" \"\"\"\n",
" assert isinstance(case_insensitive, bool)\n",
" root_node = register_new_node()\n",
"\n",
" suffix_tree_id = uuid4()\n",
" event = SuffixTree.Created(\n",
" originator_id=suffix_tree_id,\n",
" root_node_id=root_node.id,\n",
" case_insensitive=case_insensitive,\n",
" )\n",
" entity = SuffixTree.mutate(event=event)\n",
"\n",
" assert isinstance(entity, SuffixTree)\n",
"\n",
" entity.nodes[root_node.id] = root_node\n",
"\n",
" publish(event)\n",
"\n",
" return entity"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 21 | 0.002801 |
def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):
'''This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API.
Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.'''
return get_or_create_in_transaction(tsession, model, values, missing_columns = missing_columns, variable_columns = variable_columns, updatable_columns = updatable_columns, only_use_supplied_columns = only_use_supplied_columns, read_only = read_only) | [
"def",
"get_or_create_in_transaction_wrapper",
"(",
"tsession",
",",
"model",
",",
"values",
",",
"missing_columns",
"=",
"[",
"]",
",",
"variable_columns",
"=",
"[",
"]",
",",
"updatable_columns",
"=",
"[",
"]",
",",
"only_use_supplied_columns",
"=",
"False",
",",
"read_only",
"=",
"False",
")",
":",
"return",
"get_or_create_in_transaction",
"(",
"tsession",
",",
"model",
",",
"values",
",",
"missing_columns",
"=",
"missing_columns",
",",
"variable_columns",
"=",
"variable_columns",
",",
"updatable_columns",
"=",
"updatable_columns",
",",
"only_use_supplied_columns",
"=",
"only_use_supplied_columns",
",",
"read_only",
"=",
"read_only",
")"
] | 187.25 | 0.033245 | [
"def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):\n",
" '''This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API.\n",
" Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.'''\n",
" return get_or_create_in_transaction(tsession, model, values, missing_columns = missing_columns, variable_columns = variable_columns, updatable_columns = updatable_columns, only_use_supplied_columns = only_use_supplied_columns, read_only = read_only)"
] | [
0.05789473684210526,
0.006711409395973154,
0.00625,
0.04743083003952569
] | 4 | 0.029572 |
def download_url(url, save_as, iter_size=_default_iter_size, enable_verbose=True):
"""A simple url binary content download function with progress info.
Warning: this function will silently overwrite existing file.
"""
msg = Messenger()
if enable_verbose:
msg.on()
else:
msg.off()
msg.show("Downloading %s from %s..." % (save_as, url))
with open(save_as, "wb") as f:
response = requests.get(url, stream=True)
if not response.ok:
print("http get error!")
return
start_time = time.clock()
downloaded_size = 0
for block in response.iter_content(iter_size):
if not block:
break
f.write(block)
elapse = datetime.timedelta(seconds=(time.clock() - start_time))
downloaded_size += sys.getsizeof(block)
msg.show(" Finished %s, elapse %s." % (
string_SizeInBytes(downloaded_size), elapse
))
msg.show(" Complete!") | [
"def",
"download_url",
"(",
"url",
",",
"save_as",
",",
"iter_size",
"=",
"_default_iter_size",
",",
"enable_verbose",
"=",
"True",
")",
":",
"msg",
"=",
"Messenger",
"(",
")",
"if",
"enable_verbose",
":",
"msg",
".",
"on",
"(",
")",
"else",
":",
"msg",
".",
"off",
"(",
")",
"msg",
".",
"show",
"(",
"\"Downloading %s from %s...\"",
"%",
"(",
"save_as",
",",
"url",
")",
")",
"with",
"open",
"(",
"save_as",
",",
"\"wb\"",
")",
"as",
"f",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"not",
"response",
".",
"ok",
":",
"print",
"(",
"\"http get error!\"",
")",
"return",
"start_time",
"=",
"time",
".",
"clock",
"(",
")",
"downloaded_size",
"=",
"0",
"for",
"block",
"in",
"response",
".",
"iter_content",
"(",
"iter_size",
")",
":",
"if",
"not",
"block",
":",
"break",
"f",
".",
"write",
"(",
"block",
")",
"elapse",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"(",
"time",
".",
"clock",
"(",
")",
"-",
"start_time",
")",
")",
"downloaded_size",
"+=",
"sys",
".",
"getsizeof",
"(",
"block",
")",
"msg",
".",
"show",
"(",
"\" Finished %s, elapse %s.\"",
"%",
"(",
"string_SizeInBytes",
"(",
"downloaded_size",
")",
",",
"elapse",
")",
")",
"msg",
".",
"show",
"(",
"\" Complete!\"",
")"
] | 30.363636 | 0.001934 | [
"def download_url(url, save_as, iter_size=_default_iter_size, enable_verbose=True):\n",
" \"\"\"A simple url binary content download function with progress info.\n",
"\n",
" Warning: this function will silently overwrite existing file.\n",
" \"\"\"\n",
" msg = Messenger()\n",
" if enable_verbose:\n",
" msg.on()\n",
" else:\n",
" msg.off()\n",
"\n",
" msg.show(\"Downloading %s from %s...\" % (save_as, url))\n",
"\n",
" with open(save_as, \"wb\") as f:\n",
" response = requests.get(url, stream=True)\n",
"\n",
" if not response.ok:\n",
" print(\"http get error!\")\n",
" return\n",
"\n",
" start_time = time.clock()\n",
" downloaded_size = 0\n",
" for block in response.iter_content(iter_size):\n",
" if not block:\n",
" break\n",
" f.write(block)\n",
" elapse = datetime.timedelta(seconds=(time.clock() - start_time))\n",
" downloaded_size += sys.getsizeof(block)\n",
" msg.show(\" Finished %s, elapse %s.\" % (\n",
" string_SizeInBytes(downloaded_size), elapse\n",
" ))\n",
"\n",
" msg.show(\" Complete!\")"
] | [
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655
] | 33 | 0.00141 |
def create_dvportgroup(portgroup_dict, portgroup_name, dvs,
service_instance=None):
'''
Creates a distributed virtual portgroup.
Note: The ``portgroup_name`` param will override any name already set
in ``portgroup_dict``.
portgroup_dict
Dictionary with the config values the portgroup should be created with
(example in salt.states.dvs).
portgroup_name
Name of the portgroup to be created.
dvs
Name of the DVS that will contain the portgroup.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_dvportgroup portgroup_dict=<dict>
portgroup_name=pg1 dvs=dvs1
'''
log.trace('Creating portgroup\'%s\' in dvs \'%s\' '
'with dict = %s', portgroup_name, dvs, portgroup_dict)
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
'retrieved'.format(dvs))
# Make the name of the dvportgroup consistent with the parameter
portgroup_dict['name'] = portgroup_name
spec = vim.DVPortgroupConfigSpec()
_apply_dvportgroup_config(portgroup_name, spec, portgroup_dict)
salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec)
return True | [
"def",
"create_dvportgroup",
"(",
"portgroup_dict",
",",
"portgroup_name",
",",
"dvs",
",",
"service_instance",
"=",
"None",
")",
":",
"log",
".",
"trace",
"(",
"'Creating portgroup\\'%s\\' in dvs \\'%s\\' '",
"'with dict = %s'",
",",
"portgroup_name",
",",
"dvs",
",",
"portgroup_dict",
")",
"proxy_type",
"=",
"get_proxy_type",
"(",
")",
"if",
"proxy_type",
"==",
"'esxdatacenter'",
":",
"datacenter",
"=",
"__salt__",
"[",
"'esxdatacenter.get_details'",
"]",
"(",
")",
"[",
"'datacenter'",
"]",
"dc_ref",
"=",
"_get_proxy_target",
"(",
"service_instance",
")",
"elif",
"proxy_type",
"==",
"'esxcluster'",
":",
"datacenter",
"=",
"__salt__",
"[",
"'esxcluster.get_details'",
"]",
"(",
")",
"[",
"'datacenter'",
"]",
"dc_ref",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_datacenter",
"(",
"service_instance",
",",
"datacenter",
")",
"dvs_refs",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_dvss",
"(",
"dc_ref",
",",
"dvs_names",
"=",
"[",
"dvs",
"]",
")",
"if",
"not",
"dvs_refs",
":",
"raise",
"VMwareObjectRetrievalError",
"(",
"'DVS \\'{0}\\' was not '",
"'retrieved'",
".",
"format",
"(",
"dvs",
")",
")",
"# Make the name of the dvportgroup consistent with the parameter",
"portgroup_dict",
"[",
"'name'",
"]",
"=",
"portgroup_name",
"spec",
"=",
"vim",
".",
"DVPortgroupConfigSpec",
"(",
")",
"_apply_dvportgroup_config",
"(",
"portgroup_name",
",",
"spec",
",",
"portgroup_dict",
")",
"salt",
".",
"utils",
".",
"vmware",
".",
"create_dvportgroup",
"(",
"dvs_refs",
"[",
"0",
"]",
",",
"spec",
")",
"return",
"True"
] | 38.326087 | 0.000553 | [
"def create_dvportgroup(portgroup_dict, portgroup_name, dvs,\n",
" service_instance=None):\n",
" '''\n",
" Creates a distributed virtual portgroup.\n",
"\n",
" Note: The ``portgroup_name`` param will override any name already set\n",
" in ``portgroup_dict``.\n",
"\n",
" portgroup_dict\n",
" Dictionary with the config values the portgroup should be created with\n",
" (example in salt.states.dvs).\n",
"\n",
" portgroup_name\n",
" Name of the portgroup to be created.\n",
"\n",
" dvs\n",
" Name of the DVS that will contain the portgroup.\n",
"\n",
" service_instance\n",
" Service instance (vim.ServiceInstance) of the vCenter.\n",
" Default is None.\n",
"\n",
" .. code-block:: bash\n",
"\n",
" salt '*' vsphere.create_dvportgroup portgroup_dict=<dict>\n",
" portgroup_name=pg1 dvs=dvs1\n",
" '''\n",
" log.trace('Creating portgroup\\'%s\\' in dvs \\'%s\\' '\n",
" 'with dict = %s', portgroup_name, dvs, portgroup_dict)\n",
" proxy_type = get_proxy_type()\n",
" if proxy_type == 'esxdatacenter':\n",
" datacenter = __salt__['esxdatacenter.get_details']()['datacenter']\n",
" dc_ref = _get_proxy_target(service_instance)\n",
" elif proxy_type == 'esxcluster':\n",
" datacenter = __salt__['esxcluster.get_details']()['datacenter']\n",
" dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)\n",
" dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])\n",
" if not dvs_refs:\n",
" raise VMwareObjectRetrievalError('DVS \\'{0}\\' was not '\n",
" 'retrieved'.format(dvs))\n",
" # Make the name of the dvportgroup consistent with the parameter\n",
" portgroup_dict['name'] = portgroup_name\n",
" spec = vim.DVPortgroupConfigSpec()\n",
" _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict)\n",
" salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec)\n",
" return True"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 46 | 0.001449 |
def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, dblclick=True, guiEvent=evt) | [
"def",
"_onLeftButtonDClick",
"(",
"self",
",",
"evt",
")",
":",
"x",
"=",
"evt",
".",
"GetX",
"(",
")",
"y",
"=",
"self",
".",
"figure",
".",
"bbox",
".",
"height",
"-",
"evt",
".",
"GetY",
"(",
")",
"evt",
".",
"Skip",
"(",
")",
"self",
".",
"CaptureMouse",
"(",
")",
"FigureCanvasBase",
".",
"button_press_event",
"(",
"self",
",",
"x",
",",
"y",
",",
"1",
",",
"dblclick",
"=",
"True",
",",
"guiEvent",
"=",
"evt",
")"
] | 39.714286 | 0.010563 | [
"def _onLeftButtonDClick(self, evt):\n",
" \"\"\"Start measuring on an axis.\"\"\"\n",
" x = evt.GetX()\n",
" y = self.figure.bbox.height - evt.GetY()\n",
" evt.Skip()\n",
" self.CaptureMouse()\n",
" FigureCanvasBase.button_press_event(self, x, y, 1, dblclick=True, guiEvent=evt)"
] | [
0,
0.023809523809523808,
0,
0,
0,
0,
0.022988505747126436
] | 7 | 0.006685 |
def _position_in_feature(pos_a, pos_b):
"""return distance to 3' and 5' end of the feature"""
strd = "-"
if pos_a[2] in pos_b[2]:
strd = "+"
if pos_a[2] in "+" and pos_b[2] in "+":
lento5 = pos_a[0] - pos_b[1] + 1
lento3 = pos_a[1] - pos_b[1] + 1
if pos_a[2] in "+" and pos_b[2] in "-":
lento5 = pos_a[1] - pos_b[0] + 1
lento3 = pos_a[0] - pos_b[1] + 1
if pos_a[2] in "-" and pos_b[2] in "+":
lento5 = pos_a[0] - pos_b[1] + 1
lento3 = pos_a[1] - pos_b[0] + 1
if pos_a[2] in "-" and pos_b[2] in "-":
lento3 = pos_a[0] - pos_b[0] + 1
lento5 = pos_a[1] - pos_b[1] + 1
else:
lento5 = pos_a[0] - pos_b[0] + 1
lento3 = pos_a[1] - pos_b[1] + 1
return lento5, lento3, strd | [
"def",
"_position_in_feature",
"(",
"pos_a",
",",
"pos_b",
")",
":",
"strd",
"=",
"\"-\"",
"if",
"pos_a",
"[",
"2",
"]",
"in",
"pos_b",
"[",
"2",
"]",
":",
"strd",
"=",
"\"+\"",
"if",
"pos_a",
"[",
"2",
"]",
"in",
"\"+\"",
"and",
"pos_b",
"[",
"2",
"]",
"in",
"\"+\"",
":",
"lento5",
"=",
"pos_a",
"[",
"0",
"]",
"-",
"pos_b",
"[",
"1",
"]",
"+",
"1",
"lento3",
"=",
"pos_a",
"[",
"1",
"]",
"-",
"pos_b",
"[",
"1",
"]",
"+",
"1",
"if",
"pos_a",
"[",
"2",
"]",
"in",
"\"+\"",
"and",
"pos_b",
"[",
"2",
"]",
"in",
"\"-\"",
":",
"lento5",
"=",
"pos_a",
"[",
"1",
"]",
"-",
"pos_b",
"[",
"0",
"]",
"+",
"1",
"lento3",
"=",
"pos_a",
"[",
"0",
"]",
"-",
"pos_b",
"[",
"1",
"]",
"+",
"1",
"if",
"pos_a",
"[",
"2",
"]",
"in",
"\"-\"",
"and",
"pos_b",
"[",
"2",
"]",
"in",
"\"+\"",
":",
"lento5",
"=",
"pos_a",
"[",
"0",
"]",
"-",
"pos_b",
"[",
"1",
"]",
"+",
"1",
"lento3",
"=",
"pos_a",
"[",
"1",
"]",
"-",
"pos_b",
"[",
"0",
"]",
"+",
"1",
"if",
"pos_a",
"[",
"2",
"]",
"in",
"\"-\"",
"and",
"pos_b",
"[",
"2",
"]",
"in",
"\"-\"",
":",
"lento3",
"=",
"pos_a",
"[",
"0",
"]",
"-",
"pos_b",
"[",
"0",
"]",
"+",
"1",
"lento5",
"=",
"pos_a",
"[",
"1",
"]",
"-",
"pos_b",
"[",
"1",
"]",
"+",
"1",
"else",
":",
"lento5",
"=",
"pos_a",
"[",
"0",
"]",
"-",
"pos_b",
"[",
"0",
"]",
"+",
"1",
"lento3",
"=",
"pos_a",
"[",
"1",
"]",
"-",
"pos_b",
"[",
"1",
"]",
"+",
"1",
"return",
"lento5",
",",
"lento3",
",",
"strd"
] | 36.571429 | 0.001269 | [
"def _position_in_feature(pos_a, pos_b):\n",
" \"\"\"return distance to 3' and 5' end of the feature\"\"\"\n",
" strd = \"-\"\n",
" if pos_a[2] in pos_b[2]:\n",
" strd = \"+\"\n",
" if pos_a[2] in \"+\" and pos_b[2] in \"+\":\n",
" lento5 = pos_a[0] - pos_b[1] + 1\n",
" lento3 = pos_a[1] - pos_b[1] + 1\n",
" if pos_a[2] in \"+\" and pos_b[2] in \"-\":\n",
" lento5 = pos_a[1] - pos_b[0] + 1\n",
" lento3 = pos_a[0] - pos_b[1] + 1\n",
" if pos_a[2] in \"-\" and pos_b[2] in \"+\":\n",
" lento5 = pos_a[0] - pos_b[1] + 1\n",
" lento3 = pos_a[1] - pos_b[0] + 1\n",
" if pos_a[2] in \"-\" and pos_b[2] in \"-\":\n",
" lento3 = pos_a[0] - pos_b[0] + 1\n",
" lento5 = pos_a[1] - pos_b[1] + 1\n",
" else:\n",
" lento5 = pos_a[0] - pos_b[0] + 1\n",
" lento3 = pos_a[1] - pos_b[1] + 1\n",
" return lento5, lento3, strd"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903
] | 21 | 0.001536 |
def remove_password_from_url(url):
# type: (S) -> S
"""
Given a url, remove the password and insert 4 dashes
:param url: The url to replace the authentication in
:type url: S
:return: The new URL without authentication
:rtype: S
"""
parsed = _get_parsed_url(url)
if parsed.auth:
auth, _, _ = parsed.auth.partition(":")
return parsed._replace(auth="{auth}:----".format(auth=auth)).url
return parsed.url | [
"def",
"remove_password_from_url",
"(",
"url",
")",
":",
"# type: (S) -> S",
"parsed",
"=",
"_get_parsed_url",
"(",
"url",
")",
"if",
"parsed",
".",
"auth",
":",
"auth",
",",
"_",
",",
"_",
"=",
"parsed",
".",
"auth",
".",
"partition",
"(",
"\":\"",
")",
"return",
"parsed",
".",
"_replace",
"(",
"auth",
"=",
"\"{auth}:----\"",
".",
"format",
"(",
"auth",
"=",
"auth",
")",
")",
".",
"url",
"return",
"parsed",
".",
"url"
] | 28 | 0.00216 | [
"def remove_password_from_url(url):\n",
" # type: (S) -> S\n",
" \"\"\"\n",
" Given a url, remove the password and insert 4 dashes\n",
"\n",
" :param url: The url to replace the authentication in\n",
" :type url: S\n",
" :return: The new URL without authentication\n",
" :rtype: S\n",
" \"\"\"\n",
"\n",
" parsed = _get_parsed_url(url)\n",
" if parsed.auth:\n",
" auth, _, _ = parsed.auth.partition(\":\")\n",
" return parsed._replace(auth=\"{auth}:----\".format(auth=auth)).url\n",
" return parsed.url"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 16 | 0.002976 |
def from_zipfile(cls, path, filename, encoding, dialect, fields, converters):
"""Read delimited text from zipfile."""
stream = ZipReader(path, filename).readlines(encoding)
return cls(stream, dialect, fields, converters) | [
"def",
"from_zipfile",
"(",
"cls",
",",
"path",
",",
"filename",
",",
"encoding",
",",
"dialect",
",",
"fields",
",",
"converters",
")",
":",
"stream",
"=",
"ZipReader",
"(",
"path",
",",
"filename",
")",
".",
"readlines",
"(",
"encoding",
")",
"return",
"cls",
"(",
"stream",
",",
"dialect",
",",
"fields",
",",
"converters",
")"
] | 48.2 | 0.008163 | [
"def from_zipfile(cls, path, filename, encoding, dialect, fields, converters):\n",
" \"\"\"Read delimited text from zipfile.\"\"\"\n",
"\n",
" stream = ZipReader(path, filename).readlines(encoding)\n",
" return cls(stream, dialect, fields, converters)"
] | [
0,
0.020833333333333332,
0,
0,
0.01818181818181818
] | 5 | 0.007803 |
def get_checklist(self, id, name=None):
'''
Get a checklist
Returns:
Checklist: The checklist with the given `id`
'''
return self.create_checklist(dict(id=id, name=name)) | [
"def",
"get_checklist",
"(",
"self",
",",
"id",
",",
"name",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_checklist",
"(",
"dict",
"(",
"id",
"=",
"id",
",",
"name",
"=",
"name",
")",
")"
] | 27 | 0.008969 | [
"def get_checklist(self, id, name=None):\n",
" '''\n",
" Get a checklist\n",
"\n",
" Returns:\n",
" Checklist: The checklist with the given `id`\n",
" '''\n",
" return self.create_checklist(dict(id=id, name=name))"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.016666666666666666
] | 8 | 0.0125 |
def _get_ssl(self):
"""Get an SMTP session with SSL."""
return smtplib.SMTP_SSL(
self.server, self.port, context=ssl.create_default_context()
) | [
"def",
"_get_ssl",
"(",
"self",
")",
":",
"return",
"smtplib",
".",
"SMTP_SSL",
"(",
"self",
".",
"server",
",",
"self",
".",
"port",
",",
"context",
"=",
"ssl",
".",
"create_default_context",
"(",
")",
")"
] | 35 | 0.011173 | [
"def _get_ssl(self):\n",
" \"\"\"Get an SMTP session with SSL.\"\"\"\n",
" return smtplib.SMTP_SSL(\n",
" self.server, self.port, context=ssl.create_default_context()\n",
" )"
] | [
0,
0.022727272727272728,
0,
0,
0.1111111111111111
] | 5 | 0.026768 |
def get_turnover(positions, transactions, denominator='AGB'):
"""
- Value of purchases and sales divided
by either the actual gross book or the portfolio value
for the time step.
Parameters
----------
positions : pd.DataFrame
Contains daily position values including cash.
- See full explanation in tears.create_full_tear_sheet
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet
denominator : str, optional
Either 'AGB' or 'portfolio_value', default AGB.
- AGB (Actual gross book) is the gross market
value (GMV) of the specific algo being analyzed.
Swapping out an entire portfolio of stocks for
another will yield 200% turnover, not 100%, since
transactions are being made for both sides.
- We use average of the previous and the current end-of-period
AGB to avoid singularities when trading only into or
out of an entire book in one trading period.
- portfolio_value is the total value of the algo's
positions end-of-period, including cash.
Returns
-------
turnover_rate : pd.Series
timeseries of portfolio turnover rates.
"""
txn_vol = get_txn_vol(transactions)
traded_value = txn_vol.txn_volume
if denominator == 'AGB':
# Actual gross book is the same thing as the algo's GMV
# We want our denom to be avg(AGB previous, AGB current)
AGB = positions.drop('cash', axis=1).abs().sum(axis=1)
denom = AGB.rolling(2).mean()
# Since the first value of pd.rolling returns NaN, we
# set our "day 0" AGB to 0.
denom.iloc[0] = AGB.iloc[0] / 2
elif denominator == 'portfolio_value':
denom = positions.sum(axis=1)
else:
raise ValueError(
"Unexpected value for denominator '{}'. The "
"denominator parameter must be either 'AGB'"
" or 'portfolio_value'.".format(denominator)
)
denom.index = denom.index.normalize()
turnover = traded_value.div(denom, axis='index')
turnover = turnover.fillna(0)
return turnover | [
"def",
"get_turnover",
"(",
"positions",
",",
"transactions",
",",
"denominator",
"=",
"'AGB'",
")",
":",
"txn_vol",
"=",
"get_txn_vol",
"(",
"transactions",
")",
"traded_value",
"=",
"txn_vol",
".",
"txn_volume",
"if",
"denominator",
"==",
"'AGB'",
":",
"# Actual gross book is the same thing as the algo's GMV",
"# We want our denom to be avg(AGB previous, AGB current)",
"AGB",
"=",
"positions",
".",
"drop",
"(",
"'cash'",
",",
"axis",
"=",
"1",
")",
".",
"abs",
"(",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"denom",
"=",
"AGB",
".",
"rolling",
"(",
"2",
")",
".",
"mean",
"(",
")",
"# Since the first value of pd.rolling returns NaN, we",
"# set our \"day 0\" AGB to 0.",
"denom",
".",
"iloc",
"[",
"0",
"]",
"=",
"AGB",
".",
"iloc",
"[",
"0",
"]",
"/",
"2",
"elif",
"denominator",
"==",
"'portfolio_value'",
":",
"denom",
"=",
"positions",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected value for denominator '{}'. The \"",
"\"denominator parameter must be either 'AGB'\"",
"\" or 'portfolio_value'.\"",
".",
"format",
"(",
"denominator",
")",
")",
"denom",
".",
"index",
"=",
"denom",
".",
"index",
".",
"normalize",
"(",
")",
"turnover",
"=",
"traded_value",
".",
"div",
"(",
"denom",
",",
"axis",
"=",
"'index'",
")",
"turnover",
"=",
"turnover",
".",
"fillna",
"(",
"0",
")",
"return",
"turnover"
] | 37.448276 | 0.000449 | [
"def get_turnover(positions, transactions, denominator='AGB'):\n",
" \"\"\"\n",
" - Value of purchases and sales divided\n",
" by either the actual gross book or the portfolio value\n",
" for the time step.\n",
"\n",
" Parameters\n",
" ----------\n",
" positions : pd.DataFrame\n",
" Contains daily position values including cash.\n",
" - See full explanation in tears.create_full_tear_sheet\n",
" transactions : pd.DataFrame\n",
" Prices and amounts of executed trades. One row per trade.\n",
" - See full explanation in tears.create_full_tear_sheet\n",
" denominator : str, optional\n",
" Either 'AGB' or 'portfolio_value', default AGB.\n",
" - AGB (Actual gross book) is the gross market\n",
" value (GMV) of the specific algo being analyzed.\n",
" Swapping out an entire portfolio of stocks for\n",
" another will yield 200% turnover, not 100%, since\n",
" transactions are being made for both sides.\n",
" - We use average of the previous and the current end-of-period\n",
" AGB to avoid singularities when trading only into or\n",
" out of an entire book in one trading period.\n",
" - portfolio_value is the total value of the algo's\n",
" positions end-of-period, including cash.\n",
"\n",
" Returns\n",
" -------\n",
" turnover_rate : pd.Series\n",
" timeseries of portfolio turnover rates.\n",
" \"\"\"\n",
"\n",
" txn_vol = get_txn_vol(transactions)\n",
" traded_value = txn_vol.txn_volume\n",
"\n",
" if denominator == 'AGB':\n",
" # Actual gross book is the same thing as the algo's GMV\n",
" # We want our denom to be avg(AGB previous, AGB current)\n",
" AGB = positions.drop('cash', axis=1).abs().sum(axis=1)\n",
" denom = AGB.rolling(2).mean()\n",
"\n",
" # Since the first value of pd.rolling returns NaN, we\n",
" # set our \"day 0\" AGB to 0.\n",
" denom.iloc[0] = AGB.iloc[0] / 2\n",
" elif denominator == 'portfolio_value':\n",
" denom = positions.sum(axis=1)\n",
" else:\n",
" raise ValueError(\n",
" \"Unexpected value for denominator '{}'. The \"\n",
" \"denominator parameter must be either 'AGB'\"\n",
" \" or 'portfolio_value'.\".format(denominator)\n",
" )\n",
"\n",
" denom.index = denom.index.normalize()\n",
" turnover = traded_value.div(denom, axis='index')\n",
" turnover = turnover.fillna(0)\n",
" return turnover"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 58 | 0.000907 |
def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found' | [
"def",
"validate_v2_endpoint_data",
"(",
"self",
",",
"endpoints",
",",
"admin_port",
",",
"internal_port",
",",
"public_port",
",",
"expected",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Validating endpoint data...'",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'actual: {}'",
".",
"format",
"(",
"repr",
"(",
"endpoints",
")",
")",
")",
"found",
"=",
"False",
"for",
"ep",
"in",
"endpoints",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'endpoint: {}'",
".",
"format",
"(",
"repr",
"(",
"ep",
")",
")",
")",
"if",
"(",
"admin_port",
"in",
"ep",
".",
"adminurl",
"and",
"internal_port",
"in",
"ep",
".",
"internalurl",
"and",
"public_port",
"in",
"ep",
".",
"publicurl",
")",
":",
"found",
"=",
"True",
"actual",
"=",
"{",
"'id'",
":",
"ep",
".",
"id",
",",
"'region'",
":",
"ep",
".",
"region",
",",
"'adminurl'",
":",
"ep",
".",
"adminurl",
",",
"'internalurl'",
":",
"ep",
".",
"internalurl",
",",
"'publicurl'",
":",
"ep",
".",
"publicurl",
",",
"'service_id'",
":",
"ep",
".",
"service_id",
"}",
"ret",
"=",
"self",
".",
"_validate_dict_data",
"(",
"expected",
",",
"actual",
")",
"if",
"ret",
":",
"return",
"'unexpected endpoint data - {}'",
".",
"format",
"(",
"ret",
")",
"if",
"not",
"found",
":",
"return",
"'endpoint not found'"
] | 43.428571 | 0.002414 | [
"def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,\n",
" public_port, expected):\n",
" \"\"\"Validate endpoint data.\n",
"\n",
" Validate actual endpoint data vs expected endpoint data. The ports\n",
" are used to find the matching endpoint.\n",
" \"\"\"\n",
" self.log.debug('Validating endpoint data...')\n",
" self.log.debug('actual: {}'.format(repr(endpoints)))\n",
" found = False\n",
" for ep in endpoints:\n",
" self.log.debug('endpoint: {}'.format(repr(ep)))\n",
" if (admin_port in ep.adminurl and\n",
" internal_port in ep.internalurl and\n",
" public_port in ep.publicurl):\n",
" found = True\n",
" actual = {'id': ep.id,\n",
" 'region': ep.region,\n",
" 'adminurl': ep.adminurl,\n",
" 'internalurl': ep.internalurl,\n",
" 'publicurl': ep.publicurl,\n",
" 'service_id': ep.service_id}\n",
" ret = self._validate_dict_data(expected, actual)\n",
" if ret:\n",
" return 'unexpected endpoint data - {}'.format(ret)\n",
"\n",
" if not found:\n",
" return 'endpoint not found'"
] | [
0,
0.017241379310344827,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564
] | 28 | 0.002552 |
def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var
if default is not NOT_SET:
self.config[key]['default'] = default | [
"def",
"add_config",
"(",
"self",
",",
"key",
",",
"type_",
",",
"default",
"=",
"NOT_SET",
",",
"env_var",
"=",
"None",
")",
":",
"self",
".",
"config",
"[",
"key",
"]",
"=",
"{",
"'type'",
":",
"type_",
"}",
"if",
"env_var",
"is",
"not",
"None",
":",
"self",
".",
"config",
"[",
"key",
"]",
"[",
"'env_var'",
"]",
"=",
"env_var",
"if",
"default",
"is",
"not",
"NOT_SET",
":",
"self",
".",
"config",
"[",
"key",
"]",
"[",
"'default'",
"]",
"=",
"default"
] | 45.241379 | 0.001493 | [
"def add_config(self, key, type_, default=NOT_SET, env_var=None):\n",
" \"\"\"Add a configuration setting.\n",
"\n",
" Parameters\n",
" ----------\n",
" key : str\n",
" The name of the configuration setting. This must be a valid\n",
" Python attribute name i.e. alphanumeric with underscores.\n",
" type : function\n",
" A function such as ``float``, ``int`` or ``str`` which takes\n",
" the configuration value and returns an object of the correct\n",
" type. Note that the values retrieved from environment\n",
" variables are always strings, while those retrieved from the\n",
" YAML file might already be parsed. Hence, the function provided\n",
" here must accept both types of input.\n",
" default : object, optional\n",
" The default configuration to return if not set. By default none\n",
" is set and an error is raised instead.\n",
" env_var : str, optional\n",
" The environment variable name that holds this configuration\n",
" value. If not given, this configuration can only be set in the\n",
" YAML configuration file.\n",
"\n",
" \"\"\"\n",
" self.config[key] = {'type': type_}\n",
" if env_var is not None:\n",
" self.config[key]['env_var'] = env_var\n",
" if default is not NOT_SET:\n",
" self.config[key]['default'] = default"
] | [
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612
] | 29 | 0.001566 |
def minus(self, a):
""" Subtract. """
return Vector(self.x-a.x, self.y-a.y, self.z-a.z) | [
"def",
"minus",
"(",
"self",
",",
"a",
")",
":",
"return",
"Vector",
"(",
"self",
".",
"x",
"-",
"a",
".",
"x",
",",
"self",
".",
"y",
"-",
"a",
".",
"y",
",",
"self",
".",
"z",
"-",
"a",
".",
"z",
")"
] | 33.666667 | 0.019417 | [
"def minus(self, a):\n",
" \"\"\" Subtract. \"\"\"\n",
" return Vector(self.x-a.x, self.y-a.y, self.z-a.z)"
] | [
0,
0.038461538461538464,
0.017543859649122806
] | 3 | 0.018668 |
def gen_mac(prefix='AC:DE:48'):
'''
Generates a MAC address with the defined OUI prefix.
Common prefixes:
- ``00:16:3E`` -- Xen
- ``00:18:51`` -- OpenVZ
- ``00:50:56`` -- VMware (manually generated)
- ``52:54:00`` -- QEMU/KVM
- ``AC:DE:48`` -- PRIVATE
References:
- http://standards.ieee.org/develop/regauth/oui/oui.txt
- https://www.wireshark.org/tools/oui-lookup.html
- https://en.wikipedia.org/wiki/MAC_address
'''
return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
random.randint(0, 0xff),
random.randint(0, 0xff),
random.randint(0, 0xff)) | [
"def",
"gen_mac",
"(",
"prefix",
"=",
"'AC:DE:48'",
")",
":",
"return",
"'{0}:{1:02X}:{2:02X}:{3:02X}'",
".",
"format",
"(",
"prefix",
",",
"random",
".",
"randint",
"(",
"0",
",",
"0xff",
")",
",",
"random",
".",
"randint",
"(",
"0",
",",
"0xff",
")",
",",
"random",
".",
"randint",
"(",
"0",
",",
"0xff",
")",
")"
] | 33.409091 | 0.001323 | [
"def gen_mac(prefix='AC:DE:48'):\n",
" '''\n",
" Generates a MAC address with the defined OUI prefix.\n",
"\n",
" Common prefixes:\n",
"\n",
" - ``00:16:3E`` -- Xen\n",
" - ``00:18:51`` -- OpenVZ\n",
" - ``00:50:56`` -- VMware (manually generated)\n",
" - ``52:54:00`` -- QEMU/KVM\n",
" - ``AC:DE:48`` -- PRIVATE\n",
"\n",
" References:\n",
"\n",
" - http://standards.ieee.org/develop/regauth/oui/oui.txt\n",
" - https://www.wireshark.org/tools/oui-lookup.html\n",
" - https://en.wikipedia.org/wiki/MAC_address\n",
" '''\n",
" return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,\n",
" random.randint(0, 0xff),\n",
" random.randint(0, 0xff),\n",
" random.randint(0, 0xff))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013888888888888888
] | 22 | 0.000631 |
def data(self, index, role):
'''Return data for *index* according to *role*.'''
if not index.isValid():
return None
column = index.column()
item = index.internalPointer()
if role == self.ITEM_ROLE:
return item
elif role == Qt.DisplayRole:
if column == 0:
return item.name
elif column == 1:
if item.size:
return item.size
elif column == 2:
return item.type
elif column == 3:
if item.modified is not None:
return item.modified.strftime('%c')
elif role == Qt.DecorationRole:
if column == 0:
return self.iconFactory.icon(item)
elif role == Qt.TextAlignmentRole:
if column == 1:
return Qt.AlignRight
else:
return Qt.AlignLeft
return None | [
"def",
"data",
"(",
"self",
",",
"index",
",",
"role",
")",
":",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"return",
"None",
"column",
"=",
"index",
".",
"column",
"(",
")",
"item",
"=",
"index",
".",
"internalPointer",
"(",
")",
"if",
"role",
"==",
"self",
".",
"ITEM_ROLE",
":",
"return",
"item",
"elif",
"role",
"==",
"Qt",
".",
"DisplayRole",
":",
"if",
"column",
"==",
"0",
":",
"return",
"item",
".",
"name",
"elif",
"column",
"==",
"1",
":",
"if",
"item",
".",
"size",
":",
"return",
"item",
".",
"size",
"elif",
"column",
"==",
"2",
":",
"return",
"item",
".",
"type",
"elif",
"column",
"==",
"3",
":",
"if",
"item",
".",
"modified",
"is",
"not",
"None",
":",
"return",
"item",
".",
"modified",
".",
"strftime",
"(",
"'%c'",
")",
"elif",
"role",
"==",
"Qt",
".",
"DecorationRole",
":",
"if",
"column",
"==",
"0",
":",
"return",
"self",
".",
"iconFactory",
".",
"icon",
"(",
"item",
")",
"elif",
"role",
"==",
"Qt",
".",
"TextAlignmentRole",
":",
"if",
"column",
"==",
"1",
":",
"return",
"Qt",
".",
"AlignRight",
"else",
":",
"return",
"Qt",
".",
"AlignLeft",
"return",
"None"
] | 26.771429 | 0.00206 | [
"def data(self, index, role):\n",
" '''Return data for *index* according to *role*.'''\n",
" if not index.isValid():\n",
" return None\n",
"\n",
" column = index.column()\n",
" item = index.internalPointer()\n",
"\n",
" if role == self.ITEM_ROLE:\n",
" return item\n",
"\n",
" elif role == Qt.DisplayRole:\n",
"\n",
" if column == 0:\n",
" return item.name\n",
" elif column == 1:\n",
" if item.size:\n",
" return item.size\n",
" elif column == 2:\n",
" return item.type\n",
" elif column == 3:\n",
" if item.modified is not None:\n",
" return item.modified.strftime('%c')\n",
"\n",
" elif role == Qt.DecorationRole:\n",
" if column == 0:\n",
" return self.iconFactory.icon(item)\n",
"\n",
" elif role == Qt.TextAlignmentRole:\n",
" if column == 1:\n",
" return Qt.AlignRight\n",
" else:\n",
" return Qt.AlignLeft\n",
"\n",
" return None"
] | [
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 35 | 0.001988 |
def get_membrane_xml(self, pdb_id):
''' Returns the <MEMBRANE> tag XML for pdb_id if the tag exists.'''
self.tmp_string = None
context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
try:
fast_iter(context, self._get_membrane_xml, pdb_id = pdb_id.upper())
except EarlyOut: pass
return self.tmp_string | [
"def",
"get_membrane_xml",
"(",
"self",
",",
"pdb_id",
")",
":",
"self",
".",
"tmp_string",
"=",
"None",
"context",
"=",
"etree",
".",
"iterparse",
"(",
"io",
".",
"BytesIO",
"(",
"self",
".",
"xml_contents",
")",
",",
"events",
"=",
"(",
"'end'",
",",
")",
",",
"tag",
"=",
"self",
".",
"PDBTM_entry_tag_type",
")",
"try",
":",
"fast_iter",
"(",
"context",
",",
"self",
".",
"_get_membrane_xml",
",",
"pdb_id",
"=",
"pdb_id",
".",
"upper",
"(",
")",
")",
"except",
"EarlyOut",
":",
"pass",
"return",
"self",
".",
"tmp_string"
] | 50.25 | 0.01467 | [
"def get_membrane_xml(self, pdb_id):\n",
" ''' Returns the <MEMBRANE> tag XML for pdb_id if the tag exists.'''\n",
" self.tmp_string = None\n",
" context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)\n",
" try:\n",
" fast_iter(context, self._get_membrane_xml, pdb_id = pdb_id.upper())\n",
" except EarlyOut: pass\n",
" return self.tmp_string"
] | [
0,
0.013157894736842105,
0,
0.008849557522123894,
0,
0.025,
0.03333333333333333,
0.03333333333333333
] | 8 | 0.014209 |
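The record above streams a large PDBTM XML file with lxml's iterparse plus a fast_iter helper, so the whole tree never sits in memory. Below is a minimal, self-contained sketch of that pattern; this fast_iter is a generic re-implementation and the sample XML is made up, not the project's own helper or data.

import io
from lxml import etree

def fast_iter(context, func, **kwargs):
    # Stream elements, hand each to func, then free it (and any earlier,
    # already-processed siblings) so memory stays flat on huge documents.
    for _event, elem in context:
        func(elem, **kwargs)
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context

found = []
xml = b"<root><MEMBRANE id='1A2B'/><MEMBRANE id='9XYZ'/></root>"
context = etree.iterparse(io.BytesIO(xml), events=('end',), tag='MEMBRANE')
fast_iter(context, lambda elem, out: out.append(elem.get('id')), out=found)
print(found)   # ['1A2B', '9XYZ']
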
def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,
profile=None):
'''
Get and return all IAM role details, starting at the optional path.
.. versionadded:: 2016.3.0
CLI Example:
salt-call boto_iam.get_all_roles
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
_roles = conn.list_roles(path_prefix=path_prefix)
roles = _roles.list_roles_response.list_roles_result.roles
marker = getattr(
_roles.list_roles_response.list_roles_result, 'marker', None
)
while marker:
_roles = conn.list_roles(path_prefix=path_prefix, marker=marker)
roles = roles + _roles.list_roles_response.list_roles_result.roles
marker = getattr(
_roles.list_roles_response.list_roles_result, 'marker', None
)
return roles | [
"def",
"get_all_roles",
"(",
"path_prefix",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"conn",
":",
"return",
"None",
"_roles",
"=",
"conn",
".",
"list_roles",
"(",
"path_prefix",
"=",
"path_prefix",
")",
"roles",
"=",
"_roles",
".",
"list_roles_response",
".",
"list_roles_result",
".",
"roles",
"marker",
"=",
"getattr",
"(",
"_roles",
".",
"list_roles_response",
".",
"list_roles_result",
",",
"'marker'",
",",
"None",
")",
"while",
"marker",
":",
"_roles",
"=",
"conn",
".",
"list_roles",
"(",
"path_prefix",
"=",
"path_prefix",
",",
"marker",
"=",
"marker",
")",
"roles",
"=",
"roles",
"+",
"_roles",
".",
"list_roles_response",
".",
"list_roles_result",
".",
"roles",
"marker",
"=",
"getattr",
"(",
"_roles",
".",
"list_roles_response",
".",
"list_roles_result",
",",
"'marker'",
",",
"None",
")",
"return",
"roles"
] | 33.653846 | 0.002222 | [
"def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,\n",
" profile=None):\n",
" '''\n",
" Get and return all IAM role details, starting at the optional path.\n",
"\n",
" .. versionadded:: 2016.3.0\n",
"\n",
" CLI Example:\n",
"\n",
" salt-call boto_iam.get_all_roles\n",
" '''\n",
" conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n",
" if not conn:\n",
" return None\n",
" _roles = conn.list_roles(path_prefix=path_prefix)\n",
" roles = _roles.list_roles_response.list_roles_result.roles\n",
" marker = getattr(\n",
" _roles.list_roles_response.list_roles_result, 'marker', None\n",
" )\n",
" while marker:\n",
" _roles = conn.list_roles(path_prefix=path_prefix, marker=marker)\n",
" roles = roles + _roles.list_roles_response.list_roles_result.roles\n",
" marker = getattr(\n",
" _roles.list_roles_response.list_roles_result, 'marker', None\n",
" )\n",
" return roles"
] | [
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 26 | 0.003606 |
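The while-marker loop above is the standard pagination idiom for boto's list calls. Here is a generic, dependency-free sketch of the same idiom; list_page and PAGES are stand-ins for illustration, not AWS APIs.

def fetch_all(list_page):
    # list_page(marker) -> (items, next_marker); next_marker is None once
    # the last page has been served, mirroring the IAM 'marker' field.
    items, marker = list_page(None)
    while marker:
        page, marker = list_page(marker)
        items = items + page
    return items

# Tiny in-memory stand-in for a paginated API.
PAGES = {None: (['a', 'b'], 'm1'), 'm1': (['c'], 'm2'), 'm2': (['d'], None)}
print(fetch_all(PAGES.get))   # ['a', 'b', 'c', 'd']
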
def rename_document(self, old_path, new_path):
"""
Renames an already opened document (this will not rename the file,
just update the file path and tab title).
Use that function to update a file that has been renamed externally.
:param old_path: old path (path of the widget to rename with
``new_path``
:param new_path: new path that will be used to rename the tab.
"""
to_rename = []
title = os.path.split(new_path)[1]
for widget in self.widgets(include_clones=True):
p = os.path.normpath(os.path.normcase(widget.file.path))
old_path = os.path.normpath(os.path.normcase(old_path))
if p == old_path:
to_rename.append(widget)
for widget in to_rename:
tw = widget.parent_tab_widget
widget.file._path = new_path
tw.setTabText(tw.indexOf(widget), title) | [
"def",
"rename_document",
"(",
"self",
",",
"old_path",
",",
"new_path",
")",
":",
"to_rename",
"=",
"[",
"]",
"title",
"=",
"os",
".",
"path",
".",
"split",
"(",
"new_path",
")",
"[",
"1",
"]",
"for",
"widget",
"in",
"self",
".",
"widgets",
"(",
"include_clones",
"=",
"True",
")",
":",
"p",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"normcase",
"(",
"widget",
".",
"file",
".",
"path",
")",
")",
"old_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"normcase",
"(",
"old_path",
")",
")",
"if",
"p",
"==",
"old_path",
":",
"to_rename",
".",
"append",
"(",
"widget",
")",
"for",
"widget",
"in",
"to_rename",
":",
"tw",
"=",
"widget",
".",
"parent_tab_widget",
"widget",
".",
"file",
".",
"_path",
"=",
"new_path",
"tw",
".",
"setTabText",
"(",
"tw",
".",
"indexOf",
"(",
"widget",
")",
",",
"title",
")"
] | 41.727273 | 0.00213 | [
"def rename_document(self, old_path, new_path):\n",
" \"\"\"\n",
" Renames an already opened document (this will not rename the file,\n",
" just update the file path and tab title).\n",
"\n",
" Use that function to update a file that has been renamed externally.\n",
"\n",
" :param old_path: old path (path of the widget to rename with\n",
" ``new_path``\n",
" :param new_path: new path that will be used to rename the tab.\n",
" \"\"\"\n",
" to_rename = []\n",
" title = os.path.split(new_path)[1]\n",
" for widget in self.widgets(include_clones=True):\n",
" p = os.path.normpath(os.path.normcase(widget.file.path))\n",
" old_path = os.path.normpath(os.path.normcase(old_path))\n",
" if p == old_path:\n",
" to_rename.append(widget)\n",
" for widget in to_rename:\n",
" tw = widget.parent_tab_widget\n",
" widget.file._path = new_path\n",
" tw.setTabText(tw.indexOf(widget), title)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232
] | 22 | 0.004662 |
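The comparison in the record above normalizes both separators and case before matching paths, which is what lets the rename find a document however its path was spelled. A standalone illustration with made-up paths:

import os

def same_path(a, b):
    # normpath collapses '..' and mixed separators; normcase lower-cases on
    # case-insensitive filesystems (Windows) and is a no-op elsewhere.
    def norm(p):
        return os.path.normpath(os.path.normcase(p))
    return norm(a) == norm(b)

print(same_path(r'C:\work\Project\file.TXT', 'C:/work/project/FILE.txt'))
# True on Windows, False on POSIX where case is significant
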
def _create_svc_vm_hosting_devices(self, context, num, template):
"""Creates <num> or less service VM instances based on <template>.
These hosting devices can be bound to a certain tenant or for shared
use. A list with the created hosting device VMs is returned.
"""
hosting_devices = []
template_id = template['id']
credentials_id = template['default_credentials_id']
plugging_drv = self.get_hosting_device_plugging_driver(context,
template_id)
hosting_device_drv = self.get_hosting_device_driver(context,
template_id)
if plugging_drv is None or hosting_device_drv is None or num <= 0:
return hosting_devices
#TODO(bobmel): Determine value for max_hosted properly
max_hosted = 1 # template['slot_capacity']
dev_data, mgmt_context = self._get_resources_properties_for_hd(
template, credentials_id)
credentials_info = self._credentials.get(credentials_id)
if credentials_info is None:
LOG.error('Could not find credentials for hosting device'
'template %s. Aborting VM hosting device creation.',
template_id)
return hosting_devices
connectivity_info = self._get_mgmt_connectivity_info(
context, self.mgmt_subnet_id())
for i in range(num):
complementary_id = uuidutils.generate_uuid()
res = plugging_drv.create_hosting_device_resources(
context, complementary_id, self.l3_tenant_id(), mgmt_context,
max_hosted)
if res.get('mgmt_port') is None:
# Required ports could not be created
return hosting_devices
connectivity_info['mgmt_port'] = res['mgmt_port']
vm_instance = self.svc_vm_mgr.dispatch_service_vm(
context, template['name'] + '_nrouter', template['image'],
template['flavor'], hosting_device_drv, credentials_info,
connectivity_info, res.get('ports'))
if vm_instance is not None:
dev_data.update(
{'id': vm_instance['id'],
'complementary_id': complementary_id,
'management_ip_address': res['mgmt_port'][
'fixed_ips'][0]['ip_address'],
'management_port_id': res['mgmt_port']['id']})
self.create_hosting_device(context,
{'hosting_device': dev_data})
hosting_devices.append(vm_instance)
else:
# Fundamental error like could not contact Nova
# Cleanup anything we created
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
break
LOG.info('Created %(num)d hosting device VMs based on template '
'%(t_id)s', {'num': len(hosting_devices),
't_id': template_id})
return hosting_devices | [
"def",
"_create_svc_vm_hosting_devices",
"(",
"self",
",",
"context",
",",
"num",
",",
"template",
")",
":",
"hosting_devices",
"=",
"[",
"]",
"template_id",
"=",
"template",
"[",
"'id'",
"]",
"credentials_id",
"=",
"template",
"[",
"'default_credentials_id'",
"]",
"plugging_drv",
"=",
"self",
".",
"get_hosting_device_plugging_driver",
"(",
"context",
",",
"template_id",
")",
"hosting_device_drv",
"=",
"self",
".",
"get_hosting_device_driver",
"(",
"context",
",",
"template_id",
")",
"if",
"plugging_drv",
"is",
"None",
"or",
"hosting_device_drv",
"is",
"None",
"or",
"num",
"<=",
"0",
":",
"return",
"hosting_devices",
"#TODO(bobmel): Determine value for max_hosted properly",
"max_hosted",
"=",
"1",
"# template['slot_capacity']",
"dev_data",
",",
"mgmt_context",
"=",
"self",
".",
"_get_resources_properties_for_hd",
"(",
"template",
",",
"credentials_id",
")",
"credentials_info",
"=",
"self",
".",
"_credentials",
".",
"get",
"(",
"credentials_id",
")",
"if",
"credentials_info",
"is",
"None",
":",
"LOG",
".",
"error",
"(",
"'Could not find credentials for hosting device'",
"'template %s. Aborting VM hosting device creation.'",
",",
"template_id",
")",
"return",
"hosting_devices",
"connectivity_info",
"=",
"self",
".",
"_get_mgmt_connectivity_info",
"(",
"context",
",",
"self",
".",
"mgmt_subnet_id",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num",
")",
":",
"complementary_id",
"=",
"uuidutils",
".",
"generate_uuid",
"(",
")",
"res",
"=",
"plugging_drv",
".",
"create_hosting_device_resources",
"(",
"context",
",",
"complementary_id",
",",
"self",
".",
"l3_tenant_id",
"(",
")",
",",
"mgmt_context",
",",
"max_hosted",
")",
"if",
"res",
".",
"get",
"(",
"'mgmt_port'",
")",
"is",
"None",
":",
"# Required ports could not be created",
"return",
"hosting_devices",
"connectivity_info",
"[",
"'mgmt_port'",
"]",
"=",
"res",
"[",
"'mgmt_port'",
"]",
"vm_instance",
"=",
"self",
".",
"svc_vm_mgr",
".",
"dispatch_service_vm",
"(",
"context",
",",
"template",
"[",
"'name'",
"]",
"+",
"'_nrouter'",
",",
"template",
"[",
"'image'",
"]",
",",
"template",
"[",
"'flavor'",
"]",
",",
"hosting_device_drv",
",",
"credentials_info",
",",
"connectivity_info",
",",
"res",
".",
"get",
"(",
"'ports'",
")",
")",
"if",
"vm_instance",
"is",
"not",
"None",
":",
"dev_data",
".",
"update",
"(",
"{",
"'id'",
":",
"vm_instance",
"[",
"'id'",
"]",
",",
"'complementary_id'",
":",
"complementary_id",
",",
"'management_ip_address'",
":",
"res",
"[",
"'mgmt_port'",
"]",
"[",
"'fixed_ips'",
"]",
"[",
"0",
"]",
"[",
"'ip_address'",
"]",
",",
"'management_port_id'",
":",
"res",
"[",
"'mgmt_port'",
"]",
"[",
"'id'",
"]",
"}",
")",
"self",
".",
"create_hosting_device",
"(",
"context",
",",
"{",
"'hosting_device'",
":",
"dev_data",
"}",
")",
"hosting_devices",
".",
"append",
"(",
"vm_instance",
")",
"else",
":",
"# Fundamental error like could not contact Nova",
"# Cleanup anything we created",
"plugging_drv",
".",
"delete_hosting_device_resources",
"(",
"context",
",",
"self",
".",
"l3_tenant_id",
"(",
")",
",",
"*",
"*",
"res",
")",
"break",
"LOG",
".",
"info",
"(",
"'Created %(num)d hosting device VMs based on template '",
"'%(t_id)s'",
",",
"{",
"'num'",
":",
"len",
"(",
"hosting_devices",
")",
",",
"'t_id'",
":",
"template_id",
"}",
")",
"return",
"hosting_devices"
] | 52.7 | 0.000931 | [
"def _create_svc_vm_hosting_devices(self, context, num, template):\n",
" \"\"\"Creates <num> or less service VM instances based on <template>.\n",
"\n",
" These hosting devices can be bound to a certain tenant or for shared\n",
" use. A list with the created hosting device VMs is returned.\n",
" \"\"\"\n",
" hosting_devices = []\n",
" template_id = template['id']\n",
" credentials_id = template['default_credentials_id']\n",
" plugging_drv = self.get_hosting_device_plugging_driver(context,\n",
" template_id)\n",
" hosting_device_drv = self.get_hosting_device_driver(context,\n",
" template_id)\n",
" if plugging_drv is None or hosting_device_drv is None or num <= 0:\n",
" return hosting_devices\n",
" #TODO(bobmel): Determine value for max_hosted properly\n",
" max_hosted = 1 # template['slot_capacity']\n",
" dev_data, mgmt_context = self._get_resources_properties_for_hd(\n",
" template, credentials_id)\n",
" credentials_info = self._credentials.get(credentials_id)\n",
" if credentials_info is None:\n",
" LOG.error('Could not find credentials for hosting device'\n",
" 'template %s. Aborting VM hosting device creation.',\n",
" template_id)\n",
" return hosting_devices\n",
" connectivity_info = self._get_mgmt_connectivity_info(\n",
" context, self.mgmt_subnet_id())\n",
" for i in range(num):\n",
" complementary_id = uuidutils.generate_uuid()\n",
" res = plugging_drv.create_hosting_device_resources(\n",
" context, complementary_id, self.l3_tenant_id(), mgmt_context,\n",
" max_hosted)\n",
" if res.get('mgmt_port') is None:\n",
" # Required ports could not be created\n",
" return hosting_devices\n",
" connectivity_info['mgmt_port'] = res['mgmt_port']\n",
" vm_instance = self.svc_vm_mgr.dispatch_service_vm(\n",
" context, template['name'] + '_nrouter', template['image'],\n",
" template['flavor'], hosting_device_drv, credentials_info,\n",
" connectivity_info, res.get('ports'))\n",
" if vm_instance is not None:\n",
" dev_data.update(\n",
" {'id': vm_instance['id'],\n",
" 'complementary_id': complementary_id,\n",
" 'management_ip_address': res['mgmt_port'][\n",
" 'fixed_ips'][0]['ip_address'],\n",
" 'management_port_id': res['mgmt_port']['id']})\n",
" self.create_hosting_device(context,\n",
" {'hosting_device': dev_data})\n",
" hosting_devices.append(vm_instance)\n",
" else:\n",
" # Fundamental error like could not contact Nova\n",
" # Cleanup anything we created\n",
" plugging_drv.delete_hosting_device_resources(\n",
" context, self.l3_tenant_id(), **res)\n",
" break\n",
" LOG.info('Created %(num)d hosting device VMs based on template '\n",
" '%(t_id)s', {'num': len(hosting_devices),\n",
" 't_id': template_id})\n",
" return hosting_devices"
] | [
0,
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333
] | 60 | 0.001042 |
def ensure_specification_cols_are_in_dataframe(specification, dataframe):
"""
Checks whether each column in `specification` is in `dataframe`. Raises
ValueError if any of the columns are not in the dataframe.
Parameters
----------
specification : OrderedDict.
Keys are a proper subset of the columns in `data`. Values are either a
list or a single string, "all_diff" or "all_same". If a list, the
elements should be:
- single objects that are in the alternative ID column of `data`
- lists of objects that are within the alternative ID column of
`data`. For each single object in the list, a unique column will
be created (i.e. there will be a unique coefficient for that
variable in the corresponding utility equation of the
corresponding alternative). For lists within the
`specification` values, a single column will be created for all
the alternatives within the iterable (i.e. there will be one
common coefficient for the variables in the iterable).
dataframe : pandas DataFrame.
Dataframe containing the data for the choice model to be estimated.
Returns
-------
None.
"""
# Make sure specification is an OrderedDict
try:
assert isinstance(specification, OrderedDict)
except AssertionError:
raise TypeError("`specification` must be an OrderedDict.")
# Make sure dataframe is a pandas dataframe
assert isinstance(dataframe, pd.DataFrame)
problem_cols = []
dataframe_cols = dataframe.columns
for key in specification:
if key not in dataframe_cols:
problem_cols.append(key)
if problem_cols != []:
msg = "The following keys in the specification are not in 'data':\n{}"
raise ValueError(msg.format(problem_cols))
return None | [
"def",
"ensure_specification_cols_are_in_dataframe",
"(",
"specification",
",",
"dataframe",
")",
":",
"# Make sure specification is an OrderedDict",
"try",
":",
"assert",
"isinstance",
"(",
"specification",
",",
"OrderedDict",
")",
"except",
"AssertionError",
":",
"raise",
"TypeError",
"(",
"\"`specification` must be an OrderedDict.\"",
")",
"# Make sure dataframe is a pandas dataframe",
"assert",
"isinstance",
"(",
"dataframe",
",",
"pd",
".",
"DataFrame",
")",
"problem_cols",
"=",
"[",
"]",
"dataframe_cols",
"=",
"dataframe",
".",
"columns",
"for",
"key",
"in",
"specification",
":",
"if",
"key",
"not",
"in",
"dataframe_cols",
":",
"problem_cols",
".",
"append",
"(",
"key",
")",
"if",
"problem_cols",
"!=",
"[",
"]",
":",
"msg",
"=",
"\"The following keys in the specification are not in 'data':\\n{}\"",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"problem_cols",
")",
")",
"return",
"None"
] | 41.666667 | 0.000521 | [
"def ensure_specification_cols_are_in_dataframe(specification, dataframe):\n",
" \"\"\"\n",
" Checks whether each column in `specification` is in `dataframe`. Raises\n",
" ValueError if any of the columns are not in the dataframe.\n",
"\n",
" Parameters\n",
" ----------\n",
" specification : OrderedDict.\n",
" Keys are a proper subset of the columns in `data`. Values are either a\n",
" list or a single string, \"all_diff\" or \"all_same\". If a list, the\n",
" elements should be:\n",
" - single objects that are in the alternative ID column of `data`\n",
" - lists of objects that are within the alternative ID column of\n",
" `data`. For each single object in the list, a unique column will\n",
" be created (i.e. there will be a unique coefficient for that\n",
" variable in the corresponding utility equation of the\n",
" corresponding alternative). For lists within the\n",
" `specification` values, a single column will be created for all\n",
" the alternatives within the iterable (i.e. there will be one\n",
" common coefficient for the variables in the iterable).\n",
" dataframe : pandas DataFrame.\n",
" Dataframe containing the data for the choice model to be estimated.\n",
"\n",
" Returns\n",
" -------\n",
" None.\n",
" \"\"\"\n",
" # Make sure specification is an OrderedDict\n",
" try:\n",
" assert isinstance(specification, OrderedDict)\n",
" except AssertionError:\n",
" raise TypeError(\"`specification` must be an OrderedDict.\")\n",
" # Make sure dataframe is a pandas dataframe\n",
" assert isinstance(dataframe, pd.DataFrame)\n",
"\n",
" problem_cols = []\n",
" dataframe_cols = dataframe.columns\n",
" for key in specification:\n",
" if key not in dataframe_cols:\n",
" problem_cols.append(key)\n",
" if problem_cols != []:\n",
" msg = \"The following keys in the specification are not in 'data':\\n{}\"\n",
" raise ValueError(msg.format(problem_cols))\n",
"\n",
" return None"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 45 | 0.001481 |
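A short usage sketch for the column check above, assuming the function from the record is in scope; the column names and data are invented.

from collections import OrderedDict
import pandas as pd

df = pd.DataFrame({'intercept': [1, 1], 'travel_time': [10.0, 25.0]})
spec = OrderedDict([('intercept', 'all_same'), ('travel_cost', 'all_diff')])

try:
    ensure_specification_cols_are_in_dataframe(spec, df)
except ValueError as err:
    print(err)   # reports ['travel_cost'] as missing from the dataframe
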
def detect_sv(items, all_items=None, stage="standard"):
"""Top level parallel target for examining structural variation.
"""
items = [utils.to_single_data(x) for x in items]
items = cwlutils.unpack_tarballs(items, items[0])
svcaller = items[0]["config"]["algorithm"].get("svcaller")
caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller)
out = []
if svcaller and caller_fn:
if (all_items and svcaller in _NEEDS_BACKGROUND and
not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)):
names = set([dd.get_sample_name(x) for x in items])
background = [x for x in all_items if dd.get_sample_name(x) not in names]
for svdata in caller_fn(items, background):
out.append([svdata])
else:
for svdata in caller_fn(items):
out.append([svdata])
else:
for data in items:
out.append([data])
# Avoid nesting of callers for CWL runs for easier extraction
if cwlutils.is_cwl_run(items[0]):
out_cwl = []
for data in [utils.to_single_data(x) for x in out]:
# Run validation directly from CWL runs since we're single stage
data = validate.evaluate(data)
data["svvalidate"] = {"summary": tz.get_in(["sv-validate", "csv"], data)}
svs = data.get("sv")
if svs:
assert len(svs) == 1, svs
data["sv"] = svs[0]
else:
data["sv"] = {}
data = _add_supplemental(data)
out_cwl.append([data])
return out_cwl
return out | [
"def",
"detect_sv",
"(",
"items",
",",
"all_items",
"=",
"None",
",",
"stage",
"=",
"\"standard\"",
")",
":",
"items",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"items",
"]",
"items",
"=",
"cwlutils",
".",
"unpack_tarballs",
"(",
"items",
",",
"items",
"[",
"0",
"]",
")",
"svcaller",
"=",
"items",
"[",
"0",
"]",
"[",
"\"config\"",
"]",
"[",
"\"algorithm\"",
"]",
".",
"get",
"(",
"\"svcaller\"",
")",
"caller_fn",
"=",
"_get_callers",
"(",
"items",
",",
"stage",
",",
"special_cases",
"=",
"True",
")",
".",
"get",
"(",
"svcaller",
")",
"out",
"=",
"[",
"]",
"if",
"svcaller",
"and",
"caller_fn",
":",
"if",
"(",
"all_items",
"and",
"svcaller",
"in",
"_NEEDS_BACKGROUND",
"and",
"not",
"vcfutils",
".",
"is_paired_analysis",
"(",
"[",
"x",
".",
"get",
"(",
"\"align_bam\"",
")",
"for",
"x",
"in",
"items",
"]",
",",
"items",
")",
")",
":",
"names",
"=",
"set",
"(",
"[",
"dd",
".",
"get_sample_name",
"(",
"x",
")",
"for",
"x",
"in",
"items",
"]",
")",
"background",
"=",
"[",
"x",
"for",
"x",
"in",
"all_items",
"if",
"dd",
".",
"get_sample_name",
"(",
"x",
")",
"not",
"in",
"names",
"]",
"for",
"svdata",
"in",
"caller_fn",
"(",
"items",
",",
"background",
")",
":",
"out",
".",
"append",
"(",
"[",
"svdata",
"]",
")",
"else",
":",
"for",
"svdata",
"in",
"caller_fn",
"(",
"items",
")",
":",
"out",
".",
"append",
"(",
"[",
"svdata",
"]",
")",
"else",
":",
"for",
"data",
"in",
"items",
":",
"out",
".",
"append",
"(",
"[",
"data",
"]",
")",
"# Avoid nesting of callers for CWL runs for easier extraction",
"if",
"cwlutils",
".",
"is_cwl_run",
"(",
"items",
"[",
"0",
"]",
")",
":",
"out_cwl",
"=",
"[",
"]",
"for",
"data",
"in",
"[",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"out",
"]",
":",
"# Run validation directly from CWL runs since we're single stage",
"data",
"=",
"validate",
".",
"evaluate",
"(",
"data",
")",
"data",
"[",
"\"svvalidate\"",
"]",
"=",
"{",
"\"summary\"",
":",
"tz",
".",
"get_in",
"(",
"[",
"\"sv-validate\"",
",",
"\"csv\"",
"]",
",",
"data",
")",
"}",
"svs",
"=",
"data",
".",
"get",
"(",
"\"sv\"",
")",
"if",
"svs",
":",
"assert",
"len",
"(",
"svs",
")",
"==",
"1",
",",
"svs",
"data",
"[",
"\"sv\"",
"]",
"=",
"svs",
"[",
"0",
"]",
"else",
":",
"data",
"[",
"\"sv\"",
"]",
"=",
"{",
"}",
"data",
"=",
"_add_supplemental",
"(",
"data",
")",
"out_cwl",
".",
"append",
"(",
"[",
"data",
"]",
")",
"return",
"out_cwl",
"return",
"out"
] | 43 | 0.002394 | [
"def detect_sv(items, all_items=None, stage=\"standard\"):\n",
" \"\"\"Top level parallel target for examining structural variation.\n",
" \"\"\"\n",
" items = [utils.to_single_data(x) for x in items]\n",
" items = cwlutils.unpack_tarballs(items, items[0])\n",
" svcaller = items[0][\"config\"][\"algorithm\"].get(\"svcaller\")\n",
" caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller)\n",
" out = []\n",
" if svcaller and caller_fn:\n",
" if (all_items and svcaller in _NEEDS_BACKGROUND and\n",
" not vcfutils.is_paired_analysis([x.get(\"align_bam\") for x in items], items)):\n",
" names = set([dd.get_sample_name(x) for x in items])\n",
" background = [x for x in all_items if dd.get_sample_name(x) not in names]\n",
" for svdata in caller_fn(items, background):\n",
" out.append([svdata])\n",
" else:\n",
" for svdata in caller_fn(items):\n",
" out.append([svdata])\n",
" else:\n",
" for data in items:\n",
" out.append([data])\n",
" # Avoid nesting of callers for CWL runs for easier extraction\n",
" if cwlutils.is_cwl_run(items[0]):\n",
" out_cwl = []\n",
" for data in [utils.to_single_data(x) for x in out]:\n",
" # Run validation directly from CWL runs since we're single stage\n",
" data = validate.evaluate(data)\n",
" data[\"svvalidate\"] = {\"summary\": tz.get_in([\"sv-validate\", \"csv\"], data)}\n",
" svs = data.get(\"sv\")\n",
" if svs:\n",
" assert len(svs) == 1, svs\n",
" data[\"sv\"] = svs[0]\n",
" else:\n",
" data[\"sv\"] = {}\n",
" data = _add_supplemental(data)\n",
" out_cwl.append([data])\n",
" return out_cwl\n",
" return out"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 38 | 0.002772 |
def build_pdf(path_jinja2, template_name, path_outfile, template_kwargs=None):
'''Helper function for building a pdf from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
'''
latex_template_object = LatexBuild(
path_jinja2,
template_name,
template_kwargs,
)
return latex_template_object.build_pdf(path_outfile) | [
"def",
"build_pdf",
"(",
"path_jinja2",
",",
"template_name",
",",
"path_outfile",
",",
"template_kwargs",
"=",
"None",
")",
":",
"latex_template_object",
"=",
"LatexBuild",
"(",
"path_jinja2",
",",
"template_name",
",",
"template_kwargs",
",",
")",
"return",
"latex_template_object",
".",
"build_pdf",
"(",
"path_outfile",
")"
] | 45.882353 | 0.001256 | [
"def build_pdf(path_jinja2, template_name, path_outfile, template_kwargs=None):\n",
" '''Helper function for building a pdf from a latex jinja2 template\n",
"\n",
" :param path_jinja2: the root directory for latex jinja2 templates\n",
" :param template_name: the relative path, to path_jinja2, to the desired\n",
" jinja2 Latex template\n",
" :param path_outfile: the full path to the desired final output file\n",
" Must contain the same file extension as files generated by\n",
" cmd_wo_infile, otherwise the process will fail\n",
" :param template_kwargs: a dictionary of key/values for jinja2 variables\n",
" '''\n",
" latex_template_object = LatexBuild(\n",
" path_jinja2,\n",
" template_name,\n",
" template_kwargs,\n",
" )\n",
" return latex_template_object.build_pdf(path_outfile)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856
] | 17 | 0.00105 |
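A hedged usage sketch for the helper above; the template directory, template name, output path and context values are placeholders, and LatexBuild is assumed to come from the same module as build_pdf.

pdf_path = build_pdf(
    path_jinja2='templates/',           # root of the jinja2 Latex templates
    template_name='report.tex',         # relative to path_jinja2
    path_outfile='/tmp/report.pdf',     # extension must match the built file
    template_kwargs={'title': 'Monthly report', 'author': 'A. Author'},
)
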
def gapfind(model, solver, epsilon=0.001, v_max=1000, implicit_sinks=True):
"""Identify compounds in the model that cannot be produced.
Yields all compounds that cannot be produced. This method
assumes implicit sinks for all compounds in the model so
the only factor that influences whether a compound can be
produced is the presence of the compounds needed to produce it.
Epsilon indicates the threshold amount of reaction flux for the products
to be considered non-blocked. V_max indicates the maximum flux.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
"""
prob = solver.create_problem()
# Set integrality tolerance such that w constraints are correct
min_tol = prob.integrality_tolerance.min
int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)
if int_tol < prob.integrality_tolerance.value:
prob.integrality_tolerance.value = int_tol
# Define flux variables
v = prob.namespace()
for reaction_id in model.reactions:
lower, upper = model.limits[reaction_id]
v.define([reaction_id], lower=lower, upper=upper)
# Define constraints on production of metabolites in reaction
w = prob.namespace(types=lp.VariableType.Binary)
binary_cons_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
compound, reaction_id = spec
if value != 0:
w.define([spec])
w_var = w(spec)
lower, upper = (float(x) for x in model.limits[reaction_id])
if value > 0:
dv = v(reaction_id)
else:
dv = -v(reaction_id)
lower, upper = -upper, -lower
prob.add_linear_constraints(
dv <= upper * w_var,
dv >= epsilon + (lower - epsilon) * (1 - w_var))
binary_cons_lhs[compound] += w_var
xp = prob.namespace(model.compounds, types=lp.VariableType.Binary)
objective = xp.sum(model.compounds)
prob.set_objective(objective)
for compound, lhs in iteritems(binary_cons_lhs):
prob.add_linear_constraints(lhs >= xp(compound))
# Define mass balance constraints
massbalance_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
compound, reaction_id = spec
massbalance_lhs[compound] += v(reaction_id) * value
for compound, lhs in iteritems(massbalance_lhs):
if implicit_sinks:
# The constraint is merely >0 meaning that we have implicit sinks
# for all compounds.
prob.add_linear_constraints(lhs >= 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
result = prob.solve(lp.ObjectiveSense.Maximize)
except lp.SolverError as e:
raise_from(GapFillError('Failed to solve gapfill: {}'.format(e), e))
for compound in model.compounds:
if result.get_value(xp(compound)) < 0.5:
yield compound | [
"def",
"gapfind",
"(",
"model",
",",
"solver",
",",
"epsilon",
"=",
"0.001",
",",
"v_max",
"=",
"1000",
",",
"implicit_sinks",
"=",
"True",
")",
":",
"prob",
"=",
"solver",
".",
"create_problem",
"(",
")",
"# Set integrality tolerance such that w constraints are correct",
"min_tol",
"=",
"prob",
".",
"integrality_tolerance",
".",
"min",
"int_tol",
"=",
"_find_integer_tolerance",
"(",
"epsilon",
",",
"v_max",
",",
"min_tol",
")",
"if",
"int_tol",
"<",
"prob",
".",
"integrality_tolerance",
".",
"value",
":",
"prob",
".",
"integrality_tolerance",
".",
"value",
"=",
"int_tol",
"# Define flux variables",
"v",
"=",
"prob",
".",
"namespace",
"(",
")",
"for",
"reaction_id",
"in",
"model",
".",
"reactions",
":",
"lower",
",",
"upper",
"=",
"model",
".",
"limits",
"[",
"reaction_id",
"]",
"v",
".",
"define",
"(",
"[",
"reaction_id",
"]",
",",
"lower",
"=",
"lower",
",",
"upper",
"=",
"upper",
")",
"# Define constraints on production of metabolites in reaction",
"w",
"=",
"prob",
".",
"namespace",
"(",
"types",
"=",
"lp",
".",
"VariableType",
".",
"Binary",
")",
"binary_cons_lhs",
"=",
"{",
"compound",
":",
"0",
"for",
"compound",
"in",
"model",
".",
"compounds",
"}",
"for",
"spec",
",",
"value",
"in",
"iteritems",
"(",
"model",
".",
"matrix",
")",
":",
"compound",
",",
"reaction_id",
"=",
"spec",
"if",
"value",
"!=",
"0",
":",
"w",
".",
"define",
"(",
"[",
"spec",
"]",
")",
"w_var",
"=",
"w",
"(",
"spec",
")",
"lower",
",",
"upper",
"=",
"(",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"model",
".",
"limits",
"[",
"reaction_id",
"]",
")",
"if",
"value",
">",
"0",
":",
"dv",
"=",
"v",
"(",
"reaction_id",
")",
"else",
":",
"dv",
"=",
"-",
"v",
"(",
"reaction_id",
")",
"lower",
",",
"upper",
"=",
"-",
"upper",
",",
"-",
"lower",
"prob",
".",
"add_linear_constraints",
"(",
"dv",
"<=",
"upper",
"*",
"w_var",
",",
"dv",
">=",
"epsilon",
"+",
"(",
"lower",
"-",
"epsilon",
")",
"*",
"(",
"1",
"-",
"w_var",
")",
")",
"binary_cons_lhs",
"[",
"compound",
"]",
"+=",
"w_var",
"xp",
"=",
"prob",
".",
"namespace",
"(",
"model",
".",
"compounds",
",",
"types",
"=",
"lp",
".",
"VariableType",
".",
"Binary",
")",
"objective",
"=",
"xp",
".",
"sum",
"(",
"model",
".",
"compounds",
")",
"prob",
".",
"set_objective",
"(",
"objective",
")",
"for",
"compound",
",",
"lhs",
"in",
"iteritems",
"(",
"binary_cons_lhs",
")",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
">=",
"xp",
"(",
"compound",
")",
")",
"# Define mass balance constraints",
"massbalance_lhs",
"=",
"{",
"compound",
":",
"0",
"for",
"compound",
"in",
"model",
".",
"compounds",
"}",
"for",
"spec",
",",
"value",
"in",
"iteritems",
"(",
"model",
".",
"matrix",
")",
":",
"compound",
",",
"reaction_id",
"=",
"spec",
"massbalance_lhs",
"[",
"compound",
"]",
"+=",
"v",
"(",
"reaction_id",
")",
"*",
"value",
"for",
"compound",
",",
"lhs",
"in",
"iteritems",
"(",
"massbalance_lhs",
")",
":",
"if",
"implicit_sinks",
":",
"# The constraint is merely >0 meaning that we have implicit sinks",
"# for all compounds.",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
">=",
"0",
")",
"else",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
"==",
"0",
")",
"# Solve",
"try",
":",
"result",
"=",
"prob",
".",
"solve",
"(",
"lp",
".",
"ObjectiveSense",
".",
"Maximize",
")",
"except",
"lp",
".",
"SolverError",
"as",
"e",
":",
"raise_from",
"(",
"GapFillError",
"(",
"'Failed to solve gapfill: {}'",
".",
"format",
"(",
"e",
")",
",",
"e",
")",
")",
"for",
"compound",
"in",
"model",
".",
"compounds",
":",
"if",
"result",
".",
"get_value",
"(",
"xp",
"(",
"compound",
")",
")",
"<",
"0.5",
":",
"yield",
"compound"
] | 38.640449 | 0.000284 | [
"def gapfind(model, solver, epsilon=0.001, v_max=1000, implicit_sinks=True):\n",
" \"\"\"Identify compounds in the model that cannot be produced.\n",
"\n",
" Yields all compounds that cannot be produced. This method\n",
" assumes implicit sinks for all compounds in the model so\n",
" the only factor that influences whether a compound can be\n",
" produced is the presence of the compounds needed to produce it.\n",
"\n",
" Epsilon indicates the threshold amount of reaction flux for the products\n",
" to be considered non-blocked. V_max indicates the maximum flux.\n",
"\n",
" This method is implemented as a MILP-program. Therefore it may\n",
" not be efficient for larger models.\n",
"\n",
" Args:\n",
" model: :class:`MetabolicModel` containing core reactions and reactions\n",
" that can be added for gap-filling.\n",
" solver: MILP solver instance.\n",
" epsilon: Threshold amount of a compound produced for it to not be\n",
" considered blocked.\n",
" v_max: Maximum flux.\n",
" implicit_sinks: Whether implicit sinks for all compounds are included\n",
" when gap-filling (traditional GapFill uses implicit sinks).\n",
" \"\"\"\n",
" prob = solver.create_problem()\n",
"\n",
" # Set integrality tolerance such that w constraints are correct\n",
" min_tol = prob.integrality_tolerance.min\n",
" int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)\n",
" if int_tol < prob.integrality_tolerance.value:\n",
" prob.integrality_tolerance.value = int_tol\n",
"\n",
" # Define flux variables\n",
" v = prob.namespace()\n",
" for reaction_id in model.reactions:\n",
" lower, upper = model.limits[reaction_id]\n",
" v.define([reaction_id], lower=lower, upper=upper)\n",
"\n",
" # Define constraints on production of metabolites in reaction\n",
" w = prob.namespace(types=lp.VariableType.Binary)\n",
" binary_cons_lhs = {compound: 0 for compound in model.compounds}\n",
" for spec, value in iteritems(model.matrix):\n",
" compound, reaction_id = spec\n",
" if value != 0:\n",
" w.define([spec])\n",
" w_var = w(spec)\n",
"\n",
" lower, upper = (float(x) for x in model.limits[reaction_id])\n",
" if value > 0:\n",
" dv = v(reaction_id)\n",
" else:\n",
" dv = -v(reaction_id)\n",
" lower, upper = -upper, -lower\n",
"\n",
" prob.add_linear_constraints(\n",
" dv <= upper * w_var,\n",
" dv >= epsilon + (lower - epsilon) * (1 - w_var))\n",
"\n",
" binary_cons_lhs[compound] += w_var\n",
"\n",
" xp = prob.namespace(model.compounds, types=lp.VariableType.Binary)\n",
" objective = xp.sum(model.compounds)\n",
" prob.set_objective(objective)\n",
"\n",
" for compound, lhs in iteritems(binary_cons_lhs):\n",
" prob.add_linear_constraints(lhs >= xp(compound))\n",
"\n",
" # Define mass balance constraints\n",
" massbalance_lhs = {compound: 0 for compound in model.compounds}\n",
" for spec, value in iteritems(model.matrix):\n",
" compound, reaction_id = spec\n",
" massbalance_lhs[compound] += v(reaction_id) * value\n",
" for compound, lhs in iteritems(massbalance_lhs):\n",
" if implicit_sinks:\n",
" # The constraint is merely >0 meaning that we have implicit sinks\n",
" # for all compounds.\n",
" prob.add_linear_constraints(lhs >= 0)\n",
" else:\n",
" prob.add_linear_constraints(lhs == 0)\n",
"\n",
" # Solve\n",
" try:\n",
" result = prob.solve(lp.ObjectiveSense.Maximize)\n",
" except lp.SolverError as e:\n",
" raise_from(GapFillError('Failed to solve gapfill: {}'.format(e), e))\n",
"\n",
" for compound in model.compounds:\n",
" if result.get_value(xp(compound)) < 0.5:\n",
" yield compound"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464
] | 89 | 0.000432 |
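The pair of linear constraints in the record above, dv <= upper*w and dv >= epsilon + (lower - epsilon)*(1 - w), is what ties each binary w to "this reaction produces at least epsilon of the compound". The arithmetic can be checked without any MILP solver; the bounds below are illustrative only.

# Check the indicator constraints for a producing flux dv with bounds
# lower=-10, upper=10 and epsilon=0.001 (illustrative numbers).
lower, upper, epsilon = -10.0, 10.0, 0.001

def feasible(dv, w):
    return dv <= upper * w and dv >= epsilon + (lower - epsilon) * (1 - w)

print(feasible(0.0, 0))   # True : no production, compound may stay blocked
print(feasible(5.0, 1))   # True : production >= epsilon allows w = 1
print(feasible(5.0, 0))   # False: positive flux forces the binary to 1
print(feasible(0.0, 1))   # False: w = 1 requires at least epsilon production
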
def predict(self, h=5, oos_data=None, intervals=False):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
intervals : boolean (default: False)
Whether to return prediction intervals
Returns
----------
- pd.DataFrame with predicted values
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
dep_var = self.formula.split("~")[0]
oos_data[dep_var] = oos_data[dep_var].replace(np.nan, 0)
_, X_oos = dmatrices(self.formula, oos_data)
X_oos = np.array([X_oos])[0]
X_pred = X_oos[:h]
# Retrieve data, dates and (transformed) latent variables
mu, Y = self._model(self.latent_variables.get_z_values())
date_index = self.shift_dates(h)
if self.latent_variables.estimation_method in ['M-H']:
sim_vector = self._sim_prediction_bayes(h, X_pred, 15000)
forecasted_values = np.array([np.mean(i) for i in sim_vector])
prediction_01 = np.array([np.percentile(i, 1) for i in sim_vector])
prediction_05 = np.array([np.percentile(i, 5) for i in sim_vector])
prediction_95 = np.array([np.percentile(i, 95) for i in sim_vector])
prediction_99 = np.array([np.percentile(i, 99) for i in sim_vector])
else:
t_z = self.transform_z()
mean_values = self._mean_prediction(mu, Y, h, t_z, X_pred)
if self.model_name2 == "Skewt":
model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z)
m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0))
forecasted_values = mean_values[-h:] + (model_skewness - (1.0/model_skewness))*model_scale*m1
else:
forecasted_values = mean_values[-h:]
if intervals is True:
sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 15000)
else:
sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 2)
if intervals is False:
result = pd.DataFrame(forecasted_values)
result.rename(columns={0:self.data_name}, inplace=True)
else:
# Get mean prediction and simulations (for errors)
if self.latent_variables.estimation_method not in ['M-H']:
sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 15000)
prediction_01 = np.array([np.percentile(i, 1) for i in sim_values])
prediction_05 = np.array([np.percentile(i, 5) for i in sim_values])
prediction_95 = np.array([np.percentile(i, 95) for i in sim_values])
prediction_99 = np.array([np.percentile(i, 99) for i in sim_values])
result = pd.DataFrame([forecasted_values, prediction_01, prediction_05,
prediction_95, prediction_99]).T
result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
inplace=True)
result.index = date_index[-h:]
return result | [
"def",
"predict",
"(",
"self",
",",
"h",
"=",
"5",
",",
"oos_data",
"=",
"None",
",",
"intervals",
"=",
"False",
")",
":",
"if",
"self",
".",
"latent_variables",
".",
"estimated",
"is",
"False",
":",
"raise",
"Exception",
"(",
"\"No latent variables estimated!\"",
")",
"else",
":",
"dep_var",
"=",
"self",
".",
"formula",
".",
"split",
"(",
"\"~\"",
")",
"[",
"0",
"]",
"oos_data",
"[",
"dep_var",
"]",
"=",
"oos_data",
"[",
"dep_var",
"]",
".",
"replace",
"(",
"np",
".",
"nan",
",",
"0",
")",
"_",
",",
"X_oos",
"=",
"dmatrices",
"(",
"self",
".",
"formula",
",",
"oos_data",
")",
"X_oos",
"=",
"np",
".",
"array",
"(",
"[",
"X_oos",
"]",
")",
"[",
"0",
"]",
"X_pred",
"=",
"X_oos",
"[",
":",
"h",
"]",
"# Retrieve data, dates and (transformed) latent variables",
"mu",
",",
"Y",
"=",
"self",
".",
"_model",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
")",
"date_index",
"=",
"self",
".",
"shift_dates",
"(",
"h",
")",
"if",
"self",
".",
"latent_variables",
".",
"estimation_method",
"in",
"[",
"'M-H'",
"]",
":",
"sim_vector",
"=",
"self",
".",
"_sim_prediction_bayes",
"(",
"h",
",",
"X_pred",
",",
"15000",
")",
"forecasted_values",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"mean",
"(",
"i",
")",
"for",
"i",
"in",
"sim_vector",
"]",
")",
"prediction_01",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"1",
")",
"for",
"i",
"in",
"sim_vector",
"]",
")",
"prediction_05",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"5",
")",
"for",
"i",
"in",
"sim_vector",
"]",
")",
"prediction_95",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"95",
")",
"for",
"i",
"in",
"sim_vector",
"]",
")",
"prediction_99",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"99",
")",
"for",
"i",
"in",
"sim_vector",
"]",
")",
"else",
":",
"t_z",
"=",
"self",
".",
"transform_z",
"(",
")",
"mean_values",
"=",
"self",
".",
"_mean_prediction",
"(",
"mu",
",",
"Y",
",",
"h",
",",
"t_z",
",",
"X_pred",
")",
"if",
"self",
".",
"model_name2",
"==",
"\"Skewt\"",
":",
"model_scale",
",",
"model_shape",
",",
"model_skewness",
"=",
"self",
".",
"_get_scale_and_shape",
"(",
"t_z",
")",
"m1",
"=",
"(",
"np",
".",
"sqrt",
"(",
"model_shape",
")",
"*",
"sp",
".",
"gamma",
"(",
"(",
"model_shape",
"-",
"1.0",
")",
"/",
"2.0",
")",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"pi",
")",
"*",
"sp",
".",
"gamma",
"(",
"model_shape",
"/",
"2.0",
")",
")",
"forecasted_values",
"=",
"mean_values",
"[",
"-",
"h",
":",
"]",
"+",
"(",
"model_skewness",
"-",
"(",
"1.0",
"/",
"model_skewness",
")",
")",
"*",
"model_scale",
"*",
"m1",
"else",
":",
"forecasted_values",
"=",
"mean_values",
"[",
"-",
"h",
":",
"]",
"if",
"intervals",
"is",
"True",
":",
"sim_values",
"=",
"self",
".",
"_sim_prediction",
"(",
"mu",
",",
"Y",
",",
"h",
",",
"t_z",
",",
"X_pred",
",",
"15000",
")",
"else",
":",
"sim_values",
"=",
"self",
".",
"_sim_prediction",
"(",
"mu",
",",
"Y",
",",
"h",
",",
"t_z",
",",
"X_pred",
",",
"2",
")",
"if",
"intervals",
"is",
"False",
":",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"forecasted_values",
")",
"result",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"self",
".",
"data_name",
"}",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"# Get mean prediction and simulations (for errors)",
"if",
"self",
".",
"latent_variables",
".",
"estimation_method",
"not",
"in",
"[",
"'M-H'",
"]",
":",
"sim_values",
"=",
"self",
".",
"_sim_prediction",
"(",
"mu",
",",
"Y",
",",
"h",
",",
"t_z",
",",
"X_pred",
",",
"15000",
")",
"prediction_01",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"1",
")",
"for",
"i",
"in",
"sim_values",
"]",
")",
"prediction_05",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"5",
")",
"for",
"i",
"in",
"sim_values",
"]",
")",
"prediction_95",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"95",
")",
"for",
"i",
"in",
"sim_values",
"]",
")",
"prediction_99",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"percentile",
"(",
"i",
",",
"99",
")",
"for",
"i",
"in",
"sim_values",
"]",
")",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"forecasted_values",
",",
"prediction_01",
",",
"prediction_05",
",",
"prediction_95",
",",
"prediction_99",
"]",
")",
".",
"T",
"result",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"self",
".",
"data_name",
",",
"1",
":",
"\"1% Prediction Interval\"",
",",
"2",
":",
"\"5% Prediction Interval\"",
",",
"3",
":",
"\"95% Prediction Interval\"",
",",
"4",
":",
"\"99% Prediction Interval\"",
"}",
",",
"inplace",
"=",
"True",
")",
"result",
".",
"index",
"=",
"date_index",
"[",
"-",
"h",
":",
"]",
"return",
"result"
] | 45.3 | 0.008372 | [
"def predict(self, h=5, oos_data=None, intervals=False):\n",
" \"\"\" Makes forecast with the estimated model\n",
"\n",
" Parameters\n",
" ----------\n",
" h : int (default : 5)\n",
" How many steps ahead would you like to forecast?\n",
"\n",
" oos_data : pd.DataFrame\n",
" Data for the variables to be used out of sample (ys can be NaNs)\n",
"\n",
" intervals : boolean (default: False)\n",
" Whether to return prediction intervals\n",
"\n",
" Returns\n",
" ----------\n",
" - pd.DataFrame with predicted values\n",
" \"\"\" \n",
"\n",
" if self.latent_variables.estimated is False:\n",
" raise Exception(\"No latent variables estimated!\")\n",
" else:\n",
"\n",
" dep_var = self.formula.split(\"~\")[0]\n",
" oos_data[dep_var] = oos_data[dep_var].replace(np.nan, 0)\n",
"\n",
" _, X_oos = dmatrices(self.formula, oos_data)\n",
" X_oos = np.array([X_oos])[0]\n",
" X_pred = X_oos[:h]\n",
"\n",
" # Retrieve data, dates and (transformed) latent variables\n",
" mu, Y = self._model(self.latent_variables.get_z_values()) \n",
" date_index = self.shift_dates(h)\n",
"\n",
" if self.latent_variables.estimation_method in ['M-H']:\n",
" sim_vector = self._sim_prediction_bayes(h, X_pred, 15000)\n",
"\n",
" forecasted_values = np.array([np.mean(i) for i in sim_vector])\n",
" prediction_01 = np.array([np.percentile(i, 1) for i in sim_vector])\n",
" prediction_05 = np.array([np.percentile(i, 5) for i in sim_vector])\n",
" prediction_95 = np.array([np.percentile(i, 95) for i in sim_vector])\n",
" prediction_99 = np.array([np.percentile(i, 99) for i in sim_vector])\n",
"\n",
" else:\n",
" t_z = self.transform_z()\n",
" mean_values = self._mean_prediction(mu, Y, h, t_z, X_pred)\n",
"\n",
" if self.model_name2 == \"Skewt\":\n",
" model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z)\n",
" m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0))\n",
" forecasted_values = mean_values[-h:] + (model_skewness - (1.0/model_skewness))*model_scale*m1 \n",
" else:\n",
" forecasted_values = mean_values[-h:] \n",
"\n",
" if intervals is True:\n",
" sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 15000)\n",
" else:\n",
" sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 2)\n",
"\n",
" if intervals is False:\n",
" result = pd.DataFrame(forecasted_values)\n",
" result.rename(columns={0:self.data_name}, inplace=True)\n",
" else:\n",
" # Get mean prediction and simulations (for errors)\n",
" if self.latent_variables.estimation_method not in ['M-H']:\n",
" sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 15000)\n",
" prediction_01 = np.array([np.percentile(i, 1) for i in sim_values])\n",
" prediction_05 = np.array([np.percentile(i, 5) for i in sim_values])\n",
" prediction_95 = np.array([np.percentile(i, 95) for i in sim_values])\n",
" prediction_99 = np.array([np.percentile(i, 99) for i in sim_values])\n",
"\n",
" result = pd.DataFrame([forecasted_values, prediction_01, prediction_05, \n",
" prediction_95, prediction_99]).T\n",
" result.rename(columns={0:self.data_name, 1: \"1% Prediction Interval\", \n",
" 2: \"5% Prediction Interval\", 3: \"95% Prediction Interval\", 4: \"99% Prediction Interval\"}, \n",
" inplace=True)\n",
" \n",
" result.index = date_index[-h:]\n",
"\n",
" return result"
] | [
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.011904761904761904,
0.011764705882352941,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0.008130081300813009,
0.017391304347826087,
0,
0.017241379310344827,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0.013888888888888888,
0,
0,
0,
0.011904761904761904,
0.011363636363636364,
0.011363636363636364,
0.011235955056179775,
0.011235955056179775,
0,
0.02247191011235955,
0.018867924528301886,
0.034482758620689655,
0.02702702702702703,
0.029411764705882353,
0.5,
0,
0,
0.04
] | 80 | 0.012059 |
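The prediction intervals in the record above are simply percentiles taken across simulated paths. A stripped-down numpy sketch of that summarization step, with random draws standing in for the model's simulations:

import numpy as np

rng = np.random.default_rng(0)
h = 5
sim_vector = rng.normal(size=(h, 15000))   # one row of simulated values per step

forecast = sim_vector.mean(axis=1)
lower_05 = np.percentile(sim_vector, 5, axis=1)
upper_95 = np.percentile(sim_vector, 95, axis=1)
for step, (f, lo, hi) in enumerate(zip(forecast, lower_05, upper_95), 1):
    print('t+%d: %.2f  [%.2f, %.2f]' % (step, f, lo, hi))
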
def set_bpduguard(self, name, value=False, default=False, disable=False):
"""Configures the bpduguard value for the specified interface
Args:
name (string): The interface identifier to configure. The name
must be the full interface name (eg Ethernet1, not Et1)
value (bool): True if bpduguard is enabled otherwise False
default (bool): Configures the bpduguard parameter to its default
value using the EOS CLI default config command
disable (bool): Negates the bpduguard parameter using the EOS
CLI no config command
Returns:
True if the command succeeds, otherwise False
Raises:
ValueError: Rasied if an invalid interface name is specified
TypeError: Raised if the value keyword argument does not evaluate
to a valid boolean
"""
value = 'enable' if value else 'disable'
string = 'spanning-tree bpduguard'
cmds = self.command_builder(string, value=value, default=default,
disable=disable)
return self.configure_interface(name, cmds) | [
"def",
"set_bpduguard",
"(",
"self",
",",
"name",
",",
"value",
"=",
"False",
",",
"default",
"=",
"False",
",",
"disable",
"=",
"False",
")",
":",
"value",
"=",
"'enable'",
"if",
"value",
"else",
"'disable'",
"string",
"=",
"'spanning-tree bpduguard'",
"cmds",
"=",
"self",
".",
"command_builder",
"(",
"string",
",",
"value",
"=",
"value",
",",
"default",
"=",
"default",
",",
"disable",
"=",
"disable",
")",
"return",
"self",
".",
"configure_interface",
"(",
"name",
",",
"cmds",
")"
] | 38.933333 | 0.001671 | [
"def set_bpduguard(self, name, value=False, default=False, disable=False):\n",
" \"\"\"Configures the bpduguard value for the specified interface\n",
"\n",
" Args:\n",
" name (string): The interface identifier to configure. The name\n",
" must be the full interface name (eg Ethernet1, not Et1)\n",
"\n",
" value (bool): True if bpduguard is enabled otherwise False\n",
"\n",
" default (bool): Configures the bpduguard parameter to its default\n",
" value using the EOS CLI default config command\n",
"\n",
" disable (bool): Negates the bpduguard parameter using the EOS\n",
" CLI no config command\n",
"\n",
" Returns:\n",
" True if the command succeeds, otherwise False\n",
"\n",
" Raises:\n",
" ValueError: Rasied if an invalid interface name is specified\n",
"\n",
" TypeError: Raised if the value keyword argument does not evaluate\n",
" to a valid boolean\n",
"\n",
" \"\"\"\n",
" value = 'enable' if value else 'disable'\n",
" string = 'spanning-tree bpduguard'\n",
" cmds = self.command_builder(string, value=value, default=default,\n",
" disable=disable)\n",
" return self.configure_interface(name, cmds)"
] | [
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549
] | 30 | 0.00113 |
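command_builder in the record above follows the EOS CLI convention of "default", "no" and plain value forms. The sketch below is a freestanding approximation of that convention for illustration, not pyeapi's actual implementation.

def command_builder(string, value=None, default=False, disable=False):
    # 'default' wins over 'disable'; otherwise emit the command with its
    # value, or its negation, mirroring EOS CLI semantics.
    if default:
        return ['default %s' % string]
    if disable or not value:
        return ['no %s' % string]
    return ['%s %s' % (string, value)]

print(command_builder('spanning-tree bpduguard', value='enable'))
# ['spanning-tree bpduguard enable']
print(command_builder('spanning-tree bpduguard', disable=True))
# ['no spanning-tree bpduguard']
print(command_builder('spanning-tree bpduguard', default=True))
# ['default spanning-tree bpduguard']
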
def _validate_string(self, input_string, path_to_root, object_title=''):
'''
a helper method for validating properties of a string
:return: input_string
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_string,
'error_code': 4001
}
if 'byte_data' in input_criteria.keys():
if input_criteria['byte_data']:
error_dict['failed_test'] = 'byte_data'
error_dict['error_code'] = 4011
try:
decoded_bytes = b64decode(input_string)
except:
raise InputValidationError(error_dict)
if not isinstance(decoded_bytes, bytes):
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_string < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_string > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_string <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_string >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_string != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'min_length' in input_criteria.keys():
if len(input_string) < input_criteria['min_length']:
error_dict['failed_test'] = 'min_length'
error_dict['error_code'] = 4012
raise InputValidationError(error_dict)
if 'max_length' in input_criteria.keys():
if len(input_string) > input_criteria['max_length']:
error_dict['failed_test'] = 'max_length'
error_dict['error_code'] = 4013
raise InputValidationError(error_dict)
if 'must_not_contain' in input_criteria.keys():
for regex in input_criteria['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_not_contain'
error_dict['error_code'] = 4014
raise InputValidationError(error_dict)
if 'must_contain' in input_criteria.keys():
for regex in input_criteria['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_contain'
error_dict['error_code'] = 4015
raise InputValidationError(error_dict)
if 'contains_either' in input_criteria.keys():
regex_match = False
for regex in input_criteria['contains_either']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
regex_match = True
if not regex_match:
error_dict['failed_test'] = 'contains_either'
error_dict['error_code'] = 4016
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_string not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_string in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate string against identical to reference
# TODO: run lambda function and call validation url
return input_string | [
"def",
"_validate_string",
"(",
"self",
",",
"input_string",
",",
"path_to_root",
",",
"object_title",
"=",
"''",
")",
":",
"rules_path_to_root",
"=",
"re",
".",
"sub",
"(",
"'\\[\\d+\\]'",
",",
"'[0]'",
",",
"path_to_root",
")",
"input_criteria",
"=",
"self",
".",
"keyMap",
"[",
"rules_path_to_root",
"]",
"error_dict",
"=",
"{",
"'object_title'",
":",
"object_title",
",",
"'model_schema'",
":",
"self",
".",
"schema",
",",
"'input_criteria'",
":",
"input_criteria",
",",
"'failed_test'",
":",
"'value_datatype'",
",",
"'input_path'",
":",
"path_to_root",
",",
"'error_value'",
":",
"input_string",
",",
"'error_code'",
":",
"4001",
"}",
"if",
"'byte_data'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_criteria",
"[",
"'byte_data'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'byte_data'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4011",
"try",
":",
"decoded_bytes",
"=",
"b64decode",
"(",
"input_string",
")",
"except",
":",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"not",
"isinstance",
"(",
"decoded_bytes",
",",
"bytes",
")",
":",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'min_value'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
"<",
"input_criteria",
"[",
"'min_value'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'min_value'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4022",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'max_value'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
">",
"input_criteria",
"[",
"'max_value'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'max_value'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4023",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'greater_than'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
"<=",
"input_criteria",
"[",
"'greater_than'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'greater_than'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4024",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'less_than'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
">=",
"input_criteria",
"[",
"'less_than'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'less_than'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4025",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'equal_to'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
"!=",
"input_criteria",
"[",
"'equal_to'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'equal_to'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4026",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'min_length'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"len",
"(",
"input_string",
")",
"<",
"input_criteria",
"[",
"'min_length'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'min_length'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4012",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'max_length'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"len",
"(",
"input_string",
")",
">",
"input_criteria",
"[",
"'max_length'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'max_length'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4013",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'must_not_contain'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"for",
"regex",
"in",
"input_criteria",
"[",
"'must_not_contain'",
"]",
":",
"regex_pattern",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"if",
"regex_pattern",
".",
"findall",
"(",
"input_string",
")",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'must_not_contain'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4014",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'must_contain'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"for",
"regex",
"in",
"input_criteria",
"[",
"'must_contain'",
"]",
":",
"regex_pattern",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"if",
"not",
"regex_pattern",
".",
"findall",
"(",
"input_string",
")",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'must_contain'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4015",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'contains_either'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"regex_match",
"=",
"False",
"for",
"regex",
"in",
"input_criteria",
"[",
"'contains_either'",
"]",
":",
"regex_pattern",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"if",
"regex_pattern",
".",
"findall",
"(",
"input_string",
")",
":",
"regex_match",
"=",
"True",
"if",
"not",
"regex_match",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'contains_either'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4016",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'discrete_values'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
"not",
"in",
"input_criteria",
"[",
"'discrete_values'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'discrete_values'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4041",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"if",
"'excluded_values'",
"in",
"input_criteria",
".",
"keys",
"(",
")",
":",
"if",
"input_string",
"in",
"input_criteria",
"[",
"'excluded_values'",
"]",
":",
"error_dict",
"[",
"'failed_test'",
"]",
"=",
"'excluded_values'",
"error_dict",
"[",
"'error_code'",
"]",
"=",
"4042",
"raise",
"InputValidationError",
"(",
"error_dict",
")",
"# TODO: validate string against identical to reference",
"# TODO: run lambda function and call validation url",
"return",
"input_string"
] | 47.230769 | 0.001196 | [
"def _validate_string(self, input_string, path_to_root, object_title=''):\n",
"\n",
" '''\n",
" a helper method for validating properties of a string\n",
"\n",
" :return: input_string\n",
" '''\n",
"\n",
" rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n",
" input_criteria = self.keyMap[rules_path_to_root]\n",
" error_dict = {\n",
" 'object_title': object_title,\n",
" 'model_schema': self.schema,\n",
" 'input_criteria': input_criteria,\n",
" 'failed_test': 'value_datatype',\n",
" 'input_path': path_to_root,\n",
" 'error_value': input_string,\n",
" 'error_code': 4001\n",
" }\n",
" if 'byte_data' in input_criteria.keys():\n",
" if input_criteria['byte_data']:\n",
" error_dict['failed_test'] = 'byte_data'\n",
" error_dict['error_code'] = 4011\n",
" try:\n",
" decoded_bytes = b64decode(input_string)\n",
" except:\n",
" raise InputValidationError(error_dict)\n",
" if not isinstance(decoded_bytes, bytes):\n",
" raise InputValidationError(error_dict)\n",
" if 'min_value' in input_criteria.keys():\n",
" if input_string < input_criteria['min_value']:\n",
" error_dict['failed_test'] = 'min_value'\n",
" error_dict['error_code'] = 4022\n",
" raise InputValidationError(error_dict)\n",
" if 'max_value' in input_criteria.keys():\n",
" if input_string > input_criteria['max_value']:\n",
" error_dict['failed_test'] = 'max_value'\n",
" error_dict['error_code'] = 4023\n",
" raise InputValidationError(error_dict)\n",
" if 'greater_than' in input_criteria.keys():\n",
" if input_string <= input_criteria['greater_than']:\n",
" error_dict['failed_test'] = 'greater_than'\n",
" error_dict['error_code'] = 4024\n",
" raise InputValidationError(error_dict)\n",
" if 'less_than' in input_criteria.keys():\n",
" if input_string >= input_criteria['less_than']:\n",
" error_dict['failed_test'] = 'less_than'\n",
" error_dict['error_code'] = 4025\n",
" raise InputValidationError(error_dict)\n",
" if 'equal_to' in input_criteria.keys():\n",
" if input_string != input_criteria['equal_to']:\n",
" error_dict['failed_test'] = 'equal_to'\n",
" error_dict['error_code'] = 4026\n",
" raise InputValidationError(error_dict)\n",
" if 'min_length' in input_criteria.keys():\n",
" if len(input_string) < input_criteria['min_length']:\n",
" error_dict['failed_test'] = 'min_length'\n",
" error_dict['error_code'] = 4012\n",
" raise InputValidationError(error_dict)\n",
" if 'max_length' in input_criteria.keys():\n",
" if len(input_string) > input_criteria['max_length']:\n",
" error_dict['failed_test'] = 'max_length'\n",
" error_dict['error_code'] = 4013\n",
" raise InputValidationError(error_dict)\n",
" if 'must_not_contain' in input_criteria.keys():\n",
" for regex in input_criteria['must_not_contain']:\n",
" regex_pattern = re.compile(regex)\n",
" if regex_pattern.findall(input_string):\n",
" error_dict['failed_test'] = 'must_not_contain'\n",
" error_dict['error_code'] = 4014\n",
" raise InputValidationError(error_dict)\n",
" if 'must_contain' in input_criteria.keys():\n",
" for regex in input_criteria['must_contain']:\n",
" regex_pattern = re.compile(regex)\n",
" if not regex_pattern.findall(input_string):\n",
" error_dict['failed_test'] = 'must_contain'\n",
" error_dict['error_code'] = 4015\n",
" raise InputValidationError(error_dict)\n",
" if 'contains_either' in input_criteria.keys():\n",
" regex_match = False\n",
" for regex in input_criteria['contains_either']:\n",
" regex_pattern = re.compile(regex)\n",
" if regex_pattern.findall(input_string):\n",
" regex_match = True\n",
" if not regex_match:\n",
" error_dict['failed_test'] = 'contains_either'\n",
" error_dict['error_code'] = 4016\n",
" raise InputValidationError(error_dict)\n",
" if 'discrete_values' in input_criteria.keys():\n",
" if input_string not in input_criteria['discrete_values']:\n",
" error_dict['failed_test'] = 'discrete_values'\n",
" error_dict['error_code'] = 4041\n",
" raise InputValidationError(error_dict)\n",
" if 'excluded_values' in input_criteria.keys():\n",
" if input_string in input_criteria['excluded_values']:\n",
" error_dict['failed_test'] = 'excluded_values'\n",
" error_dict['error_code'] = 4042\n",
" raise InputValidationError(error_dict)\n",
"\n",
" # TODO: validate string against identical to reference\n",
"\n",
" # TODO: run lambda function and call validation url\n",
"\n",
" return input_string"
] | [
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.04411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035
] | 104 | 0.001982 |
def get_input_list(self):
"""
Description:
Get input list
Returns an ordered list of all available input keys and names
"""
inputs = [' '] * len(self.command['input'])
for key in self.command['input']:
inputs[self.command['input'][key]['order']] = {"key":key, "name":self.command['input'][key]['name']}
return inputs | [
"def",
"get_input_list",
"(",
"self",
")",
":",
"inputs",
"=",
"[",
"' '",
"]",
"*",
"len",
"(",
"self",
".",
"command",
"[",
"'input'",
"]",
")",
"for",
"key",
"in",
"self",
".",
"command",
"[",
"'input'",
"]",
":",
"inputs",
"[",
"self",
".",
"command",
"[",
"'input'",
"]",
"[",
"key",
"]",
"[",
"'order'",
"]",
"]",
"=",
"{",
"\"key\"",
":",
"key",
",",
"\"name\"",
":",
"self",
".",
"command",
"[",
"'input'",
"]",
"[",
"key",
"]",
"[",
"'name'",
"]",
"}",
"return",
"inputs"
] | 32.583333 | 0.012438 | [
"def get_input_list(self):\n",
" \"\"\"\n",
" Description:\n",
"\n",
" Get input list\n",
" Returns an ordered list of all available input keys and names\n",
"\n",
" \"\"\"\n",
" inputs = [' '] * len(self.command['input'])\n",
" for key in self.command['input']:\n",
" inputs[self.command['input'][key]['order']] = {\"key\":key, \"name\":self.command['input'][key]['name']}\n",
" return inputs"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.02654867256637168,
0.047619047619047616
] | 12 | 0.013125 |
def get_mesh_dict(self):
"""Returns calculated mesh sampling phonons
Returns
-------
dict
keys: qpoints, weights, frequencies, eigenvectors, and
group_velocities
Each value for the corresponding key is explained as below.
qpoints: ndarray
q-points in reduced coordinates of reciprocal lattice
dtype='double'
shape=(ir-grid points, 3)
weights: ndarray
Geometric q-point weights. Its sum is the number of grid
points.
dtype='intc'
shape=(ir-grid points,)
frequencies: ndarray
Phonon frequencies at ir-grid points. Imaginary frequenies are
represented by negative real numbers.
dtype='double'
shape=(ir-grid points, bands)
eigenvectors: ndarray
Phonon eigenvectors at ir-grid points. See the data structure
at np.linalg.eigh.
dtype='complex'
shape=(ir-grid points, bands, bands)
group_velocities: ndarray
Phonon group velocities at ir-grid points.
dtype='double'
shape=(ir-grid points, bands, 3)
"""
if self._mesh is None:
msg = ("run_mesh has to be done.")
raise RuntimeError(msg)
retdict = {'qpoints': self._mesh.qpoints,
'weights': self._mesh.weights,
'frequencies': self._mesh.frequencies,
'eigenvectors': self._mesh.eigenvectors,
'group_velocities': self._mesh.group_velocities}
return retdict | [
"def",
"get_mesh_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mesh",
"is",
"None",
":",
"msg",
"=",
"(",
"\"run_mesh has to be done.\"",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"retdict",
"=",
"{",
"'qpoints'",
":",
"self",
".",
"_mesh",
".",
"qpoints",
",",
"'weights'",
":",
"self",
".",
"_mesh",
".",
"weights",
",",
"'frequencies'",
":",
"self",
".",
"_mesh",
".",
"frequencies",
",",
"'eigenvectors'",
":",
"self",
".",
"_mesh",
".",
"eigenvectors",
",",
"'group_velocities'",
":",
"self",
".",
"_mesh",
".",
"group_velocities",
"}",
"return",
"retdict"
] | 36.361702 | 0.00114 | [
"def get_mesh_dict(self):\n",
" \"\"\"Returns calculated mesh sampling phonons\n",
"\n",
" Returns\n",
" -------\n",
" dict\n",
" keys: qpoints, weights, frequencies, eigenvectors, and\n",
" group_velocities\n",
"\n",
" Each value for the corresponding key is explained as below.\n",
"\n",
" qpoints: ndarray\n",
" q-points in reduced coordinates of reciprocal lattice\n",
" dtype='double'\n",
" shape=(ir-grid points, 3)\n",
" weights: ndarray\n",
" Geometric q-point weights. Its sum is the number of grid\n",
" points.\n",
" dtype='intc'\n",
" shape=(ir-grid points,)\n",
" frequencies: ndarray\n",
" Phonon frequencies at ir-grid points. Imaginary frequenies are\n",
" represented by negative real numbers.\n",
" dtype='double'\n",
" shape=(ir-grid points, bands)\n",
" eigenvectors: ndarray\n",
" Phonon eigenvectors at ir-grid points. See the data structure\n",
" at np.linalg.eigh.\n",
" dtype='complex'\n",
" shape=(ir-grid points, bands, bands)\n",
" group_velocities: ndarray\n",
" Phonon group velocities at ir-grid points.\n",
" dtype='double'\n",
" shape=(ir-grid points, bands, 3)\n",
"\n",
" \"\"\"\n",
" if self._mesh is None:\n",
" msg = (\"run_mesh has to be done.\")\n",
" raise RuntimeError(msg)\n",
"\n",
" retdict = {'qpoints': self._mesh.qpoints,\n",
" 'weights': self._mesh.weights,\n",
" 'frequencies': self._mesh.frequencies,\n",
" 'eigenvectors': self._mesh.eigenvectors,\n",
" 'group_velocities': self._mesh.group_velocities}\n",
"\n",
" return retdict"
] | [
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 47 | 0.001376 |
def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0,
sub_vars=True, interactive=False, overwrite=True,
template_renderer=None, out_=sys.stdout):
"""
Copies the ``source`` directory to the ``dest`` directory.
``vars``: A dictionary of variables to use in any substitutions.
``verbosity``: Higher numbers will show more about what is happening.
``simulate``: If true, then don't actually *do* anything.
``indent``: Indent any messages by this amount.
``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+``
in filenames will be substituted.
``overwrite``: If false, then don't ever overwrite anything.
``interactive``: If you are overwriting a file and interactive is
true, then ask before overwriting.
``template_renderer``: This is a function for rendering templates (if you
don't want to use string.Template). It should have the signature
``template_renderer(content_as_string, vars_as_dict,
filename=filename)``.
"""
def out(msg):
out_.write(msg)
out_.write('\n')
out_.flush()
# This allows you to use a leading +dot+ in filenames which would
# otherwise be skipped because leading dots make the file hidden:
vars.setdefault('dot', '.')
vars.setdefault('plus', '+')
use_pkg_resources = isinstance(source, tuple)
if use_pkg_resources:
names = sorted(pkg_resources.resource_listdir(source[0], source[1]))
else:
names = sorted(os.listdir(source))
pad = ' '*(indent*2)
if not os.path.exists(dest):
if verbosity >= 1:
out('%sCreating %s/' % (pad, dest))
if not simulate:
makedirs(dest, verbosity=verbosity, pad=pad)
elif verbosity >= 2:
out('%sDirectory %s exists' % (pad, dest))
for name in names:
if use_pkg_resources:
full = '/'.join([source[1], name])
else:
full = os.path.join(source, name)
reason = should_skip_file(name)
if reason:
if verbosity >= 2:
reason = pad + reason % {'filename': full}
out(reason)
continue # pragma: no cover
if sub_vars:
dest_full = os.path.join(dest, substitute_filename(name, vars))
sub_file = False
if dest_full.endswith('_tmpl'):
dest_full = dest_full[:-5]
sub_file = sub_vars
if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):
if verbosity:
out('%sRecursing into %s' % (pad, os.path.basename(full)))
copy_dir((source[0], full), dest_full, vars, verbosity, simulate,
indent=indent+1,
sub_vars=sub_vars, interactive=interactive,
template_renderer=template_renderer, out_=out_)
continue
elif not use_pkg_resources and os.path.isdir(full):
if verbosity:
out('%sRecursing into %s' % (pad, os.path.basename(full)))
copy_dir(full, dest_full, vars, verbosity, simulate,
indent=indent+1,
sub_vars=sub_vars, interactive=interactive,
template_renderer=template_renderer, out_=out_)
continue
elif use_pkg_resources:
content = pkg_resources.resource_string(source[0], full)
else:
f = open(full, 'rb')
content = f.read()
f.close()
if sub_file:
try:
content = substitute_content(
content, vars, filename=full,
template_renderer=template_renderer
)
except SkipTemplate:
continue # pragma: no cover
if content is None:
continue # pragma: no cover
already_exists = os.path.exists(dest_full)
if already_exists:
f = open(dest_full, 'rb')
old_content = f.read()
f.close()
if old_content == content:
if verbosity:
out('%s%s already exists (same content)' %
(pad, dest_full))
continue # pragma: no cover
if interactive:
if not query_interactive(
native_(full, fsenc), native_(dest_full, fsenc),
native_(content, fsenc), native_(old_content, fsenc),
simulate=simulate, out_=out_):
continue
elif not overwrite:
continue # pragma: no cover
if verbosity and use_pkg_resources:
out('%sCopying %s to %s' % (pad, full, dest_full))
elif verbosity:
out(
'%sCopying %s to %s' % (pad, os.path.basename(full),
dest_full))
if not simulate:
f = open(dest_full, 'wb')
f.write(content)
f.close() | [
"def",
"copy_dir",
"(",
"source",
",",
"dest",
",",
"vars",
",",
"verbosity",
"=",
"1",
",",
"simulate",
"=",
"False",
",",
"indent",
"=",
"0",
",",
"sub_vars",
"=",
"True",
",",
"interactive",
"=",
"False",
",",
"overwrite",
"=",
"True",
",",
"template_renderer",
"=",
"None",
",",
"out_",
"=",
"sys",
".",
"stdout",
")",
":",
"def",
"out",
"(",
"msg",
")",
":",
"out_",
".",
"write",
"(",
"msg",
")",
"out_",
".",
"write",
"(",
"'\\n'",
")",
"out_",
".",
"flush",
"(",
")",
"# This allows you to use a leading +dot+ in filenames which would",
"# otherwise be skipped because leading dots make the file hidden:",
"vars",
".",
"setdefault",
"(",
"'dot'",
",",
"'.'",
")",
"vars",
".",
"setdefault",
"(",
"'plus'",
",",
"'+'",
")",
"use_pkg_resources",
"=",
"isinstance",
"(",
"source",
",",
"tuple",
")",
"if",
"use_pkg_resources",
":",
"names",
"=",
"sorted",
"(",
"pkg_resources",
".",
"resource_listdir",
"(",
"source",
"[",
"0",
"]",
",",
"source",
"[",
"1",
"]",
")",
")",
"else",
":",
"names",
"=",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"source",
")",
")",
"pad",
"=",
"' '",
"*",
"(",
"indent",
"*",
"2",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"if",
"verbosity",
">=",
"1",
":",
"out",
"(",
"'%sCreating %s/'",
"%",
"(",
"pad",
",",
"dest",
")",
")",
"if",
"not",
"simulate",
":",
"makedirs",
"(",
"dest",
",",
"verbosity",
"=",
"verbosity",
",",
"pad",
"=",
"pad",
")",
"elif",
"verbosity",
">=",
"2",
":",
"out",
"(",
"'%sDirectory %s exists'",
"%",
"(",
"pad",
",",
"dest",
")",
")",
"for",
"name",
"in",
"names",
":",
"if",
"use_pkg_resources",
":",
"full",
"=",
"'/'",
".",
"join",
"(",
"[",
"source",
"[",
"1",
"]",
",",
"name",
"]",
")",
"else",
":",
"full",
"=",
"os",
".",
"path",
".",
"join",
"(",
"source",
",",
"name",
")",
"reason",
"=",
"should_skip_file",
"(",
"name",
")",
"if",
"reason",
":",
"if",
"verbosity",
">=",
"2",
":",
"reason",
"=",
"pad",
"+",
"reason",
"%",
"{",
"'filename'",
":",
"full",
"}",
"out",
"(",
"reason",
")",
"continue",
"# pragma: no cover",
"if",
"sub_vars",
":",
"dest_full",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest",
",",
"substitute_filename",
"(",
"name",
",",
"vars",
")",
")",
"sub_file",
"=",
"False",
"if",
"dest_full",
".",
"endswith",
"(",
"'_tmpl'",
")",
":",
"dest_full",
"=",
"dest_full",
"[",
":",
"-",
"5",
"]",
"sub_file",
"=",
"sub_vars",
"if",
"use_pkg_resources",
"and",
"pkg_resources",
".",
"resource_isdir",
"(",
"source",
"[",
"0",
"]",
",",
"full",
")",
":",
"if",
"verbosity",
":",
"out",
"(",
"'%sRecursing into %s'",
"%",
"(",
"pad",
",",
"os",
".",
"path",
".",
"basename",
"(",
"full",
")",
")",
")",
"copy_dir",
"(",
"(",
"source",
"[",
"0",
"]",
",",
"full",
")",
",",
"dest_full",
",",
"vars",
",",
"verbosity",
",",
"simulate",
",",
"indent",
"=",
"indent",
"+",
"1",
",",
"sub_vars",
"=",
"sub_vars",
",",
"interactive",
"=",
"interactive",
",",
"template_renderer",
"=",
"template_renderer",
",",
"out_",
"=",
"out_",
")",
"continue",
"elif",
"not",
"use_pkg_resources",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"full",
")",
":",
"if",
"verbosity",
":",
"out",
"(",
"'%sRecursing into %s'",
"%",
"(",
"pad",
",",
"os",
".",
"path",
".",
"basename",
"(",
"full",
")",
")",
")",
"copy_dir",
"(",
"full",
",",
"dest_full",
",",
"vars",
",",
"verbosity",
",",
"simulate",
",",
"indent",
"=",
"indent",
"+",
"1",
",",
"sub_vars",
"=",
"sub_vars",
",",
"interactive",
"=",
"interactive",
",",
"template_renderer",
"=",
"template_renderer",
",",
"out_",
"=",
"out_",
")",
"continue",
"elif",
"use_pkg_resources",
":",
"content",
"=",
"pkg_resources",
".",
"resource_string",
"(",
"source",
"[",
"0",
"]",
",",
"full",
")",
"else",
":",
"f",
"=",
"open",
"(",
"full",
",",
"'rb'",
")",
"content",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"if",
"sub_file",
":",
"try",
":",
"content",
"=",
"substitute_content",
"(",
"content",
",",
"vars",
",",
"filename",
"=",
"full",
",",
"template_renderer",
"=",
"template_renderer",
")",
"except",
"SkipTemplate",
":",
"continue",
"# pragma: no cover",
"if",
"content",
"is",
"None",
":",
"continue",
"# pragma: no cover",
"already_exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"dest_full",
")",
"if",
"already_exists",
":",
"f",
"=",
"open",
"(",
"dest_full",
",",
"'rb'",
")",
"old_content",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"if",
"old_content",
"==",
"content",
":",
"if",
"verbosity",
":",
"out",
"(",
"'%s%s already exists (same content)'",
"%",
"(",
"pad",
",",
"dest_full",
")",
")",
"continue",
"# pragma: no cover",
"if",
"interactive",
":",
"if",
"not",
"query_interactive",
"(",
"native_",
"(",
"full",
",",
"fsenc",
")",
",",
"native_",
"(",
"dest_full",
",",
"fsenc",
")",
",",
"native_",
"(",
"content",
",",
"fsenc",
")",
",",
"native_",
"(",
"old_content",
",",
"fsenc",
")",
",",
"simulate",
"=",
"simulate",
",",
"out_",
"=",
"out_",
")",
":",
"continue",
"elif",
"not",
"overwrite",
":",
"continue",
"# pragma: no cover",
"if",
"verbosity",
"and",
"use_pkg_resources",
":",
"out",
"(",
"'%sCopying %s to %s'",
"%",
"(",
"pad",
",",
"full",
",",
"dest_full",
")",
")",
"elif",
"verbosity",
":",
"out",
"(",
"'%sCopying %s to %s'",
"%",
"(",
"pad",
",",
"os",
".",
"path",
".",
"basename",
"(",
"full",
")",
",",
"dest_full",
")",
")",
"if",
"not",
"simulate",
":",
"f",
"=",
"open",
"(",
"dest_full",
",",
"'wb'",
")",
"f",
".",
"write",
"(",
"content",
")",
"f",
".",
"close",
"(",
")"
] | 38.984 | 0.002401 | [
"def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0,\n",
" sub_vars=True, interactive=False, overwrite=True,\n",
" template_renderer=None, out_=sys.stdout):\n",
" \"\"\"\n",
" Copies the ``source`` directory to the ``dest`` directory.\n",
"\n",
" ``vars``: A dictionary of variables to use in any substitutions.\n",
"\n",
" ``verbosity``: Higher numbers will show more about what is happening.\n",
"\n",
" ``simulate``: If true, then don't actually *do* anything.\n",
"\n",
" ``indent``: Indent any messages by this amount.\n",
"\n",
" ``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+``\n",
" in filenames will be substituted.\n",
"\n",
" ``overwrite``: If false, then don't ever overwrite anything.\n",
"\n",
" ``interactive``: If you are overwriting a file and interactive is\n",
" true, then ask before overwriting.\n",
"\n",
" ``template_renderer``: This is a function for rendering templates (if you\n",
" don't want to use string.Template). It should have the signature\n",
" ``template_renderer(content_as_string, vars_as_dict,\n",
" filename=filename)``.\n",
" \"\"\"\n",
" def out(msg):\n",
" out_.write(msg)\n",
" out_.write('\\n')\n",
" out_.flush()\n",
" # This allows you to use a leading +dot+ in filenames which would\n",
" # otherwise be skipped because leading dots make the file hidden:\n",
" vars.setdefault('dot', '.')\n",
" vars.setdefault('plus', '+')\n",
" use_pkg_resources = isinstance(source, tuple)\n",
" if use_pkg_resources:\n",
" names = sorted(pkg_resources.resource_listdir(source[0], source[1]))\n",
" else:\n",
" names = sorted(os.listdir(source))\n",
" pad = ' '*(indent*2)\n",
" if not os.path.exists(dest):\n",
" if verbosity >= 1:\n",
" out('%sCreating %s/' % (pad, dest))\n",
" if not simulate:\n",
" makedirs(dest, verbosity=verbosity, pad=pad)\n",
" elif verbosity >= 2:\n",
" out('%sDirectory %s exists' % (pad, dest))\n",
" for name in names:\n",
" if use_pkg_resources:\n",
" full = '/'.join([source[1], name])\n",
" else:\n",
" full = os.path.join(source, name)\n",
" reason = should_skip_file(name)\n",
" if reason:\n",
" if verbosity >= 2:\n",
" reason = pad + reason % {'filename': full}\n",
" out(reason)\n",
" continue # pragma: no cover\n",
" if sub_vars:\n",
" dest_full = os.path.join(dest, substitute_filename(name, vars))\n",
" sub_file = False\n",
" if dest_full.endswith('_tmpl'):\n",
" dest_full = dest_full[:-5]\n",
" sub_file = sub_vars\n",
" if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):\n",
" if verbosity:\n",
" out('%sRecursing into %s' % (pad, os.path.basename(full)))\n",
" copy_dir((source[0], full), dest_full, vars, verbosity, simulate,\n",
" indent=indent+1,\n",
" sub_vars=sub_vars, interactive=interactive,\n",
" template_renderer=template_renderer, out_=out_)\n",
" continue\n",
" elif not use_pkg_resources and os.path.isdir(full):\n",
" if verbosity:\n",
" out('%sRecursing into %s' % (pad, os.path.basename(full)))\n",
" copy_dir(full, dest_full, vars, verbosity, simulate,\n",
" indent=indent+1,\n",
" sub_vars=sub_vars, interactive=interactive,\n",
" template_renderer=template_renderer, out_=out_)\n",
" continue\n",
" elif use_pkg_resources:\n",
" content = pkg_resources.resource_string(source[0], full)\n",
" else:\n",
" f = open(full, 'rb')\n",
" content = f.read()\n",
" f.close()\n",
" if sub_file:\n",
" try:\n",
" content = substitute_content(\n",
" content, vars, filename=full,\n",
" template_renderer=template_renderer\n",
" )\n",
" except SkipTemplate:\n",
" continue # pragma: no cover\n",
" if content is None:\n",
" continue # pragma: no cover\n",
" already_exists = os.path.exists(dest_full)\n",
" if already_exists:\n",
" f = open(dest_full, 'rb')\n",
" old_content = f.read()\n",
" f.close()\n",
" if old_content == content:\n",
" if verbosity:\n",
" out('%s%s already exists (same content)' %\n",
" (pad, dest_full))\n",
" continue # pragma: no cover\n",
" if interactive:\n",
" if not query_interactive(\n",
" native_(full, fsenc), native_(dest_full, fsenc),\n",
" native_(content, fsenc), native_(old_content, fsenc),\n",
" simulate=simulate, out_=out_):\n",
" continue\n",
" elif not overwrite:\n",
" continue # pragma: no cover\n",
" if verbosity and use_pkg_resources:\n",
" out('%sCopying %s to %s' % (pad, full, dest_full))\n",
" elif verbosity:\n",
" out(\n",
" '%sCopying %s to %s' % (pad, os.path.basename(full),\n",
" dest_full))\n",
" if not simulate:\n",
" f = open(dest_full, 'wb')\n",
" f.write(content)\n",
" f.close()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0.016666666666666666,
0.015625,
0,
0,
0,
0,
0,
0.030303030303030304,
0.016666666666666666,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 125 | 0.002285 |
def close(self):
"""
Closes this cloud.
"""
if not (yield from super().close()):
return False
for nio in self._nios.values():
if nio and isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
yield from self._stop_ubridge()
log.info('Cloud "{name}" [{id}] has been closed'.format(name=self._name, id=self._id)) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"(",
"yield",
"from",
"super",
"(",
")",
".",
"close",
"(",
")",
")",
":",
"return",
"False",
"for",
"nio",
"in",
"self",
".",
"_nios",
".",
"values",
"(",
")",
":",
"if",
"nio",
"and",
"isinstance",
"(",
"nio",
",",
"NIOUDP",
")",
":",
"self",
".",
"manager",
".",
"port_manager",
".",
"release_udp_port",
"(",
"nio",
".",
"lport",
",",
"self",
".",
"_project",
")",
"yield",
"from",
"self",
".",
"_stop_ubridge",
"(",
")",
"log",
".",
"info",
"(",
"'Cloud \"{name}\" [{id}] has been closed'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
")",
")"
] | 31.071429 | 0.008929 | [
"def close(self):\n",
" \"\"\"\n",
" Closes this cloud.\n",
" \"\"\"\n",
"\n",
" if not (yield from super().close()):\n",
" return False\n",
"\n",
" for nio in self._nios.values():\n",
" if nio and isinstance(nio, NIOUDP):\n",
" self.manager.port_manager.release_udp_port(nio.lport, self._project)\n",
"\n",
" yield from self._stop_ubridge()\n",
" log.info('Cloud \"{name}\" [{id}] has been closed'.format(name=self._name, id=self._id))"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0.02127659574468085
] | 14 | 0.008312 |
def _check_voSet(orb,kwargs,funcName):
"""Function to check whether vo is set, because it's required for funcName"""
if not orb._voSet and kwargs.get('vo',None) is None:
warnings.warn("Method %s(.) requires vo to be given at Orbit initialization or at method evaluation; using default vo which is %f km/s" % (funcName,orb._vo),
galpyWarning) | [
"def",
"_check_voSet",
"(",
"orb",
",",
"kwargs",
",",
"funcName",
")",
":",
"if",
"not",
"orb",
".",
"_voSet",
"and",
"kwargs",
".",
"get",
"(",
"'vo'",
",",
"None",
")",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"Method %s(.) requires vo to be given at Orbit initialization or at method evaluation; using default vo which is %f km/s\"",
"%",
"(",
"funcName",
",",
"orb",
".",
"_vo",
")",
",",
"galpyWarning",
")"
] | 75 | 0.01847 | [
"def _check_voSet(orb,kwargs,funcName):\n",
" \"\"\"Function to check whether vo is set, because it's required for funcName\"\"\"\n",
" if not orb._voSet and kwargs.get('vo',None) is None:\n",
" warnings.warn(\"Method %s(.) requires vo to be given at Orbit initialization or at method evaluation; using default vo which is %f km/s\" % (funcName,orb._vo),\n",
" galpyWarning)"
] | [
0.05128205128205128,
0.012195121951219513,
0.017543859649122806,
0.012048192771084338,
0.02857142857142857
] | 5 | 0.024328 |
def findExtname(fimg, extname, extver=None):
"""
Returns the list number of the extension corresponding to EXTNAME given.
"""
i = 0
extnum = None
for chip in fimg:
hdr = chip.header
if 'EXTNAME' in hdr:
if hdr['EXTNAME'].strip() == extname.upper():
if extver is None or hdr['EXTVER'] == extver:
extnum = i
break
i += 1
return extnum | [
"def",
"findExtname",
"(",
"fimg",
",",
"extname",
",",
"extver",
"=",
"None",
")",
":",
"i",
"=",
"0",
"extnum",
"=",
"None",
"for",
"chip",
"in",
"fimg",
":",
"hdr",
"=",
"chip",
".",
"header",
"if",
"'EXTNAME'",
"in",
"hdr",
":",
"if",
"hdr",
"[",
"'EXTNAME'",
"]",
".",
"strip",
"(",
")",
"==",
"extname",
".",
"upper",
"(",
")",
":",
"if",
"extver",
"is",
"None",
"or",
"hdr",
"[",
"'EXTVER'",
"]",
"==",
"extver",
":",
"extnum",
"=",
"i",
"break",
"i",
"+=",
"1",
"return",
"extnum"
] | 27.375 | 0.002208 | [
"def findExtname(fimg, extname, extver=None):\n",
" \"\"\"\n",
" Returns the list number of the extension corresponding to EXTNAME given.\n",
" \"\"\"\n",
"\n",
" i = 0\n",
" extnum = None\n",
" for chip in fimg:\n",
" hdr = chip.header\n",
" if 'EXTNAME' in hdr:\n",
" if hdr['EXTNAME'].strip() == extname.upper():\n",
" if extver is None or hdr['EXTVER'] == extver:\n",
" extnum = i\n",
" break\n",
" i += 1\n",
" return extnum"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 16 | 0.003676 |
def _EvaluateNumberOfElements(self, context):
"""Evaluates number of elements.
Args:
context (DataTypeMapContext): data type map context.
Returns:
int: number of elements.
Raises:
MappingError: if the number of elements cannot be determined.
"""
number_of_elements = None
if self._data_type_definition.number_of_elements:
number_of_elements = self._data_type_definition.number_of_elements
elif self._data_type_definition.number_of_elements_expression:
expression = self._data_type_definition.number_of_elements_expression
namespace = {}
if context and context.values:
namespace.update(context.values)
# Make sure __builtins__ contains an empty dictionary.
namespace['__builtins__'] = {}
try:
number_of_elements = eval(expression, namespace) # pylint: disable=eval-used
except Exception as exception:
raise errors.MappingError(
'Unable to determine number of elements with error: {0!s}'.format(
exception))
if number_of_elements is None or number_of_elements < 0:
raise errors.MappingError(
'Invalid number of elements: {0!s}'.format(number_of_elements))
return number_of_elements | [
"def",
"_EvaluateNumberOfElements",
"(",
"self",
",",
"context",
")",
":",
"number_of_elements",
"=",
"None",
"if",
"self",
".",
"_data_type_definition",
".",
"number_of_elements",
":",
"number_of_elements",
"=",
"self",
".",
"_data_type_definition",
".",
"number_of_elements",
"elif",
"self",
".",
"_data_type_definition",
".",
"number_of_elements_expression",
":",
"expression",
"=",
"self",
".",
"_data_type_definition",
".",
"number_of_elements_expression",
"namespace",
"=",
"{",
"}",
"if",
"context",
"and",
"context",
".",
"values",
":",
"namespace",
".",
"update",
"(",
"context",
".",
"values",
")",
"# Make sure __builtins__ contains an empty dictionary.",
"namespace",
"[",
"'__builtins__'",
"]",
"=",
"{",
"}",
"try",
":",
"number_of_elements",
"=",
"eval",
"(",
"expression",
",",
"namespace",
")",
"# pylint: disable=eval-used",
"except",
"Exception",
"as",
"exception",
":",
"raise",
"errors",
".",
"MappingError",
"(",
"'Unable to determine number of elements with error: {0!s}'",
".",
"format",
"(",
"exception",
")",
")",
"if",
"number_of_elements",
"is",
"None",
"or",
"number_of_elements",
"<",
"0",
":",
"raise",
"errors",
".",
"MappingError",
"(",
"'Invalid number of elements: {0!s}'",
".",
"format",
"(",
"number_of_elements",
")",
")",
"return",
"number_of_elements"
] | 34.055556 | 0.008723 | [
"def _EvaluateNumberOfElements(self, context):\n",
" \"\"\"Evaluates number of elements.\n",
"\n",
" Args:\n",
" context (DataTypeMapContext): data type map context.\n",
"\n",
" Returns:\n",
" int: number of elements.\n",
"\n",
" Raises:\n",
" MappingError: if the number of elements cannot be determined.\n",
" \"\"\"\n",
" number_of_elements = None\n",
" if self._data_type_definition.number_of_elements:\n",
" number_of_elements = self._data_type_definition.number_of_elements\n",
"\n",
" elif self._data_type_definition.number_of_elements_expression:\n",
" expression = self._data_type_definition.number_of_elements_expression\n",
" namespace = {}\n",
" if context and context.values:\n",
" namespace.update(context.values)\n",
" # Make sure __builtins__ contains an empty dictionary.\n",
" namespace['__builtins__'] = {}\n",
"\n",
" try:\n",
" number_of_elements = eval(expression, namespace) # pylint: disable=eval-used\n",
" except Exception as exception:\n",
" raise errors.MappingError(\n",
" 'Unable to determine number of elements with error: {0!s}'.format(\n",
" exception))\n",
"\n",
" if number_of_elements is None or number_of_elements < 0:\n",
" raise errors.MappingError(\n",
" 'Invalid number of elements: {0!s}'.format(number_of_elements))\n",
"\n",
" return number_of_elements"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0,
0.013157894736842105,
0.047619047619047616,
0.02702702702702703,
0,
0.01639344262295082,
0.02702702702702703,
0,
0.09090909090909091,
0.011627906976744186,
0.02702702702702703,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0.034482758620689655
] | 36 | 0.009424 |
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), 'object', 'object'
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype('uint64'), 'bool', 'uint64'
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), 'int64', 'int64'
elif (is_unsigned_integer_dtype(values) or
is_unsigned_integer_dtype(dtype)):
return ensure_uint64(values), 'uint64', 'uint64'
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), 'float64', 'float64'
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), 'object', 'object'
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings():
simplefilter("ignore", np.ComplexWarning)
values = ensure_float64(values)
return values, 'float64', 'float64'
except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), 'object', 'object'
# datetimelike
if (needs_i8_conversion(values) or
is_period_dtype(dtype) or
is_datetime64_any_dtype(dtype) or
is_timedelta64_dtype(dtype)):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, 'int64'
elif (is_categorical_dtype(values) and
(is_categorical_dtype(dtype) or dtype is None)):
values = getattr(values, 'values', values)
values = values.codes
dtype = 'category'
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, 'int64'
# we have failed, return object
values = np.asarray(values, dtype=np.object)
return ensure_object(values), 'object', 'object' | [
"def",
"_ensure_data",
"(",
"values",
",",
"dtype",
"=",
"None",
")",
":",
"# we check some simple dtypes first",
"try",
":",
"if",
"is_object_dtype",
"(",
"dtype",
")",
":",
"return",
"ensure_object",
"(",
"np",
".",
"asarray",
"(",
"values",
")",
")",
",",
"'object'",
",",
"'object'",
"if",
"is_bool_dtype",
"(",
"values",
")",
"or",
"is_bool_dtype",
"(",
"dtype",
")",
":",
"# we are actually coercing to uint64",
"# until our algos support uint8 directly (see TODO)",
"return",
"np",
".",
"asarray",
"(",
"values",
")",
".",
"astype",
"(",
"'uint64'",
")",
",",
"'bool'",
",",
"'uint64'",
"elif",
"is_signed_integer_dtype",
"(",
"values",
")",
"or",
"is_signed_integer_dtype",
"(",
"dtype",
")",
":",
"return",
"ensure_int64",
"(",
"values",
")",
",",
"'int64'",
",",
"'int64'",
"elif",
"(",
"is_unsigned_integer_dtype",
"(",
"values",
")",
"or",
"is_unsigned_integer_dtype",
"(",
"dtype",
")",
")",
":",
"return",
"ensure_uint64",
"(",
"values",
")",
",",
"'uint64'",
",",
"'uint64'",
"elif",
"is_float_dtype",
"(",
"values",
")",
"or",
"is_float_dtype",
"(",
"dtype",
")",
":",
"return",
"ensure_float64",
"(",
"values",
")",
",",
"'float64'",
",",
"'float64'",
"elif",
"is_object_dtype",
"(",
"values",
")",
"and",
"dtype",
"is",
"None",
":",
"return",
"ensure_object",
"(",
"np",
".",
"asarray",
"(",
"values",
")",
")",
",",
"'object'",
",",
"'object'",
"elif",
"is_complex_dtype",
"(",
"values",
")",
"or",
"is_complex_dtype",
"(",
"dtype",
")",
":",
"# ignore the fact that we are casting to float",
"# which discards complex parts",
"with",
"catch_warnings",
"(",
")",
":",
"simplefilter",
"(",
"\"ignore\"",
",",
"np",
".",
"ComplexWarning",
")",
"values",
"=",
"ensure_float64",
"(",
"values",
")",
"return",
"values",
",",
"'float64'",
",",
"'float64'",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"OverflowError",
")",
":",
"# if we are trying to coerce to a dtype",
"# and it is incompat this will fall thru to here",
"return",
"ensure_object",
"(",
"values",
")",
",",
"'object'",
",",
"'object'",
"# datetimelike",
"if",
"(",
"needs_i8_conversion",
"(",
"values",
")",
"or",
"is_period_dtype",
"(",
"dtype",
")",
"or",
"is_datetime64_any_dtype",
"(",
"dtype",
")",
"or",
"is_timedelta64_dtype",
"(",
"dtype",
")",
")",
":",
"if",
"is_period_dtype",
"(",
"values",
")",
"or",
"is_period_dtype",
"(",
"dtype",
")",
":",
"from",
"pandas",
"import",
"PeriodIndex",
"values",
"=",
"PeriodIndex",
"(",
"values",
")",
"dtype",
"=",
"values",
".",
"dtype",
"elif",
"is_timedelta64_dtype",
"(",
"values",
")",
"or",
"is_timedelta64_dtype",
"(",
"dtype",
")",
":",
"from",
"pandas",
"import",
"TimedeltaIndex",
"values",
"=",
"TimedeltaIndex",
"(",
"values",
")",
"dtype",
"=",
"values",
".",
"dtype",
"else",
":",
"# Datetime",
"from",
"pandas",
"import",
"DatetimeIndex",
"values",
"=",
"DatetimeIndex",
"(",
"values",
")",
"dtype",
"=",
"values",
".",
"dtype",
"return",
"values",
".",
"asi8",
",",
"dtype",
",",
"'int64'",
"elif",
"(",
"is_categorical_dtype",
"(",
"values",
")",
"and",
"(",
"is_categorical_dtype",
"(",
"dtype",
")",
"or",
"dtype",
"is",
"None",
")",
")",
":",
"values",
"=",
"getattr",
"(",
"values",
",",
"'values'",
",",
"values",
")",
"values",
"=",
"values",
".",
"codes",
"dtype",
"=",
"'category'",
"# we are actually coercing to int64",
"# until our algos support int* directly (not all do)",
"values",
"=",
"ensure_int64",
"(",
"values",
")",
"return",
"values",
",",
"dtype",
",",
"'int64'",
"# we have failed, return object",
"values",
"=",
"np",
".",
"asarray",
"(",
"values",
",",
"dtype",
"=",
"np",
".",
"object",
")",
"return",
"ensure_object",
"(",
"values",
")",
",",
"'object'",
",",
"'object'"
] | 35.673913 | 0.000296 | [
"def _ensure_data(values, dtype=None):\n",
" \"\"\"\n",
" routine to ensure that our data is of the correct\n",
" input dtype for lower-level routines\n",
"\n",
" This will coerce:\n",
" - ints -> int64\n",
" - uint -> uint64\n",
" - bool -> uint64 (TODO this should be uint8)\n",
" - datetimelike -> i8\n",
" - datetime64tz -> i8 (in local tz)\n",
" - categorical -> codes\n",
"\n",
" Parameters\n",
" ----------\n",
" values : array-like\n",
" dtype : pandas_dtype, optional\n",
" coerce to this dtype\n",
"\n",
" Returns\n",
" -------\n",
" (ndarray, pandas_dtype, algo dtype as a string)\n",
"\n",
" \"\"\"\n",
"\n",
" # we check some simple dtypes first\n",
" try:\n",
" if is_object_dtype(dtype):\n",
" return ensure_object(np.asarray(values)), 'object', 'object'\n",
" if is_bool_dtype(values) or is_bool_dtype(dtype):\n",
" # we are actually coercing to uint64\n",
" # until our algos support uint8 directly (see TODO)\n",
" return np.asarray(values).astype('uint64'), 'bool', 'uint64'\n",
" elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):\n",
" return ensure_int64(values), 'int64', 'int64'\n",
" elif (is_unsigned_integer_dtype(values) or\n",
" is_unsigned_integer_dtype(dtype)):\n",
" return ensure_uint64(values), 'uint64', 'uint64'\n",
" elif is_float_dtype(values) or is_float_dtype(dtype):\n",
" return ensure_float64(values), 'float64', 'float64'\n",
" elif is_object_dtype(values) and dtype is None:\n",
" return ensure_object(np.asarray(values)), 'object', 'object'\n",
" elif is_complex_dtype(values) or is_complex_dtype(dtype):\n",
"\n",
" # ignore the fact that we are casting to float\n",
" # which discards complex parts\n",
" with catch_warnings():\n",
" simplefilter(\"ignore\", np.ComplexWarning)\n",
" values = ensure_float64(values)\n",
" return values, 'float64', 'float64'\n",
"\n",
" except (TypeError, ValueError, OverflowError):\n",
" # if we are trying to coerce to a dtype\n",
" # and it is incompat this will fall thru to here\n",
" return ensure_object(values), 'object', 'object'\n",
"\n",
" # datetimelike\n",
" if (needs_i8_conversion(values) or\n",
" is_period_dtype(dtype) or\n",
" is_datetime64_any_dtype(dtype) or\n",
" is_timedelta64_dtype(dtype)):\n",
" if is_period_dtype(values) or is_period_dtype(dtype):\n",
" from pandas import PeriodIndex\n",
" values = PeriodIndex(values)\n",
" dtype = values.dtype\n",
" elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):\n",
" from pandas import TimedeltaIndex\n",
" values = TimedeltaIndex(values)\n",
" dtype = values.dtype\n",
" else:\n",
" # Datetime\n",
" from pandas import DatetimeIndex\n",
" values = DatetimeIndex(values)\n",
" dtype = values.dtype\n",
"\n",
" return values.asi8, dtype, 'int64'\n",
"\n",
" elif (is_categorical_dtype(values) and\n",
" (is_categorical_dtype(dtype) or dtype is None)):\n",
" values = getattr(values, 'values', values)\n",
" values = values.codes\n",
" dtype = 'category'\n",
"\n",
" # we are actually coercing to int64\n",
" # until our algos support int* directly (not all do)\n",
" values = ensure_int64(values)\n",
"\n",
" return values, dtype, 'int64'\n",
"\n",
" # we have failed, return object\n",
" values = np.asarray(values, dtype=np.object)\n",
" return ensure_object(values), 'object', 'object'"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232
] | 92 | 0.000209 |
def _newsToDF(n):
'''internal'''
df = pd.DataFrame(n)
_toDatetime(df)
_reindex(df, 'datetime')
return df | [
"def",
"_newsToDF",
"(",
"n",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"n",
")",
"_toDatetime",
"(",
"df",
")",
"_reindex",
"(",
"df",
",",
"'datetime'",
")",
"return",
"df"
] | 19.833333 | 0.008065 | [
"def _newsToDF(n):\n",
" '''internal'''\n",
" df = pd.DataFrame(n)\n",
" _toDatetime(df)\n",
" _reindex(df, 'datetime')\n",
" return df"
] | [
0,
0,
0,
0,
0,
0.07692307692307693
] | 6 | 0.012821 |
def f1_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
score = 2 * p * r / (p + r) if p + r > 0 else 0
return score | [
"def",
"f1_score",
"(",
"y_true",
",",
"y_pred",
",",
"average",
"=",
"'micro'",
",",
"suffix",
"=",
"False",
")",
":",
"true_entities",
"=",
"set",
"(",
"get_entities",
"(",
"y_true",
",",
"suffix",
")",
")",
"pred_entities",
"=",
"set",
"(",
"get_entities",
"(",
"y_pred",
",",
"suffix",
")",
")",
"nb_correct",
"=",
"len",
"(",
"true_entities",
"&",
"pred_entities",
")",
"nb_pred",
"=",
"len",
"(",
"pred_entities",
")",
"nb_true",
"=",
"len",
"(",
"true_entities",
")",
"p",
"=",
"nb_correct",
"/",
"nb_pred",
"if",
"nb_pred",
">",
"0",
"else",
"0",
"r",
"=",
"nb_correct",
"/",
"nb_true",
"if",
"nb_true",
">",
"0",
"else",
"0",
"score",
"=",
"2",
"*",
"p",
"*",
"r",
"/",
"(",
"p",
"+",
"r",
")",
"if",
"p",
"+",
"r",
">",
"0",
"else",
"0",
"return",
"score"
] | 36.166667 | 0.002244 | [
"def f1_score(y_true, y_pred, average='micro', suffix=False):\n",
" \"\"\"Compute the F1 score.\n",
"\n",
" The F1 score can be interpreted as a weighted average of the precision and\n",
" recall, where an F1 score reaches its best value at 1 and worst score at 0.\n",
" The relative contribution of precision and recall to the F1 score are\n",
" equal. The formula for the F1 score is::\n",
"\n",
" F1 = 2 * (precision * recall) / (precision + recall)\n",
"\n",
" Args:\n",
" y_true : 2d array. Ground truth (correct) target values.\n",
" y_pred : 2d array. Estimated targets as returned by a tagger.\n",
"\n",
" Returns:\n",
" score : float.\n",
"\n",
" Example:\n",
" >>> from seqeval.metrics import f1_score\n",
" >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n",
" >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n",
" >>> f1_score(y_true, y_pred)\n",
" 0.50\n",
" \"\"\"\n",
" true_entities = set(get_entities(y_true, suffix))\n",
" pred_entities = set(get_entities(y_pred, suffix))\n",
"\n",
" nb_correct = len(true_entities & pred_entities)\n",
" nb_pred = len(pred_entities)\n",
" nb_true = len(true_entities)\n",
"\n",
" p = nb_correct / nb_pred if nb_pred > 0 else 0\n",
" r = nb_correct / nb_true if nb_true > 0 else 0\n",
" score = 2 * p * r / (p + r) if p + r > 0 else 0\n",
"\n",
" return score"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0.009615384615384616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 36 | 0.002284 |
def _query_pageant(msg):
"""
Communication with the Pageant process is done through a shared
memory-mapped file.
"""
hwnd = _get_pageant_window_object()
if not hwnd:
# Raise a failure to connect exception, pageant isn't running anymore!
return None
# create a name for the mmap
map_name = "PageantRequest%08x" % thread.get_ident()
pymap = _winapi.MemoryMap(
map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user()
)
with pymap:
pymap.write(msg)
# Create an array buffer containing the mapped filename
char_buffer = array.array("b", b(map_name) + zero_byte) # noqa
char_buffer_address, char_buffer_size = char_buffer.buffer_info()
# Create a string to use for the SendMessage function call
cds = COPYDATASTRUCT(
_AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address
)
response = ctypes.windll.user32.SendMessageA(
hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds)
)
if response > 0:
pymap.seek(0)
datalen = pymap.read(4)
retlen = struct.unpack(">I", datalen)[0]
return datalen + pymap.read(retlen)
return None | [
"def",
"_query_pageant",
"(",
"msg",
")",
":",
"hwnd",
"=",
"_get_pageant_window_object",
"(",
")",
"if",
"not",
"hwnd",
":",
"# Raise a failure to connect exception, pageant isn't running anymore!",
"return",
"None",
"# create a name for the mmap",
"map_name",
"=",
"\"PageantRequest%08x\"",
"%",
"thread",
".",
"get_ident",
"(",
")",
"pymap",
"=",
"_winapi",
".",
"MemoryMap",
"(",
"map_name",
",",
"_AGENT_MAX_MSGLEN",
",",
"_winapi",
".",
"get_security_attributes_for_user",
"(",
")",
")",
"with",
"pymap",
":",
"pymap",
".",
"write",
"(",
"msg",
")",
"# Create an array buffer containing the mapped filename",
"char_buffer",
"=",
"array",
".",
"array",
"(",
"\"b\"",
",",
"b",
"(",
"map_name",
")",
"+",
"zero_byte",
")",
"# noqa",
"char_buffer_address",
",",
"char_buffer_size",
"=",
"char_buffer",
".",
"buffer_info",
"(",
")",
"# Create a string to use for the SendMessage function call",
"cds",
"=",
"COPYDATASTRUCT",
"(",
"_AGENT_COPYDATA_ID",
",",
"char_buffer_size",
",",
"char_buffer_address",
")",
"response",
"=",
"ctypes",
".",
"windll",
".",
"user32",
".",
"SendMessageA",
"(",
"hwnd",
",",
"win32con_WM_COPYDATA",
",",
"ctypes",
".",
"sizeof",
"(",
"cds",
")",
",",
"ctypes",
".",
"byref",
"(",
"cds",
")",
")",
"if",
"response",
">",
"0",
":",
"pymap",
".",
"seek",
"(",
"0",
")",
"datalen",
"=",
"pymap",
".",
"read",
"(",
"4",
")",
"retlen",
"=",
"struct",
".",
"unpack",
"(",
"\">I\"",
",",
"datalen",
")",
"[",
"0",
"]",
"return",
"datalen",
"+",
"pymap",
".",
"read",
"(",
"retlen",
")",
"return",
"None"
] | 34.5 | 0.000783 | [
"def _query_pageant(msg):\n",
" \"\"\"\n",
" Communication with the Pageant process is done through a shared\n",
" memory-mapped file.\n",
" \"\"\"\n",
" hwnd = _get_pageant_window_object()\n",
" if not hwnd:\n",
" # Raise a failure to connect exception, pageant isn't running anymore!\n",
" return None\n",
"\n",
" # create a name for the mmap\n",
" map_name = \"PageantRequest%08x\" % thread.get_ident()\n",
"\n",
" pymap = _winapi.MemoryMap(\n",
" map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user()\n",
" )\n",
" with pymap:\n",
" pymap.write(msg)\n",
" # Create an array buffer containing the mapped filename\n",
" char_buffer = array.array(\"b\", b(map_name) + zero_byte) # noqa\n",
" char_buffer_address, char_buffer_size = char_buffer.buffer_info()\n",
" # Create a string to use for the SendMessage function call\n",
" cds = COPYDATASTRUCT(\n",
" _AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address\n",
" )\n",
"\n",
" response = ctypes.windll.user32.SendMessageA(\n",
" hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds)\n",
" )\n",
"\n",
" if response > 0:\n",
" pymap.seek(0)\n",
" datalen = pymap.read(4)\n",
" retlen = struct.unpack(\">I\", datalen)[0]\n",
" return datalen + pymap.read(retlen)\n",
" return None"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 36 | 0.001462 |
def download(*packages, **kwargs):
'''
Download packages to the local disk.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
CLI example:
.. code-block:: bash
salt '*' pkg.download httpd
salt '*' pkg.download httpd postfix
'''
if not packages:
raise SaltInvocationError('No packages specified')
root = kwargs.get('root', None)
refresh = kwargs.get('refresh', False)
if refresh:
refresh_db(root)
pkg_ret = {}
for dld_result in __zypper__(root=root).xml.call('download', *packages).getElementsByTagName("download-result"):
repo = dld_result.getElementsByTagName("repository")[0]
path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path")
pkg_info = {
'repository-name': repo.getAttribute('name'),
'repository-alias': repo.getAttribute('alias'),
'path': path,
}
key = _get_first_aggregate_text(
dld_result.getElementsByTagName('name')
)
if __salt__['lowpkg.checksum'](pkg_info['path'], root=root):
pkg_ret[key] = pkg_info
if pkg_ret:
failed = [pkg for pkg in packages if pkg not in pkg_ret]
if failed:
pkg_ret['_error'] = ('The following package(s) failed to download: {0}'.format(', '.join(failed)))
return pkg_ret
raise CommandExecutionError(
'Unable to download packages: {0}'.format(', '.join(packages))
) | [
"def",
"download",
"(",
"*",
"packages",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"packages",
":",
"raise",
"SaltInvocationError",
"(",
"'No packages specified'",
")",
"root",
"=",
"kwargs",
".",
"get",
"(",
"'root'",
",",
"None",
")",
"refresh",
"=",
"kwargs",
".",
"get",
"(",
"'refresh'",
",",
"False",
")",
"if",
"refresh",
":",
"refresh_db",
"(",
"root",
")",
"pkg_ret",
"=",
"{",
"}",
"for",
"dld_result",
"in",
"__zypper__",
"(",
"root",
"=",
"root",
")",
".",
"xml",
".",
"call",
"(",
"'download'",
",",
"*",
"packages",
")",
".",
"getElementsByTagName",
"(",
"\"download-result\"",
")",
":",
"repo",
"=",
"dld_result",
".",
"getElementsByTagName",
"(",
"\"repository\"",
")",
"[",
"0",
"]",
"path",
"=",
"dld_result",
".",
"getElementsByTagName",
"(",
"\"localfile\"",
")",
"[",
"0",
"]",
".",
"getAttribute",
"(",
"\"path\"",
")",
"pkg_info",
"=",
"{",
"'repository-name'",
":",
"repo",
".",
"getAttribute",
"(",
"'name'",
")",
",",
"'repository-alias'",
":",
"repo",
".",
"getAttribute",
"(",
"'alias'",
")",
",",
"'path'",
":",
"path",
",",
"}",
"key",
"=",
"_get_first_aggregate_text",
"(",
"dld_result",
".",
"getElementsByTagName",
"(",
"'name'",
")",
")",
"if",
"__salt__",
"[",
"'lowpkg.checksum'",
"]",
"(",
"pkg_info",
"[",
"'path'",
"]",
",",
"root",
"=",
"root",
")",
":",
"pkg_ret",
"[",
"key",
"]",
"=",
"pkg_info",
"if",
"pkg_ret",
":",
"failed",
"=",
"[",
"pkg",
"for",
"pkg",
"in",
"packages",
"if",
"pkg",
"not",
"in",
"pkg_ret",
"]",
"if",
"failed",
":",
"pkg_ret",
"[",
"'_error'",
"]",
"=",
"(",
"'The following package(s) failed to download: {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"failed",
")",
")",
")",
"return",
"pkg_ret",
"raise",
"CommandExecutionError",
"(",
"'Unable to download packages: {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"packages",
")",
")",
")"
] | 30.346154 | 0.002455 | [
"def download(*packages, **kwargs):\n",
" '''\n",
" Download packages to the local disk.\n",
"\n",
" refresh\n",
" force a refresh if set to True.\n",
" If set to False (default) it depends on zypper if a refresh is\n",
" executed.\n",
"\n",
" root\n",
" operate on a different root directory.\n",
"\n",
" CLI example:\n",
"\n",
" .. code-block:: bash\n",
"\n",
" salt '*' pkg.download httpd\n",
" salt '*' pkg.download httpd postfix\n",
" '''\n",
" if not packages:\n",
" raise SaltInvocationError('No packages specified')\n",
"\n",
" root = kwargs.get('root', None)\n",
"\n",
" refresh = kwargs.get('refresh', False)\n",
" if refresh:\n",
" refresh_db(root)\n",
"\n",
" pkg_ret = {}\n",
" for dld_result in __zypper__(root=root).xml.call('download', *packages).getElementsByTagName(\"download-result\"):\n",
" repo = dld_result.getElementsByTagName(\"repository\")[0]\n",
" path = dld_result.getElementsByTagName(\"localfile\")[0].getAttribute(\"path\")\n",
" pkg_info = {\n",
" 'repository-name': repo.getAttribute('name'),\n",
" 'repository-alias': repo.getAttribute('alias'),\n",
" 'path': path,\n",
" }\n",
" key = _get_first_aggregate_text(\n",
" dld_result.getElementsByTagName('name')\n",
" )\n",
" if __salt__['lowpkg.checksum'](pkg_info['path'], root=root):\n",
" pkg_ret[key] = pkg_info\n",
"\n",
" if pkg_ret:\n",
" failed = [pkg for pkg in packages if pkg not in pkg_ret]\n",
" if failed:\n",
" pkg_ret['_error'] = ('The following package(s) failed to download: {0}'.format(', '.join(failed)))\n",
" return pkg_ret\n",
"\n",
" raise CommandExecutionError(\n",
" 'Unable to download packages: {0}'.format(', '.join(packages))\n",
" )"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008547008547008548,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0.2
] | 52 | 0.004413 |
def send(MESSAGE, SOCKET, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
    identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
if CODE_LINE == CODE_FILE == CODE_FUNC == None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key.upper(), val) for key, val in kwargs.items())
return sendv(SOCKET, *args) | [
"def",
"send",
"(",
"MESSAGE",
",",
"SOCKET",
",",
"MESSAGE_ID",
"=",
"None",
",",
"CODE_FILE",
"=",
"None",
",",
"CODE_LINE",
"=",
"None",
",",
"CODE_FUNC",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"[",
"'MESSAGE='",
"+",
"MESSAGE",
"]",
"if",
"MESSAGE_ID",
"is",
"not",
"None",
":",
"id",
"=",
"getattr",
"(",
"MESSAGE_ID",
",",
"'hex'",
",",
"MESSAGE_ID",
")",
"args",
".",
"append",
"(",
"'MESSAGE_ID='",
"+",
"id",
")",
"if",
"CODE_LINE",
"==",
"CODE_FILE",
"==",
"CODE_FUNC",
"==",
"None",
":",
"CODE_FILE",
",",
"CODE_LINE",
",",
"CODE_FUNC",
"=",
"_traceback",
".",
"extract_stack",
"(",
"limit",
"=",
"2",
")",
"[",
"0",
"]",
"[",
":",
"3",
"]",
"if",
"CODE_FILE",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"'CODE_FILE='",
"+",
"CODE_FILE",
")",
"if",
"CODE_LINE",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"'CODE_LINE={:d}'",
".",
"format",
"(",
"CODE_LINE",
")",
")",
"if",
"CODE_FUNC",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"'CODE_FUNC='",
"+",
"CODE_FUNC",
")",
"args",
".",
"extend",
"(",
"_make_line",
"(",
"key",
".",
"upper",
"(",
")",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"return",
"sendv",
"(",
"SOCKET",
",",
"*",
"args",
")"
] | 36.836735 | 0.001079 | [
"def send(MESSAGE, SOCKET, MESSAGE_ID=None,\n",
" CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,\n",
" **kwargs):\n",
" r\"\"\"Send a message to the journal.\n",
"\n",
" >>> journal.send('Hello world')\n",
" >>> journal.send('Hello, again, world', FIELD2='Greetings!')\n",
" >>> journal.send('Binary message', BINARY=b'\\xde\\xad\\xbe\\xef')\n",
"\n",
" Value of the MESSAGE argument will be used for the MESSAGE=\n",
" field. MESSAGE must be a string and will be sent as UTF-8 to\n",
" the journal.\n",
"\n",
" MESSAGE_ID can be given to uniquely identify the type of\n",
" message. It must be a string or a uuid.UUID object.\n",
"\n",
" CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to\n",
" identify the caller. Unless at least on of the three is given,\n",
" values are extracted from the stack frame of the caller of\n",
" send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE\n",
" must be an integer.\n",
"\n",
" Additional fields for the journal entry can only be specified\n",
" as keyword arguments. The payload can be either a string or\n",
" bytes. A string will be sent as UTF-8, and bytes will be sent\n",
" as-is to the journal.\n",
"\n",
" Other useful fields include PRIORITY, SYSLOG_FACILITY,\n",
" SYSLOG_IDENTIFIER, SYSLOG_PID.\n",
" \"\"\"\n",
"\n",
" args = ['MESSAGE=' + MESSAGE]\n",
"\n",
" if MESSAGE_ID is not None:\n",
" id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)\n",
" args.append('MESSAGE_ID=' + id)\n",
"\n",
" if CODE_LINE == CODE_FILE == CODE_FUNC == None:\n",
" CODE_FILE, CODE_LINE, CODE_FUNC = \\\n",
" _traceback.extract_stack(limit=2)[0][:3]\n",
" if CODE_FILE is not None:\n",
" args.append('CODE_FILE=' + CODE_FILE)\n",
" if CODE_LINE is not None:\n",
" args.append('CODE_LINE={:d}'.format(CODE_LINE))\n",
" if CODE_FUNC is not None:\n",
" args.append('CODE_FUNC=' + CODE_FUNC)\n",
"\n",
" args.extend(_make_line(key.upper(), val) for key, val in kwargs.items())\n",
" return sendv(SOCKET, *args)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903
] | 49 | 0.001051 |
def _jdn(self):
"""Return the Julian date number for the given date."""
if self._last_updated == "gdate":
return conv.gdate_to_jdn(self.gdate)
return conv.hdate_to_jdn(self.hdate) | [
"def",
"_jdn",
"(",
"self",
")",
":",
"if",
"self",
".",
"_last_updated",
"==",
"\"gdate\"",
":",
"return",
"conv",
".",
"gdate_to_jdn",
"(",
"self",
".",
"gdate",
")",
"return",
"conv",
".",
"hdate_to_jdn",
"(",
"self",
".",
"hdate",
")"
] | 42.2 | 0.009302 | [
"def _jdn(self):\n",
" \"\"\"Return the Julian date number for the given date.\"\"\"\n",
" if self._last_updated == \"gdate\":\n",
" return conv.gdate_to_jdn(self.gdate)\n",
" return conv.hdate_to_jdn(self.hdate)"
] | [
0,
0.015625,
0,
0,
0.022727272727272728
] | 5 | 0.00767 |
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
""" Set html field with correct iframe. """
if self.url:
iframe_html = '<iframe src="{}" frameborder="0" title="{}" allowfullscreen></iframe>'
self.html = iframe_html.format(
self.get_embed_url(),
self.title
)
return super().save(force_insert, force_update, using, update_fields) | [
"def",
"save",
"(",
"self",
",",
"force_insert",
"=",
"False",
",",
"force_update",
"=",
"False",
",",
"using",
"=",
"None",
",",
"update_fields",
"=",
"None",
")",
":",
"if",
"self",
".",
"url",
":",
"iframe_html",
"=",
"'<iframe src=\"{}\" frameborder=\"0\" title=\"{}\" allowfullscreen></iframe>'",
"self",
".",
"html",
"=",
"iframe_html",
".",
"format",
"(",
"self",
".",
"get_embed_url",
"(",
")",
",",
"self",
".",
"title",
")",
"return",
"super",
"(",
")",
".",
"save",
"(",
"force_insert",
",",
"force_update",
",",
"using",
",",
"update_fields",
")"
] | 50.111111 | 0.008715 | [
"def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n",
" \"\"\" Set html field with correct iframe. \"\"\"\n",
" if self.url:\n",
" iframe_html = '<iframe src=\"{}\" frameborder=\"0\" title=\"{}\" allowfullscreen></iframe>'\n",
" self.html = iframe_html.format(\n",
" self.get_embed_url(),\n",
" self.title\n",
" )\n",
" return super().save(force_insert, force_update, using, update_fields)"
] | [
0.011363636363636364,
0.019230769230769232,
0,
0.01020408163265306,
0,
0,
0,
0,
0.012987012987012988
] | 9 | 0.005976 |
def update_generators():
'''Update the context of all generators
    Adds useful variables and translations into the template context
and interlink translations
'''
for generator in _GENERATOR_DB.keys():
install_templates_translations(generator)
add_variables_to_context(generator)
interlink_static_files(generator)
interlink_removed_content(generator)
interlink_translated_content(generator) | [
"def",
"update_generators",
"(",
")",
":",
"for",
"generator",
"in",
"_GENERATOR_DB",
".",
"keys",
"(",
")",
":",
"install_templates_translations",
"(",
"generator",
")",
"add_variables_to_context",
"(",
"generator",
")",
"interlink_static_files",
"(",
"generator",
")",
"interlink_removed_content",
"(",
"generator",
")",
"interlink_translated_content",
"(",
"generator",
")"
] | 36.416667 | 0.002232 | [
"def update_generators():\n",
" '''Update the context of all generators\n",
"\n",
" Ads useful variables and translations into the template context\n",
" and interlink translations\n",
" '''\n",
" for generator in _GENERATOR_DB.keys():\n",
" install_templates_translations(generator)\n",
" add_variables_to_context(generator)\n",
" interlink_static_files(generator)\n",
" interlink_removed_content(generator)\n",
" interlink_translated_content(generator)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085
] | 12 | 0.001773 |
def is_extension_type(arr):
"""
Check whether an array-like is of a pandas extension class instance.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is of a pandas extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
>>> is_extension_type(pd.SparseSeries([1, 2, 3]))
True
>>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
"""
if is_categorical(arr):
return True
elif is_sparse(arr):
return True
elif is_datetime64tz_dtype(arr):
return True
return False | [
"def",
"is_extension_type",
"(",
"arr",
")",
":",
"if",
"is_categorical",
"(",
"arr",
")",
":",
"return",
"True",
"elif",
"is_sparse",
"(",
"arr",
")",
":",
"return",
"True",
"elif",
"is_datetime64tz_dtype",
"(",
"arr",
")",
":",
"return",
"True",
"return",
"False"
] | 25.245614 | 0.000669 | [
"def is_extension_type(arr):\n",
" \"\"\"\n",
" Check whether an array-like is of a pandas extension class instance.\n",
"\n",
" Extension classes include categoricals, pandas sparse objects (i.e.\n",
" classes represented within the pandas library and not ones external\n",
" to it like scipy sparse matrices), and datetime-like arrays.\n",
"\n",
" Parameters\n",
" ----------\n",
" arr : array-like\n",
" The array-like to check.\n",
"\n",
" Returns\n",
" -------\n",
" boolean\n",
" Whether or not the array-like is of a pandas extension class instance.\n",
"\n",
" Examples\n",
" --------\n",
" >>> is_extension_type([1, 2, 3])\n",
" False\n",
" >>> is_extension_type(np.array([1, 2, 3]))\n",
" False\n",
" >>>\n",
" >>> cat = pd.Categorical([1, 2, 3])\n",
" >>>\n",
" >>> is_extension_type(cat)\n",
" True\n",
" >>> is_extension_type(pd.Series(cat))\n",
" True\n",
" >>> is_extension_type(pd.SparseArray([1, 2, 3]))\n",
" True\n",
" >>> is_extension_type(pd.SparseSeries([1, 2, 3]))\n",
" True\n",
" >>>\n",
" >>> from scipy.sparse import bsr_matrix\n",
" >>> is_extension_type(bsr_matrix([1, 2, 3]))\n",
" False\n",
" >>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))\n",
" False\n",
" >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n",
" True\n",
" >>>\n",
" >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n",
" >>> s = pd.Series([], dtype=dtype)\n",
" >>> is_extension_type(s)\n",
" True\n",
" \"\"\"\n",
"\n",
" if is_categorical(arr):\n",
" return True\n",
" elif is_sparse(arr):\n",
" return True\n",
" elif is_datetime64tz_dtype(arr):\n",
" return True\n",
" return False"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 57 | 0.001096 |
def expires(duration, vary=None, currtime=time.time):
"""Decorator. Apply on a :class:`wsgiservice.Resource` method to set the
max-age cache control parameter to the given duration. Also calculates
the correct ``Expires`` response header.
:param duration: Age which this resource may have before becoming stale.
:type duration: :mod:`datetime.timedelta`
:param vary: List of headers that should be added to the Vary response
header.
:type vary: list of strings
:param currtime: Function used to find out the current UTC time. This is
used for testing and not required in production code.
:type currtime: Function returning a :mod:`time.struct_time`
"""
if isinstance(duration, timedelta):
duration = timedelta_to_seconds(duration)
@decorator
def _expires(func, *args, **kwargs):
"Sets the expirations header to the given duration."
res = args[0].response
res.cache_control.max_age = duration
res.expires = currtime() + duration
if vary:
if res.vary is None:
res.vary = vary
else:
                # A bit complicated because res.vary is usually a tuple.
res.vary = list(set(list(res.vary) + list(vary)))
return func(*args, **kwargs)
return _expires | [
"def",
"expires",
"(",
"duration",
",",
"vary",
"=",
"None",
",",
"currtime",
"=",
"time",
".",
"time",
")",
":",
"if",
"isinstance",
"(",
"duration",
",",
"timedelta",
")",
":",
"duration",
"=",
"timedelta_to_seconds",
"(",
"duration",
")",
"@",
"decorator",
"def",
"_expires",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"Sets the expirations header to the given duration.\"",
"res",
"=",
"args",
"[",
"0",
"]",
".",
"response",
"res",
".",
"cache_control",
".",
"max_age",
"=",
"duration",
"res",
".",
"expires",
"=",
"currtime",
"(",
")",
"+",
"duration",
"if",
"vary",
":",
"if",
"res",
".",
"vary",
"is",
"None",
":",
"res",
".",
"vary",
"=",
"vary",
"else",
":",
"# A bit completed because res.vary is usually a tuple.",
"res",
".",
"vary",
"=",
"list",
"(",
"set",
"(",
"list",
"(",
"res",
".",
"vary",
")",
"+",
"list",
"(",
"vary",
")",
")",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_expires"
] | 38.911765 | 0.000737 | [
"def expires(duration, vary=None, currtime=time.time):\n",
" \"\"\"Decorator. Apply on a :class:`wsgiservice.Resource` method to set the\n",
" max-age cache control parameter to the given duration. Also calculates\n",
" the correct ``Expires`` response header.\n",
"\n",
" :param duration: Age which this resource may have before becoming stale.\n",
" :type duration: :mod:`datetime.timedelta`\n",
" :param vary: List of headers that should be added to the Vary response\n",
" header.\n",
" :type vary: list of strings\n",
" :param currtime: Function used to find out the current UTC time. This is\n",
" used for testing and not required in production code.\n",
" :type currtime: Function returning a :mod:`time.struct_time`\n",
" \"\"\"\n",
" if isinstance(duration, timedelta):\n",
" duration = timedelta_to_seconds(duration)\n",
"\n",
" @decorator\n",
" def _expires(func, *args, **kwargs):\n",
" \"Sets the expirations header to the given duration.\"\n",
" res = args[0].response\n",
"\n",
" res.cache_control.max_age = duration\n",
" res.expires = currtime() + duration\n",
"\n",
" if vary:\n",
" if res.vary is None:\n",
" res.vary = vary\n",
" else:\n",
" # A bit completed because res.vary is usually a tuple.\n",
" res.vary = list(set(list(res.vary) + list(vary)))\n",
"\n",
" return func(*args, **kwargs)\n",
" return _expires"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 34 | 0.001548 |
def _notify_add_at(self, index, length=1):
"""Notify about an AddChange at a caertain index and length."""
slice_ = self._slice_at(index, length)
self._notify_add(slice_) | [
"def",
"_notify_add_at",
"(",
"self",
",",
"index",
",",
"length",
"=",
"1",
")",
":",
"slice_",
"=",
"self",
".",
"_slice_at",
"(",
"index",
",",
"length",
")",
"self",
".",
"_notify_add",
"(",
"slice_",
")"
] | 47.75 | 0.010309 | [
"def _notify_add_at(self, index, length=1):\n",
" \"\"\"Notify about an AddChange at a caertain index and length.\"\"\"\n",
" slice_ = self._slice_at(index, length)\n",
" self._notify_add(slice_)"
] | [
0,
0.013888888888888888,
0,
0.03125
] | 4 | 0.011285 |
def sky(lon=None,lat=None,size=1):
"""
Outputs uniform points on sphere from:
[0 < lon < 360] & [-90 < lat < 90]
"""
if lon is None:
umin,umax = 0,1
else:
lon = np.asarray(lon)
lon = np.radians(lon + 360.*(lon<0))
if lon.size==1: umin=umax=lon/(2*np.pi)
elif lon.size==2: umin,umax=lon/(2*np.pi)
else: raise Exception('...')
if lat is None:
vmin,vmax = -1,1
else:
lat = np.asarray(lat)
lat = np.radians(90 - lat)
if lat.size==1: vmin=vmax=np.cos(lat)
elif lat.size==2: vmin,vmax=np.cos(lat)
else: raise Exception('...')
phi = 2*np.pi*np.random.uniform(umin,umax,size=size)
theta = np.arcsin(np.random.uniform(vmin,vmax,size=size))
return np.degrees(phi),np.degrees(theta) | [
"def",
"sky",
"(",
"lon",
"=",
"None",
",",
"lat",
"=",
"None",
",",
"size",
"=",
"1",
")",
":",
"if",
"lon",
"is",
"None",
":",
"umin",
",",
"umax",
"=",
"0",
",",
"1",
"else",
":",
"lon",
"=",
"np",
".",
"asarray",
"(",
"lon",
")",
"lon",
"=",
"np",
".",
"radians",
"(",
"lon",
"+",
"360.",
"*",
"(",
"lon",
"<",
"0",
")",
")",
"if",
"lon",
".",
"size",
"==",
"1",
":",
"umin",
"=",
"umax",
"=",
"lon",
"/",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"elif",
"lon",
".",
"size",
"==",
"2",
":",
"umin",
",",
"umax",
"=",
"lon",
"/",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"else",
":",
"raise",
"Exception",
"(",
"'...'",
")",
"if",
"lat",
"is",
"None",
":",
"vmin",
",",
"vmax",
"=",
"-",
"1",
",",
"1",
"else",
":",
"lat",
"=",
"np",
".",
"asarray",
"(",
"lat",
")",
"lat",
"=",
"np",
".",
"radians",
"(",
"90",
"-",
"lat",
")",
"if",
"lat",
".",
"size",
"==",
"1",
":",
"vmin",
"=",
"vmax",
"=",
"np",
".",
"cos",
"(",
"lat",
")",
"elif",
"lat",
".",
"size",
"==",
"2",
":",
"vmin",
",",
"vmax",
"=",
"np",
".",
"cos",
"(",
"lat",
")",
"else",
":",
"raise",
"Exception",
"(",
"'...'",
")",
"phi",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"random",
".",
"uniform",
"(",
"umin",
",",
"umax",
",",
"size",
"=",
"size",
")",
"theta",
"=",
"np",
".",
"arcsin",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"vmin",
",",
"vmax",
",",
"size",
"=",
"size",
")",
")",
"return",
"np",
".",
"degrees",
"(",
"phi",
")",
",",
"np",
".",
"degrees",
"(",
"theta",
")"
] | 30.769231 | 0.041212 | [
"def sky(lon=None,lat=None,size=1):\n",
" \"\"\"\n",
" Outputs uniform points on sphere from:\n",
" [0 < lon < 360] & [-90 < lat < 90]\n",
" \"\"\"\n",
" if lon is None:\n",
" umin,umax = 0,1\n",
" else:\n",
" lon = np.asarray(lon)\n",
" lon = np.radians(lon + 360.*(lon<0))\n",
" if lon.size==1: umin=umax=lon/(2*np.pi)\n",
" elif lon.size==2: umin,umax=lon/(2*np.pi)\n",
" else: raise Exception('...')\n",
" \n",
" if lat is None:\n",
" vmin,vmax = -1,1\n",
" else:\n",
" lat = np.asarray(lat)\n",
" lat = np.radians(90 - lat)\n",
" if lat.size==1: vmin=vmax=np.cos(lat)\n",
" elif lat.size==2: vmin,vmax=np.cos(lat)\n",
" else: raise Exception('...')\n",
"\n",
" phi = 2*np.pi*np.random.uniform(umin,umax,size=size)\n",
" theta = np.arcsin(np.random.uniform(vmin,vmax,size=size))\n",
" return np.degrees(phi),np.degrees(theta)"
] | [
0.05714285714285714,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.022222222222222223,
0.1,
0.08,
0.02702702702702703,
0.1111111111111111,
0,
0.08,
0,
0,
0,
0.10416666666666667,
0.08333333333333333,
0.02702702702702703,
0,
0.03508771929824561,
0.03225806451612903,
0.045454545454545456
] | 26 | 0.03416 |
def validate(self, request, data):
"""
Validate response from OpenID server.
    Set identity in case of successful validation.
"""
client = consumer.Consumer(request.session, None)
try:
resp = client.complete(data, request.session['openid_return_to'])
except KeyError:
messages.error(request, lang.INVALID_RESPONSE_FROM_OPENID)
return redirect('netauth-login')
if resp.status == consumer.CANCEL:
messages.warning(request, lang.OPENID_CANCELED)
return redirect('netauth-login')
elif resp.status == consumer.FAILURE:
messages.error(request, lang.OPENID_FAILED % resp.message)
return redirect('netauth-login')
elif resp.status == consumer.SUCCESS:
self.identity = resp.identity_url
del request.session['openid_return_to']
return resp | [
"def",
"validate",
"(",
"self",
",",
"request",
",",
"data",
")",
":",
"client",
"=",
"consumer",
".",
"Consumer",
"(",
"request",
".",
"session",
",",
"None",
")",
"try",
":",
"resp",
"=",
"client",
".",
"complete",
"(",
"data",
",",
"request",
".",
"session",
"[",
"'openid_return_to'",
"]",
")",
"except",
"KeyError",
":",
"messages",
".",
"error",
"(",
"request",
",",
"lang",
".",
"INVALID_RESPONSE_FROM_OPENID",
")",
"return",
"redirect",
"(",
"'netauth-login'",
")",
"if",
"resp",
".",
"status",
"==",
"consumer",
".",
"CANCEL",
":",
"messages",
".",
"warning",
"(",
"request",
",",
"lang",
".",
"OPENID_CANCELED",
")",
"return",
"redirect",
"(",
"'netauth-login'",
")",
"elif",
"resp",
".",
"status",
"==",
"consumer",
".",
"FAILURE",
":",
"messages",
".",
"error",
"(",
"request",
",",
"lang",
".",
"OPENID_FAILED",
"%",
"resp",
".",
"message",
")",
"return",
"redirect",
"(",
"'netauth-login'",
")",
"elif",
"resp",
".",
"status",
"==",
"consumer",
".",
"SUCCESS",
":",
"self",
".",
"identity",
"=",
"resp",
".",
"identity_url",
"del",
"request",
".",
"session",
"[",
"'openid_return_to'",
"]",
"return",
"resp"
] | 41.272727 | 0.002153 | [
"def validate(self, request, data):\n",
" \"\"\"\n",
" Validate response from OpenID server.\n",
" Set identity in case of successfull validation.\n",
" \"\"\"\n",
" client = consumer.Consumer(request.session, None)\n",
"\n",
" try:\n",
" resp = client.complete(data, request.session['openid_return_to'])\n",
" except KeyError:\n",
" messages.error(request, lang.INVALID_RESPONSE_FROM_OPENID)\n",
" return redirect('netauth-login')\n",
" if resp.status == consumer.CANCEL:\n",
" messages.warning(request, lang.OPENID_CANCELED)\n",
" return redirect('netauth-login')\n",
" elif resp.status == consumer.FAILURE:\n",
" messages.error(request, lang.OPENID_FAILED % resp.message)\n",
" return redirect('netauth-login')\n",
" elif resp.status == consumer.SUCCESS:\n",
" self.identity = resp.identity_url\n",
" del request.session['openid_return_to']\n",
" return resp"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216
] | 22 | 0.005764 |
def get(self, query, responseformat="geojson", verbosity="body", build=True):
"""Pass in an Overpass query in Overpass QL."""
# Construct full Overpass query
if build:
full_query = self._construct_ql_query(
query, responseformat=responseformat, verbosity=verbosity
)
else:
full_query = query
if self.debug:
logging.getLogger().info(query)
# Get the response from Overpass
r = self._get_from_overpass(full_query)
content_type = r.headers.get("content-type")
if self.debug:
print(content_type)
if content_type == "text/csv":
result = []
reader = csv.reader(StringIO(r.text), delimiter="\t")
for row in reader:
result.append(row)
return result
elif content_type in ("text/xml", "application/xml", "application/osm3s+xml"):
return r.text
elif content_type == "application/json":
response = json.loads(r.text)
if not build:
return response
# Check for valid answer from Overpass.
# A valid answer contains an 'elements' key at the root level.
if "elements" not in response:
raise UnknownOverpassError("Received an invalid answer from Overpass.")
# If there is a 'remark' key, it spells trouble.
overpass_remark = response.get("remark", None)
if overpass_remark and overpass_remark.startswith("runtime error"):
raise ServerRuntimeError(overpass_remark)
if responseformat is not "geojson":
return response
# construct geojson
return self._as_geojson(response["elements"]) | [
"def",
"get",
"(",
"self",
",",
"query",
",",
"responseformat",
"=",
"\"geojson\"",
",",
"verbosity",
"=",
"\"body\"",
",",
"build",
"=",
"True",
")",
":",
"# Construct full Overpass query",
"if",
"build",
":",
"full_query",
"=",
"self",
".",
"_construct_ql_query",
"(",
"query",
",",
"responseformat",
"=",
"responseformat",
",",
"verbosity",
"=",
"verbosity",
")",
"else",
":",
"full_query",
"=",
"query",
"if",
"self",
".",
"debug",
":",
"logging",
".",
"getLogger",
"(",
")",
".",
"info",
"(",
"query",
")",
"# Get the response from Overpass",
"r",
"=",
"self",
".",
"_get_from_overpass",
"(",
"full_query",
")",
"content_type",
"=",
"r",
".",
"headers",
".",
"get",
"(",
"\"content-type\"",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"content_type",
")",
"if",
"content_type",
"==",
"\"text/csv\"",
":",
"result",
"=",
"[",
"]",
"reader",
"=",
"csv",
".",
"reader",
"(",
"StringIO",
"(",
"r",
".",
"text",
")",
",",
"delimiter",
"=",
"\"\\t\"",
")",
"for",
"row",
"in",
"reader",
":",
"result",
".",
"append",
"(",
"row",
")",
"return",
"result",
"elif",
"content_type",
"in",
"(",
"\"text/xml\"",
",",
"\"application/xml\"",
",",
"\"application/osm3s+xml\"",
")",
":",
"return",
"r",
".",
"text",
"elif",
"content_type",
"==",
"\"application/json\"",
":",
"response",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"if",
"not",
"build",
":",
"return",
"response",
"# Check for valid answer from Overpass.",
"# A valid answer contains an 'elements' key at the root level.",
"if",
"\"elements\"",
"not",
"in",
"response",
":",
"raise",
"UnknownOverpassError",
"(",
"\"Received an invalid answer from Overpass.\"",
")",
"# If there is a 'remark' key, it spells trouble.",
"overpass_remark",
"=",
"response",
".",
"get",
"(",
"\"remark\"",
",",
"None",
")",
"if",
"overpass_remark",
"and",
"overpass_remark",
".",
"startswith",
"(",
"\"runtime error\"",
")",
":",
"raise",
"ServerRuntimeError",
"(",
"overpass_remark",
")",
"if",
"responseformat",
"is",
"not",
"\"geojson\"",
":",
"return",
"response",
"# construct geojson",
"return",
"self",
".",
"_as_geojson",
"(",
"response",
"[",
"\"elements\"",
"]",
")"
] | 35.6875 | 0.002273 | [
"def get(self, query, responseformat=\"geojson\", verbosity=\"body\", build=True):\n",
" \"\"\"Pass in an Overpass query in Overpass QL.\"\"\"\n",
" # Construct full Overpass query\n",
" if build:\n",
" full_query = self._construct_ql_query(\n",
" query, responseformat=responseformat, verbosity=verbosity\n",
" )\n",
" else:\n",
" full_query = query\n",
"\n",
" if self.debug:\n",
" logging.getLogger().info(query)\n",
"\n",
" # Get the response from Overpass\n",
" r = self._get_from_overpass(full_query)\n",
" content_type = r.headers.get(\"content-type\")\n",
"\n",
" if self.debug:\n",
" print(content_type)\n",
" if content_type == \"text/csv\":\n",
" result = []\n",
" reader = csv.reader(StringIO(r.text), delimiter=\"\\t\")\n",
" for row in reader:\n",
" result.append(row)\n",
" return result\n",
" elif content_type in (\"text/xml\", \"application/xml\", \"application/osm3s+xml\"):\n",
" return r.text\n",
" elif content_type == \"application/json\":\n",
" response = json.loads(r.text)\n",
"\n",
" if not build:\n",
" return response\n",
"\n",
" # Check for valid answer from Overpass.\n",
" # A valid answer contains an 'elements' key at the root level.\n",
" if \"elements\" not in response:\n",
" raise UnknownOverpassError(\"Received an invalid answer from Overpass.\")\n",
"\n",
" # If there is a 'remark' key, it spells trouble.\n",
" overpass_remark = response.get(\"remark\", None)\n",
" if overpass_remark and overpass_remark.startswith(\"runtime error\"):\n",
" raise ServerRuntimeError(overpass_remark)\n",
"\n",
" if responseformat is not \"geojson\":\n",
" return response\n",
"\n",
" # construct geojson\n",
" return self._as_geojson(response[\"elements\"])"
] | [
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886
] | 48 | 0.001253 |
def colorz(fd, n=DEFAULT_NUM_COLORS, min_v=DEFAULT_MINV, max_v=DEFAULT_MAXV,
bold_add=DEFAULT_BOLD_ADD, order_colors=True):
"""
Get the n most dominant colors of an image.
Clamps value to between min_v and max_v.
Creates bold colors using bold_add.
Total number of colors returned is 2*n, optionally ordered by hue.
Returns as a list of pairs of RGB triples.
For terminal colors, the hue order is:
red, yellow, green, cyan, blue, magenta
"""
img = Image.open(fd)
img.thumbnail(THUMB_SIZE)
obs = get_colors(img)
clamped = [clamp(color, min_v, max_v) for color in obs]
clusters, _ = kmeans(array(clamped).astype(float), n)
colors = order_by_hue(clusters) if order_colors else clusters
return list(zip(colors, [brighten(c, bold_add) for c in colors])) | [
"def",
"colorz",
"(",
"fd",
",",
"n",
"=",
"DEFAULT_NUM_COLORS",
",",
"min_v",
"=",
"DEFAULT_MINV",
",",
"max_v",
"=",
"DEFAULT_MAXV",
",",
"bold_add",
"=",
"DEFAULT_BOLD_ADD",
",",
"order_colors",
"=",
"True",
")",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"fd",
")",
"img",
".",
"thumbnail",
"(",
"THUMB_SIZE",
")",
"obs",
"=",
"get_colors",
"(",
"img",
")",
"clamped",
"=",
"[",
"clamp",
"(",
"color",
",",
"min_v",
",",
"max_v",
")",
"for",
"color",
"in",
"obs",
"]",
"clusters",
",",
"_",
"=",
"kmeans",
"(",
"array",
"(",
"clamped",
")",
".",
"astype",
"(",
"float",
")",
",",
"n",
")",
"colors",
"=",
"order_by_hue",
"(",
"clusters",
")",
"if",
"order_colors",
"else",
"clusters",
"return",
"list",
"(",
"zip",
"(",
"colors",
",",
"[",
"brighten",
"(",
"c",
",",
"bold_add",
")",
"for",
"c",
"in",
"colors",
"]",
")",
")"
] | 38.380952 | 0.001211 | [
"def colorz(fd, n=DEFAULT_NUM_COLORS, min_v=DEFAULT_MINV, max_v=DEFAULT_MAXV,\n",
" bold_add=DEFAULT_BOLD_ADD, order_colors=True):\n",
" \"\"\"\n",
" Get the n most dominant colors of an image.\n",
" Clamps value to between min_v and max_v.\n",
"\n",
" Creates bold colors using bold_add.\n",
" Total number of colors returned is 2*n, optionally ordered by hue.\n",
" Returns as a list of pairs of RGB triples.\n",
"\n",
" For terminal colors, the hue order is:\n",
" red, yellow, green, cyan, blue, magenta\n",
" \"\"\"\n",
" img = Image.open(fd)\n",
" img.thumbnail(THUMB_SIZE)\n",
"\n",
" obs = get_colors(img)\n",
" clamped = [clamp(color, min_v, max_v) for color in obs]\n",
" clusters, _ = kmeans(array(clamped).astype(float), n)\n",
" colors = order_by_hue(clusters) if order_colors else clusters\n",
" return list(zip(colors, [brighten(c, bold_add) for c in colors]))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406
] | 21 | 0.00069 |
def generateOutputInflowFile(self,
out_nc,
start_datetime_utc,
number_of_timesteps,
simulation_time_step_seconds,
in_rapid_connect_file,
in_rivid_lat_lon_z_file,
land_surface_model_description,
modeling_institution
):
"""
Generate inflow file for RAPID
"""
self.simulation_time_step_seconds = simulation_time_step_seconds
# Create output inflow netcdf data
print("Generating inflow file ...")
data_out_nc = Dataset(out_nc, "w", format="NETCDF3_CLASSIC")
rivid_list = np.loadtxt(in_rapid_connect_file,
delimiter=",",
ndmin=1,
usecols=(0,),
dtype=int)
# create dimensions
data_out_nc.createDimension('time', number_of_timesteps)
data_out_nc.createDimension('rivid', len(rivid_list))
data_out_nc.createDimension('nv', 2)
# create variables
# m3_riv
m3_riv_var = data_out_nc.createVariable('m3_riv', 'f4',
('time', 'rivid'),
fill_value=0)
m3_riv_var.long_name = 'accumulated external water volume ' \
'inflow upstream of each river reach'
m3_riv_var.units = 'm3'
m3_riv_var.coordinates = 'lon lat'
m3_riv_var.grid_mapping = 'crs'
m3_riv_var.cell_methods = "time: sum"
data_out_nc.close()
try:
data_out_nc = Dataset(out_nc, "a", format="NETCDF3_CLASSIC")
# rivid
rivid_var = data_out_nc.createVariable('rivid', 'i4',
('rivid',))
rivid_var.long_name = 'unique identifier for each river reach'
rivid_var.units = '1'
rivid_var.cf_role = 'timeseries_id'
rivid_var[:] = rivid_list
# time
time_var = data_out_nc.createVariable('time', 'i4',
('time',))
time_var.long_name = 'time'
time_var.standard_name = 'time'
time_var.units = 'seconds since 1970-01-01 00:00:00+00:00'
time_var.axis = 'T'
time_var.calendar = 'gregorian'
time_var.bounds = 'time_bnds'
initial_time_seconds = \
(start_datetime_utc.replace(tzinfo=utc) -
datetime(1970, 1, 1, tzinfo=utc)).total_seconds()
final_time_seconds = \
initial_time_seconds + number_of_timesteps\
* simulation_time_step_seconds
time_array = np.arange(initial_time_seconds, final_time_seconds,
simulation_time_step_seconds)
time_var[:] = time_array
# time_bnds
time_bnds_var = data_out_nc.createVariable('time_bnds', 'i4',
('time', 'nv',))
for time_index, time_element in enumerate(time_array):
time_bnds_var[time_index, 0] = time_element
time_bnds_var[time_index, 1] = \
time_element + simulation_time_step_seconds
# longitude
lon_var = data_out_nc.createVariable('lon', 'f8', ('rivid',),
fill_value=-9999.0)
lon_var.long_name = \
'longitude of a point related to each river reach'
lon_var.standard_name = 'longitude'
lon_var.units = 'degrees_east'
lon_var.axis = 'X'
# latitude
lat_var = data_out_nc.createVariable('lat', 'f8', ('rivid',),
fill_value=-9999.0)
lat_var.long_name = \
'latitude of a point related to each river reach'
lat_var.standard_name = 'latitude'
lat_var.units = 'degrees_north'
lat_var.axis = 'Y'
crs_var = data_out_nc.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.epsg_code = 'EPSG:4326' # WGS 84
crs_var.semi_major_axis = 6378137.0
crs_var.inverse_flattening = 298.257223563
# add global attributes
data_out_nc.Conventions = 'CF-1.6'
data_out_nc.title = 'RAPID Inflow from {0}'\
.format(land_surface_model_description)
data_out_nc.history = 'date_created: {0}'\
.format(datetime.utcnow().replace(tzinfo=utc))
data_out_nc.featureType = 'timeSeries'
data_out_nc.institution = modeling_institution
# write lat lon data
self._write_lat_lon(data_out_nc, in_rivid_lat_lon_z_file)
# close file
data_out_nc.close()
except RuntimeError:
print("File size too big to add data beforehand."
" Performing conversion after ...") | [
"def",
"generateOutputInflowFile",
"(",
"self",
",",
"out_nc",
",",
"start_datetime_utc",
",",
"number_of_timesteps",
",",
"simulation_time_step_seconds",
",",
"in_rapid_connect_file",
",",
"in_rivid_lat_lon_z_file",
",",
"land_surface_model_description",
",",
"modeling_institution",
")",
":",
"self",
".",
"simulation_time_step_seconds",
"=",
"simulation_time_step_seconds",
"# Create output inflow netcdf data\r",
"print",
"(",
"\"Generating inflow file ...\"",
")",
"data_out_nc",
"=",
"Dataset",
"(",
"out_nc",
",",
"\"w\"",
",",
"format",
"=",
"\"NETCDF3_CLASSIC\"",
")",
"rivid_list",
"=",
"np",
".",
"loadtxt",
"(",
"in_rapid_connect_file",
",",
"delimiter",
"=",
"\",\"",
",",
"ndmin",
"=",
"1",
",",
"usecols",
"=",
"(",
"0",
",",
")",
",",
"dtype",
"=",
"int",
")",
"# create dimensions\r",
"data_out_nc",
".",
"createDimension",
"(",
"'time'",
",",
"number_of_timesteps",
")",
"data_out_nc",
".",
"createDimension",
"(",
"'rivid'",
",",
"len",
"(",
"rivid_list",
")",
")",
"data_out_nc",
".",
"createDimension",
"(",
"'nv'",
",",
"2",
")",
"# create variables\r",
"# m3_riv\r",
"m3_riv_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'m3_riv'",
",",
"'f4'",
",",
"(",
"'time'",
",",
"'rivid'",
")",
",",
"fill_value",
"=",
"0",
")",
"m3_riv_var",
".",
"long_name",
"=",
"'accumulated external water volume '",
"'inflow upstream of each river reach'",
"m3_riv_var",
".",
"units",
"=",
"'m3'",
"m3_riv_var",
".",
"coordinates",
"=",
"'lon lat'",
"m3_riv_var",
".",
"grid_mapping",
"=",
"'crs'",
"m3_riv_var",
".",
"cell_methods",
"=",
"\"time: sum\"",
"data_out_nc",
".",
"close",
"(",
")",
"try",
":",
"data_out_nc",
"=",
"Dataset",
"(",
"out_nc",
",",
"\"a\"",
",",
"format",
"=",
"\"NETCDF3_CLASSIC\"",
")",
"# rivid\r",
"rivid_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'rivid'",
",",
"'i4'",
",",
"(",
"'rivid'",
",",
")",
")",
"rivid_var",
".",
"long_name",
"=",
"'unique identifier for each river reach'",
"rivid_var",
".",
"units",
"=",
"'1'",
"rivid_var",
".",
"cf_role",
"=",
"'timeseries_id'",
"rivid_var",
"[",
":",
"]",
"=",
"rivid_list",
"# time\r",
"time_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'time'",
",",
"'i4'",
",",
"(",
"'time'",
",",
")",
")",
"time_var",
".",
"long_name",
"=",
"'time'",
"time_var",
".",
"standard_name",
"=",
"'time'",
"time_var",
".",
"units",
"=",
"'seconds since 1970-01-01 00:00:00+00:00'",
"time_var",
".",
"axis",
"=",
"'T'",
"time_var",
".",
"calendar",
"=",
"'gregorian'",
"time_var",
".",
"bounds",
"=",
"'time_bnds'",
"initial_time_seconds",
"=",
"(",
"start_datetime_utc",
".",
"replace",
"(",
"tzinfo",
"=",
"utc",
")",
"-",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
",",
"tzinfo",
"=",
"utc",
")",
")",
".",
"total_seconds",
"(",
")",
"final_time_seconds",
"=",
"initial_time_seconds",
"+",
"number_of_timesteps",
"*",
"simulation_time_step_seconds",
"time_array",
"=",
"np",
".",
"arange",
"(",
"initial_time_seconds",
",",
"final_time_seconds",
",",
"simulation_time_step_seconds",
")",
"time_var",
"[",
":",
"]",
"=",
"time_array",
"# time_bnds\r",
"time_bnds_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'time_bnds'",
",",
"'i4'",
",",
"(",
"'time'",
",",
"'nv'",
",",
")",
")",
"for",
"time_index",
",",
"time_element",
"in",
"enumerate",
"(",
"time_array",
")",
":",
"time_bnds_var",
"[",
"time_index",
",",
"0",
"]",
"=",
"time_element",
"time_bnds_var",
"[",
"time_index",
",",
"1",
"]",
"=",
"time_element",
"+",
"simulation_time_step_seconds",
"# longitude\r",
"lon_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'lon'",
",",
"'f8'",
",",
"(",
"'rivid'",
",",
")",
",",
"fill_value",
"=",
"-",
"9999.0",
")",
"lon_var",
".",
"long_name",
"=",
"'longitude of a point related to each river reach'",
"lon_var",
".",
"standard_name",
"=",
"'longitude'",
"lon_var",
".",
"units",
"=",
"'degrees_east'",
"lon_var",
".",
"axis",
"=",
"'X'",
"# latitude\r",
"lat_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'lat'",
",",
"'f8'",
",",
"(",
"'rivid'",
",",
")",
",",
"fill_value",
"=",
"-",
"9999.0",
")",
"lat_var",
".",
"long_name",
"=",
"'latitude of a point related to each river reach'",
"lat_var",
".",
"standard_name",
"=",
"'latitude'",
"lat_var",
".",
"units",
"=",
"'degrees_north'",
"lat_var",
".",
"axis",
"=",
"'Y'",
"crs_var",
"=",
"data_out_nc",
".",
"createVariable",
"(",
"'crs'",
",",
"'i4'",
")",
"crs_var",
".",
"grid_mapping_name",
"=",
"'latitude_longitude'",
"crs_var",
".",
"epsg_code",
"=",
"'EPSG:4326'",
"# WGS 84\r",
"crs_var",
".",
"semi_major_axis",
"=",
"6378137.0",
"crs_var",
".",
"inverse_flattening",
"=",
"298.257223563",
"# add global attributes\r",
"data_out_nc",
".",
"Conventions",
"=",
"'CF-1.6'",
"data_out_nc",
".",
"title",
"=",
"'RAPID Inflow from {0}'",
"",
".",
"format",
"(",
"land_surface_model_description",
")",
"data_out_nc",
".",
"history",
"=",
"'date_created: {0}'",
".",
"format",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"utc",
")",
")",
"data_out_nc",
".",
"featureType",
"=",
"'timeSeries'",
"data_out_nc",
".",
"institution",
"=",
"modeling_institution",
"# write lat lon data\r",
"self",
".",
"_write_lat_lon",
"(",
"data_out_nc",
",",
"in_rivid_lat_lon_z_file",
")",
"# close file\r",
"data_out_nc",
".",
"close",
"(",
")",
"except",
"RuntimeError",
":",
"print",
"(",
"\"File size too big to add data beforehand.\"",
"\" Performing conversion after ...\"",
")"
] | 44.882353 | 0.002015 | [
"def generateOutputInflowFile(self,\r\n",
" out_nc,\r\n",
" start_datetime_utc,\r\n",
" number_of_timesteps,\r\n",
" simulation_time_step_seconds,\r\n",
" in_rapid_connect_file,\r\n",
" in_rivid_lat_lon_z_file,\r\n",
" land_surface_model_description,\r\n",
" modeling_institution\r\n",
" ):\r\n",
" \"\"\"\r\n",
" Generate inflow file for RAPID\r\n",
" \"\"\"\r\n",
" self.simulation_time_step_seconds = simulation_time_step_seconds\r\n",
"\r\n",
" # Create output inflow netcdf data\r\n",
" print(\"Generating inflow file ...\")\r\n",
" data_out_nc = Dataset(out_nc, \"w\", format=\"NETCDF3_CLASSIC\")\r\n",
" rivid_list = np.loadtxt(in_rapid_connect_file,\r\n",
" delimiter=\",\",\r\n",
" ndmin=1,\r\n",
" usecols=(0,),\r\n",
" dtype=int)\r\n",
" # create dimensions\r\n",
" data_out_nc.createDimension('time', number_of_timesteps)\r\n",
" data_out_nc.createDimension('rivid', len(rivid_list))\r\n",
" data_out_nc.createDimension('nv', 2)\r\n",
" # create variables\r\n",
" # m3_riv\r\n",
" m3_riv_var = data_out_nc.createVariable('m3_riv', 'f4',\r\n",
" ('time', 'rivid'),\r\n",
" fill_value=0)\r\n",
" m3_riv_var.long_name = 'accumulated external water volume ' \\\r\n",
" 'inflow upstream of each river reach'\r\n",
" m3_riv_var.units = 'm3'\r\n",
" m3_riv_var.coordinates = 'lon lat'\r\n",
" m3_riv_var.grid_mapping = 'crs'\r\n",
" m3_riv_var.cell_methods = \"time: sum\"\r\n",
" data_out_nc.close()\r\n",
"\r\n",
" try:\r\n",
" data_out_nc = Dataset(out_nc, \"a\", format=\"NETCDF3_CLASSIC\")\r\n",
" # rivid\r\n",
" rivid_var = data_out_nc.createVariable('rivid', 'i4',\r\n",
" ('rivid',))\r\n",
" rivid_var.long_name = 'unique identifier for each river reach'\r\n",
" rivid_var.units = '1'\r\n",
" rivid_var.cf_role = 'timeseries_id'\r\n",
" rivid_var[:] = rivid_list\r\n",
"\r\n",
" # time\r\n",
" time_var = data_out_nc.createVariable('time', 'i4',\r\n",
" ('time',))\r\n",
" time_var.long_name = 'time'\r\n",
" time_var.standard_name = 'time'\r\n",
" time_var.units = 'seconds since 1970-01-01 00:00:00+00:00'\r\n",
" time_var.axis = 'T'\r\n",
" time_var.calendar = 'gregorian'\r\n",
" time_var.bounds = 'time_bnds'\r\n",
"\r\n",
" initial_time_seconds = \\\r\n",
" (start_datetime_utc.replace(tzinfo=utc) -\r\n",
" datetime(1970, 1, 1, tzinfo=utc)).total_seconds()\r\n",
" final_time_seconds = \\\r\n",
" initial_time_seconds + number_of_timesteps\\\r\n",
" * simulation_time_step_seconds\r\n",
" time_array = np.arange(initial_time_seconds, final_time_seconds,\r\n",
" simulation_time_step_seconds)\r\n",
" time_var[:] = time_array\r\n",
"\r\n",
" # time_bnds\r\n",
" time_bnds_var = data_out_nc.createVariable('time_bnds', 'i4',\r\n",
" ('time', 'nv',))\r\n",
" for time_index, time_element in enumerate(time_array):\r\n",
" time_bnds_var[time_index, 0] = time_element\r\n",
" time_bnds_var[time_index, 1] = \\\r\n",
" time_element + simulation_time_step_seconds\r\n",
"\r\n",
" # longitude\r\n",
" lon_var = data_out_nc.createVariable('lon', 'f8', ('rivid',),\r\n",
" fill_value=-9999.0)\r\n",
" lon_var.long_name = \\\r\n",
" 'longitude of a point related to each river reach'\r\n",
" lon_var.standard_name = 'longitude'\r\n",
" lon_var.units = 'degrees_east'\r\n",
" lon_var.axis = 'X'\r\n",
"\r\n",
" # latitude\r\n",
" lat_var = data_out_nc.createVariable('lat', 'f8', ('rivid',),\r\n",
" fill_value=-9999.0)\r\n",
" lat_var.long_name = \\\r\n",
" 'latitude of a point related to each river reach'\r\n",
" lat_var.standard_name = 'latitude'\r\n",
" lat_var.units = 'degrees_north'\r\n",
" lat_var.axis = 'Y'\r\n",
"\r\n",
" crs_var = data_out_nc.createVariable('crs', 'i4')\r\n",
" crs_var.grid_mapping_name = 'latitude_longitude'\r\n",
" crs_var.epsg_code = 'EPSG:4326' # WGS 84\r\n",
" crs_var.semi_major_axis = 6378137.0\r\n",
" crs_var.inverse_flattening = 298.257223563\r\n",
"\r\n",
" # add global attributes\r\n",
" data_out_nc.Conventions = 'CF-1.6'\r\n",
" data_out_nc.title = 'RAPID Inflow from {0}'\\\r\n",
" .format(land_surface_model_description)\r\n",
" data_out_nc.history = 'date_created: {0}'\\\r\n",
" .format(datetime.utcnow().replace(tzinfo=utc))\r\n",
" data_out_nc.featureType = 'timeSeries'\r\n",
" data_out_nc.institution = modeling_institution\r\n",
"\r\n",
" # write lat lon data\r\n",
" self._write_lat_lon(data_out_nc, in_rivid_lat_lon_z_file)\r\n",
"\r\n",
" # close file\r\n",
" data_out_nc.close()\r\n",
" except RuntimeError:\r\n",
" print(\"File size too big to add data beforehand.\"\r\n",
" \" Performing conversion after ...\")"
] | [
0,
0.023809523809523808,
0.018518518518518517,
0.01818181818181818,
0.015625,
0.017543859649122806,
0.01694915254237288,
0.015151515151515152,
0.01818181818181818,
0.02702702702702703,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886
] | 119 | 0.002242 |
def pshp_soundex_first(fname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a first name.
This is a wrapper for :py:meth:`PSHPSoundexFirst.encode`.
Parameters
----------
fname : str
The first name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pshp_soundex_first('Smith')
'S530'
>>> pshp_soundex_first('Waters')
'W352'
>>> pshp_soundex_first('James')
'J700'
>>> pshp_soundex_first('Schmidt')
'S500'
>>> pshp_soundex_first('Ashcroft')
'A220'
>>> pshp_soundex_first('John')
'J500'
>>> pshp_soundex_first('Colin')
'K400'
>>> pshp_soundex_first('Niall')
'N400'
>>> pshp_soundex_first('Sally')
'S400'
>>> pshp_soundex_first('Jane')
'J500'
"""
return PSHPSoundexFirst().encode(fname, max_length, german) | [
"def",
"pshp_soundex_first",
"(",
"fname",
",",
"max_length",
"=",
"4",
",",
"german",
"=",
"False",
")",
":",
"return",
"PSHPSoundexFirst",
"(",
")",
".",
"encode",
"(",
"fname",
",",
"max_length",
",",
"german",
")"
] | 23.409091 | 0.000932 | [
"def pshp_soundex_first(fname, max_length=4, german=False):\n",
" \"\"\"Calculate the PSHP Soundex/Viewex Coding of a first name.\n",
"\n",
" This is a wrapper for :py:meth:`PSHPSoundexFirst.encode`.\n",
"\n",
" Parameters\n",
" ----------\n",
" fname : str\n",
" The first name to encode\n",
" max_length : int\n",
" The length of the code returned (defaults to 4)\n",
" german : bool\n",
" Set to True if the name is German (different rules apply)\n",
"\n",
" Returns\n",
" -------\n",
" str\n",
" The PSHP Soundex/Viewex Coding\n",
"\n",
" Examples\n",
" --------\n",
" >>> pshp_soundex_first('Smith')\n",
" 'S530'\n",
" >>> pshp_soundex_first('Waters')\n",
" 'W352'\n",
" >>> pshp_soundex_first('James')\n",
" 'J700'\n",
" >>> pshp_soundex_first('Schmidt')\n",
" 'S500'\n",
" >>> pshp_soundex_first('Ashcroft')\n",
" 'A220'\n",
" >>> pshp_soundex_first('John')\n",
" 'J500'\n",
" >>> pshp_soundex_first('Colin')\n",
" 'K400'\n",
" >>> pshp_soundex_first('Niall')\n",
" 'N400'\n",
" >>> pshp_soundex_first('Sally')\n",
" 'S400'\n",
" >>> pshp_soundex_first('Jane')\n",
" 'J500'\n",
"\n",
" \"\"\"\n",
" return PSHPSoundexFirst().encode(fname, max_length, german)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872
] | 44 | 0.000361 |
def message(self):
"""
Return issue message.
"""
message = self.description.format(**self.parameters)
return '{code} {message}'.format(code=self.code, message=message) | [
"def",
"message",
"(",
"self",
")",
":",
"message",
"=",
"self",
".",
"description",
".",
"format",
"(",
"*",
"*",
"self",
".",
"parameters",
")",
"return",
"'{code} {message}'",
".",
"format",
"(",
"code",
"=",
"self",
".",
"code",
",",
"message",
"=",
"message",
")"
] | 33.666667 | 0.009662 | [
"def message(self):\n",
" \"\"\"\n",
" Return issue message.\n",
" \"\"\"\n",
" message = self.description.format(**self.parameters)\n",
" return '{code} {message}'.format(code=self.code, message=message)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0.0136986301369863
] | 6 | 0.016172 |
def reportMatchCompletion(cfg, results, replayData):
"""send information back to the server about the match's winners/losers"""
payload = json.dumps([cfg.flatten(), results, replayData])
ladder = cfg.ladder
return requests.post(
url = c.URL_BASE%(ladder.ipAddress, ladder.serverPort, "matchfinished"),
data = payload,
#headers=headers,
) | [
"def",
"reportMatchCompletion",
"(",
"cfg",
",",
"results",
",",
"replayData",
")",
":",
"payload",
"=",
"json",
".",
"dumps",
"(",
"[",
"cfg",
".",
"flatten",
"(",
")",
",",
"results",
",",
"replayData",
"]",
")",
"ladder",
"=",
"cfg",
".",
"ladder",
"return",
"requests",
".",
"post",
"(",
"url",
"=",
"c",
".",
"URL_BASE",
"%",
"(",
"ladder",
".",
"ipAddress",
",",
"ladder",
".",
"serverPort",
",",
"\"matchfinished\"",
")",
",",
"data",
"=",
"payload",
",",
"#headers=headers,",
")"
] | 41.555556 | 0.02356 | [
"def reportMatchCompletion(cfg, results, replayData):\n",
" \"\"\"send information back to the server about the match's winners/losers\"\"\"\n",
" payload = json.dumps([cfg.flatten(), results, replayData])\n",
" ladder = cfg.ladder\n",
" return requests.post(\n",
" url = c.URL_BASE%(ladder.ipAddress, ladder.serverPort, \"matchfinished\"),\n",
" data = payload,\n",
" #headers=headers,\n",
" )"
] | [
0,
0,
0,
0,
0,
0.06097560975609756,
0.08333333333333333,
0.038461538461538464,
0.2
] | 9 | 0.04253 |
def Pager(self, service):
"""A page generator for this service query and the provided service.
This generates a page as a result from using the provided service's query()
method until there are no more results to fetch.
Args:
service: The service object for making a query using this service query.
Yields:
A resulting page from querying the provided service.
"""
has_page = True
while has_page:
page = service.query(self)
yield page
has_page = self.HasNext(page)
if has_page:
self.NextPage() | [
"def",
"Pager",
"(",
"self",
",",
"service",
")",
":",
"has_page",
"=",
"True",
"while",
"has_page",
":",
"page",
"=",
"service",
".",
"query",
"(",
"self",
")",
"yield",
"page",
"has_page",
"=",
"self",
".",
"HasNext",
"(",
"page",
")",
"if",
"has_page",
":",
"self",
".",
"NextPage",
"(",
")"
] | 29.105263 | 0.008757 | [
"def Pager(self, service):\n",
" \"\"\"A page generator for this service query and the provided service.\n",
"\n",
" This generates a page as a result from using the provided service's query()\n",
" method until there are no more results to fetch.\n",
"\n",
" Args:\n",
" service: The service object for making a query using this service query.\n",
"\n",
" Yields:\n",
" A resulting page from querying the provided service.\n",
" \"\"\"\n",
" has_page = True\n",
" while has_page:\n",
" page = service.query(self)\n",
" yield page\n",
" has_page = self.HasNext(page)\n",
" if has_page:\n",
" self.NextPage()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0.058823529411764705,
0.027777777777777776,
0.05263157894736842,
0.043478260869565216
] | 19 | 0.011211 |
def _read_attr(attr_name):
"""
Parse attribute from file 'pefile.py' and avoid importing
this module directly.
__version__, __author__, __contact__,
"""
regex = attr_name + r"\s+=\s+'(.+)'"
if sys.version_info.major == 2:
with open('pefile.py', 'r') as f:
match = re.search(regex, f.read())
else:
with open('pefile.py', 'r', encoding='utf-8') as f:
match = re.search(regex, f.read())
# Second item in the group is the value of attribute.
return match.group(1) | [
"def",
"_read_attr",
"(",
"attr_name",
")",
":",
"regex",
"=",
"attr_name",
"+",
"r\"\\s+=\\s+'(.+)'\"",
"if",
"sys",
".",
"version_info",
".",
"major",
"==",
"2",
":",
"with",
"open",
"(",
"'pefile.py'",
",",
"'r'",
")",
"as",
"f",
":",
"match",
"=",
"re",
".",
"search",
"(",
"regex",
",",
"f",
".",
"read",
"(",
")",
")",
"else",
":",
"with",
"open",
"(",
"'pefile.py'",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"match",
"=",
"re",
".",
"search",
"(",
"regex",
",",
"f",
".",
"read",
"(",
")",
")",
"# Second item in the group is the value of attribute.",
"return",
"match",
".",
"group",
"(",
"1",
")"
] | 32.8125 | 0.001852 | [
"def _read_attr(attr_name):\n",
" \"\"\"\n",
" Parse attribute from file 'pefile.py' and avoid importing\n",
" this module directly.\n",
"\n",
" __version__, __author__, __contact__,\n",
" \"\"\"\n",
" regex = attr_name + r\"\\s+=\\s+'(.+)'\"\n",
" if sys.version_info.major == 2:\n",
" with open('pefile.py', 'r') as f:\n",
" match = re.search(regex, f.read())\n",
" else:\n",
" with open('pefile.py', 'r', encoding='utf-8') as f:\n",
" match = re.search(regex, f.read())\n",
" # Second item in the group is the value of attribute.\n",
" return match.group(1)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04
] | 16 | 0.0025 |
def _get_private_key_obj(private_key, passphrase=None):
'''
Returns a private key object based on PEM text.
'''
private_key = _text_or_file(private_key)
private_key = get_pem_entry(private_key, pem_type='(?:RSA )?PRIVATE KEY')
rsaprivkey = M2Crypto.RSA.load_key_string(
private_key, callback=_passphrase_callback(passphrase))
evpprivkey = M2Crypto.EVP.PKey()
evpprivkey.assign_rsa(rsaprivkey)
return evpprivkey | [
"def",
"_get_private_key_obj",
"(",
"private_key",
",",
"passphrase",
"=",
"None",
")",
":",
"private_key",
"=",
"_text_or_file",
"(",
"private_key",
")",
"private_key",
"=",
"get_pem_entry",
"(",
"private_key",
",",
"pem_type",
"=",
"'(?:RSA )?PRIVATE KEY'",
")",
"rsaprivkey",
"=",
"M2Crypto",
".",
"RSA",
".",
"load_key_string",
"(",
"private_key",
",",
"callback",
"=",
"_passphrase_callback",
"(",
"passphrase",
")",
")",
"evpprivkey",
"=",
"M2Crypto",
".",
"EVP",
".",
"PKey",
"(",
")",
"evpprivkey",
".",
"assign_rsa",
"(",
"rsaprivkey",
")",
"return",
"evpprivkey"
] | 40.363636 | 0.002203 | [
"def _get_private_key_obj(private_key, passphrase=None):\n",
" '''\n",
" Returns a private key object based on PEM text.\n",
" '''\n",
" private_key = _text_or_file(private_key)\n",
" private_key = get_pem_entry(private_key, pem_type='(?:RSA )?PRIVATE KEY')\n",
" rsaprivkey = M2Crypto.RSA.load_key_string(\n",
" private_key, callback=_passphrase_callback(passphrase))\n",
" evpprivkey = M2Crypto.EVP.PKey()\n",
" evpprivkey.assign_rsa(rsaprivkey)\n",
" return evpprivkey"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 11 | 0.004329 |
def is_measure(self):
"""Return true if the colum is a dimension"""
from ambry.valuetype.core import ROLE
return self.role == ROLE.MEASURE | [
"def",
"is_measure",
"(",
"self",
")",
":",
"from",
"ambry",
".",
"valuetype",
".",
"core",
"import",
"ROLE",
"return",
"self",
".",
"role",
"==",
"ROLE",
".",
"MEASURE"
] | 39.75 | 0.012346 | [
"def is_measure(self):\n",
" \"\"\"Return true if the colum is a dimension\"\"\"\n",
" from ambry.valuetype.core import ROLE\n",
" return self.role == ROLE.MEASURE"
] | [
0,
0.018518518518518517,
0,
0.025
] | 4 | 0.01088 |
def auto_repr(obj: Any, with_addr: bool = False,
sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str:
"""
Convenience function for :func:`__repr__`.
Works its way through the object's ``__dict__`` and reports accordingly.
Args:
obj: object to display
with_addr: include the memory address of ``obj``
sort_attrs: sort the attributes into alphabetical order?
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
if sort_attrs:
keys = sorted(obj.__dict__.keys())
else:
keys = obj.__dict__.keys()
elements = ["{}={}".format(k, repr(getattr(obj, k))) for k in keys]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner) | [
"def",
"auto_repr",
"(",
"obj",
":",
"Any",
",",
"with_addr",
":",
"bool",
"=",
"False",
",",
"sort_attrs",
":",
"bool",
"=",
"True",
",",
"joiner",
":",
"str",
"=",
"COMMA_SPACE",
")",
"->",
"str",
":",
"if",
"sort_attrs",
":",
"keys",
"=",
"sorted",
"(",
"obj",
".",
"__dict__",
".",
"keys",
"(",
")",
")",
"else",
":",
"keys",
"=",
"obj",
".",
"__dict__",
".",
"keys",
"(",
")",
"elements",
"=",
"[",
"\"{}={}\"",
".",
"format",
"(",
"k",
",",
"repr",
"(",
"getattr",
"(",
"obj",
",",
"k",
")",
")",
")",
"for",
"k",
"in",
"keys",
"]",
"return",
"repr_result",
"(",
"obj",
",",
"elements",
",",
"with_addr",
"=",
"with_addr",
",",
"joiner",
"=",
"joiner",
")"
] | 37.047619 | 0.001253 | [
"def auto_repr(obj: Any, with_addr: bool = False,\n",
" sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str:\n",
" \"\"\"\n",
" Convenience function for :func:`__repr__`.\n",
" Works its way through the object's ``__dict__`` and reports accordingly.\n",
"\n",
" Args:\n",
" obj: object to display\n",
" with_addr: include the memory address of ``obj``\n",
" sort_attrs: sort the attributes into alphabetical order?\n",
" joiner: string with which to join the elements\n",
"\n",
" Returns:\n",
" string: :func:`repr`-style representation\n",
" \"\"\"\n",
" if sort_attrs:\n",
" keys = sorted(obj.__dict__.keys())\n",
" else:\n",
" keys = obj.__dict__.keys()\n",
" elements = [\"{}={}\".format(k, repr(getattr(obj, k))) for k in keys]\n",
" return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863
] | 21 | 0.000652 |
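
A minimal usage sketch for auto_repr, assuming it (and the repr_result/COMMA_SPACE helpers it relies on) can be imported from the same module; the Point class is invented for illustration and the exact output depends on repr_result.

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self) -> str:
        # Delegate to auto_repr so every attribute in __dict__ is reported.
        return auto_repr(self)

print(Point(1, 2))   # something like: Point(x=1, y=2)
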
def _bootstrap_debian(name, **kwargs):
'''
Bootstrap a Debian Linux container
'''
version = kwargs.get('version', False)
if not version:
if __grains__['os'].lower() == 'debian':
version = __grains__['osrelease']
else:
version = 'stable'
release_blacklist = ['hamm', 'slink', 'potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy']
if version in release_blacklist:
raise CommandExecutionError(
'Unsupported Debian version "{0}". '
'Only "stable" or "jessie" and newer are supported'.format(version)
)
dst = _make_container_root(name)
cmd = 'debootstrap --arch=amd64 {0} {1}'.format(version, dst)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
_build_failed(dst, name)
return ret | [
"def",
"_bootstrap_debian",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"version",
"=",
"kwargs",
".",
"get",
"(",
"'version'",
",",
"False",
")",
"if",
"not",
"version",
":",
"if",
"__grains__",
"[",
"'os'",
"]",
".",
"lower",
"(",
")",
"==",
"'debian'",
":",
"version",
"=",
"__grains__",
"[",
"'osrelease'",
"]",
"else",
":",
"version",
"=",
"'stable'",
"release_blacklist",
"=",
"[",
"'hamm'",
",",
"'slink'",
",",
"'potato'",
",",
"'woody'",
",",
"'sarge'",
",",
"'etch'",
",",
"'lenny'",
",",
"'squeeze'",
",",
"'wheezy'",
"]",
"if",
"version",
"in",
"release_blacklist",
":",
"raise",
"CommandExecutionError",
"(",
"'Unsupported Debian version \"{0}\". '",
"'Only \"stable\" or \"jessie\" and newer are supported'",
".",
"format",
"(",
"version",
")",
")",
"dst",
"=",
"_make_container_root",
"(",
"name",
")",
"cmd",
"=",
"'debootstrap --arch=amd64 {0} {1}'",
".",
"format",
"(",
"version",
",",
"dst",
")",
"ret",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"if",
"ret",
"[",
"'retcode'",
"]",
"!=",
"0",
":",
"_build_failed",
"(",
"dst",
",",
"name",
")",
"return",
"ret"
] | 34.75 | 0.002334 | [
"def _bootstrap_debian(name, **kwargs):\n",
" '''\n",
" Bootstrap a Debian Linux container\n",
" '''\n",
" version = kwargs.get('version', False)\n",
" if not version:\n",
" if __grains__['os'].lower() == 'debian':\n",
" version = __grains__['osrelease']\n",
" else:\n",
" version = 'stable'\n",
"\n",
" release_blacklist = ['hamm', 'slink', 'potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy']\n",
" if version in release_blacklist:\n",
" raise CommandExecutionError(\n",
" 'Unsupported Debian version \"{0}\". '\n",
" 'Only \"stable\" or \"jessie\" and newer are supported'.format(version)\n",
" )\n",
"\n",
" dst = _make_container_root(name)\n",
" cmd = 'debootstrap --arch=amd64 {0} {1}'.format(version, dst)\n",
" ret = __salt__['cmd.run_all'](cmd, python_shell=False)\n",
" if ret['retcode'] != 0:\n",
" _build_failed(dst, name)\n",
" return ret"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 24 | 0.003362 |
def request_client_list(self, req, msg):
"""Request the list of connected clients.
The list of clients is sent as a sequence of #client-list informs.
Informs
-------
addr : str
The address of the client as host:port with host in dotted quad
notation. If the address of the client could not be determined
(because, for example, the client disconnected suddenly) then
a unique string representing the client is sent instead.
Returns
-------
success : {'ok', 'fail'}
Whether sending the client list succeeded.
informs : int
Number of #client-list inform messages sent.
Examples
--------
::
?client-list
#client-list 127.0.0.1:53600
!client-list ok 1
"""
# TODO Get list of ClientConnection* instances and implement a standard
# 'address-print' method in the ClientConnection class
clients = self._client_conns
num_clients = len(clients)
for conn in clients:
addr = conn.address
req.inform(addr)
return req.make_reply('ok', str(num_clients)) | [
"def",
"request_client_list",
"(",
"self",
",",
"req",
",",
"msg",
")",
":",
"# TODO Get list of ClientConnection* instances and implement a standard",
"# 'address-print' method in the ClientConnection class",
"clients",
"=",
"self",
".",
"_client_conns",
"num_clients",
"=",
"len",
"(",
"clients",
")",
"for",
"conn",
"in",
"clients",
":",
"addr",
"=",
"conn",
".",
"address",
"req",
".",
"inform",
"(",
"addr",
")",
"return",
"req",
".",
"make_reply",
"(",
"'ok'",
",",
"str",
"(",
"num_clients",
")",
")"
] | 32.189189 | 0.00163 | [
"def request_client_list(self, req, msg):\n",
" \"\"\"Request the list of connected clients.\n",
"\n",
" The list of clients is sent as a sequence of #client-list informs.\n",
"\n",
" Informs\n",
" -------\n",
" addr : str\n",
" The address of the client as host:port with host in dotted quad\n",
" notation. If the address of the client could not be determined\n",
" (because, for example, the client disconnected suddenly) then\n",
" a unique string representing the client is sent instead.\n",
"\n",
" Returns\n",
" -------\n",
" success : {'ok', 'fail'}\n",
" Whether sending the client list succeeded.\n",
" informs : int\n",
" Number of #client-list inform messages sent.\n",
"\n",
" Examples\n",
" --------\n",
" ::\n",
"\n",
" ?client-list\n",
" #client-list 127.0.0.1:53600\n",
" !client-list ok 1\n",
"\n",
" \"\"\"\n",
" # TODO Get list of ClientConnection* instances and implement a standard\n",
" # 'address-print' method in the ClientConnection class\n",
" clients = self._client_conns\n",
" num_clients = len(clients)\n",
" for conn in clients:\n",
" addr = conn.address\n",
" req.inform(addr)\n",
" return req.make_reply('ok', str(num_clients))"
] | [
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886
] | 37 | 0.00105 |
def get_mutations(study_id, gene_list, mutation_type=None,
case_id=None):
"""Return mutations as a list of genes and list of amino acid changes.
Parameters
----------
study_id : str
The ID of the cBio study.
Example: 'cellline_ccle_broad' or 'paad_icgc'
gene_list : list[str]
A list of genes with their HGNC symbols.
Example: ['BRAF', 'KRAS']
mutation_type : Optional[str]
The type of mutation to filter to.
mutation_type can be one of: missense, nonsense, frame_shift_ins,
frame_shift_del, splice_site
case_id : Optional[str]
The case ID within the study to filter to.
Returns
-------
    mutations : dict[str, list]
        A dict with two keys, 'gene_symbol' giving the list of gene names and
        'amino_acid_change' giving the corresponding amino acid changes.
"""
genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
gene_list_str = ','.join(gene_list)
data = {'cmd': 'getMutationData',
'case_set_id': study_id,
'genetic_profile_id': genetic_profile,
'gene_list': gene_list_str,
'skiprows': -1}
df = send_request(**data)
if case_id:
df = df[df['case_id'] == case_id]
res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
'mutation_type', mutation_type)
mutations = {'gene_symbol': list(res['gene_symbol'].values()),
'amino_acid_change': list(res['amino_acid_change'].values())}
return mutations | [
"def",
"get_mutations",
"(",
"study_id",
",",
"gene_list",
",",
"mutation_type",
"=",
"None",
",",
"case_id",
"=",
"None",
")",
":",
"genetic_profile",
"=",
"get_genetic_profiles",
"(",
"study_id",
",",
"'mutation'",
")",
"[",
"0",
"]",
"gene_list_str",
"=",
"','",
".",
"join",
"(",
"gene_list",
")",
"data",
"=",
"{",
"'cmd'",
":",
"'getMutationData'",
",",
"'case_set_id'",
":",
"study_id",
",",
"'genetic_profile_id'",
":",
"genetic_profile",
",",
"'gene_list'",
":",
"gene_list_str",
",",
"'skiprows'",
":",
"-",
"1",
"}",
"df",
"=",
"send_request",
"(",
"*",
"*",
"data",
")",
"if",
"case_id",
":",
"df",
"=",
"df",
"[",
"df",
"[",
"'case_id'",
"]",
"==",
"case_id",
"]",
"res",
"=",
"_filter_data_frame",
"(",
"df",
",",
"[",
"'gene_symbol'",
",",
"'amino_acid_change'",
"]",
",",
"'mutation_type'",
",",
"mutation_type",
")",
"mutations",
"=",
"{",
"'gene_symbol'",
":",
"list",
"(",
"res",
"[",
"'gene_symbol'",
"]",
".",
"values",
"(",
")",
")",
",",
"'amino_acid_change'",
":",
"list",
"(",
"res",
"[",
"'amino_acid_change'",
"]",
".",
"values",
"(",
")",
")",
"}",
"return",
"mutations"
] | 37.439024 | 0.000635 | [
"def get_mutations(study_id, gene_list, mutation_type=None,\n",
" case_id=None):\n",
" \"\"\"Return mutations as a list of genes and list of amino acid changes.\n",
"\n",
" Parameters\n",
" ----------\n",
" study_id : str\n",
" The ID of the cBio study.\n",
" Example: 'cellline_ccle_broad' or 'paad_icgc'\n",
" gene_list : list[str]\n",
" A list of genes with their HGNC symbols.\n",
" Example: ['BRAF', 'KRAS']\n",
" mutation_type : Optional[str]\n",
" The type of mutation to filter to.\n",
" mutation_type can be one of: missense, nonsense, frame_shift_ins,\n",
" frame_shift_del, splice_site\n",
" case_id : Optional[str]\n",
" The case ID within the study to filter to.\n",
"\n",
" Returns\n",
" -------\n",
" mutations : tuple[list]\n",
" A tuple of two lists, the first one containing a list of genes, and\n",
" the second one a list of amino acid changes in those genes.\n",
" \"\"\"\n",
" genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]\n",
" gene_list_str = ','.join(gene_list)\n",
"\n",
" data = {'cmd': 'getMutationData',\n",
" 'case_set_id': study_id,\n",
" 'genetic_profile_id': genetic_profile,\n",
" 'gene_list': gene_list_str,\n",
" 'skiprows': -1}\n",
" df = send_request(**data)\n",
" if case_id:\n",
" df = df[df['case_id'] == case_id]\n",
" res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],\n",
" 'mutation_type', mutation_type)\n",
" mutations = {'gene_symbol': list(res['gene_symbol'].values()),\n",
" 'amino_acid_change': list(res['amino_acid_change'].values())}\n",
" return mutations"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 41 | 0.00122 |
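
A hedged usage sketch for get_mutations, reusing the study and gene examples from its own docstring. It needs live access to the cBioPortal web service, so it is illustrative rather than guaranteed to run.

muts = get_mutations('paad_icgc', ['BRAF', 'KRAS'], mutation_type='missense')
for gene, change in zip(muts['gene_symbol'], muts['amino_acid_change']):
    print(gene, change)   # e.g. KRAS G12D
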
def remove_spurious_insertions(scaffolds):
"""Remove all bins whose left and right neighbors belong to the same,
different scaffold.
Example with three such insertions in two different scaffolds:
>>> scaffolds = {
... "scaffold1": [
... ["contig1", 0, 0, 100, 1],
... ["contig1", 1, 100, 200, 1],
... ["contig23", 53, 1845, 2058, -1], # <-- insertion
... ["contig1", 4, 254, 408, 1],
... ["contig1", 7, 805, 1253, 1],
... ["contig5", 23, 1500, 1605, -1],
... ["contig65", 405, 32145, 45548, -1], # <-- insertion
... ["contig5", 22, 1385, 1499, -1],
... ],
... "scaffold2": [
... ["contig8", 0, 0, 250, 1],
... ["contig17", 2454, 8754, -1], # <-- insertion
... ["contig8", 2, 320, 480, 1],
... ],
... }
>>> new_scaffolds = remove_spurious_insertions(scaffolds)
>>> for my_bin in new_scaffolds['scaffold1']:
... print(my_bin)
...
['contig1', 0, 0, 100, 1]
['contig1', 1, 100, 200, 1]
['contig1', 4, 254, 408, 1]
['contig1', 7, 805, 1253, 1]
['contig5', 23, 1500, 1605, -1]
['contig5', 22, 1385, 1499, -1]
>>> for my_bin in new_scaffolds['scaffold2']:
... print(my_bin)
...
['contig8', 0, 0, 250, 1]
['contig8', 2, 320, 480, 1]
"""
scaffolds = format_info_frags(scaffolds)
new_scaffolds = {}
for name, scaffold in scaffolds.items():
new_scaffold = []
if len(scaffold) > 2:
for i in range(len(scaffold)):
# First take care of edge cases: *-- or --*
if i == 0:
if not (
scaffold[i][0] != scaffold[i + 1][0]
and scaffold[i + 1][0] == scaffold[i + 2][0]
):
new_scaffold.append(scaffold[i])
elif i == len(scaffold) - 1:
if not (
scaffold[i][0] != scaffold[i - 1][0]
and scaffold[i - 1][0] == scaffold[i - 2][0]
):
new_scaffold.append(scaffold[i])
# Otherwise, looking for -*-
else:
if not (
scaffold[i - 1][0] == scaffold[i + 1][0]
and scaffold[i - 1][0] != scaffold[i][0]
):
new_scaffold.append(scaffold[i])
else:
# Can't remove insertions if 2 bins or less
new_scaffold = copy.deepcopy(scaffold)
new_scaffolds[name] = new_scaffold
return new_scaffolds | [
"def",
"remove_spurious_insertions",
"(",
"scaffolds",
")",
":",
"scaffolds",
"=",
"format_info_frags",
"(",
"scaffolds",
")",
"new_scaffolds",
"=",
"{",
"}",
"for",
"name",
",",
"scaffold",
"in",
"scaffolds",
".",
"items",
"(",
")",
":",
"new_scaffold",
"=",
"[",
"]",
"if",
"len",
"(",
"scaffold",
")",
">",
"2",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"scaffold",
")",
")",
":",
"# First take care of edge cases: *-- or --*",
"if",
"i",
"==",
"0",
":",
"if",
"not",
"(",
"scaffold",
"[",
"i",
"]",
"[",
"0",
"]",
"!=",
"scaffold",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"and",
"scaffold",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"==",
"scaffold",
"[",
"i",
"+",
"2",
"]",
"[",
"0",
"]",
")",
":",
"new_scaffold",
".",
"append",
"(",
"scaffold",
"[",
"i",
"]",
")",
"elif",
"i",
"==",
"len",
"(",
"scaffold",
")",
"-",
"1",
":",
"if",
"not",
"(",
"scaffold",
"[",
"i",
"]",
"[",
"0",
"]",
"!=",
"scaffold",
"[",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
"and",
"scaffold",
"[",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"scaffold",
"[",
"i",
"-",
"2",
"]",
"[",
"0",
"]",
")",
":",
"new_scaffold",
".",
"append",
"(",
"scaffold",
"[",
"i",
"]",
")",
"# Otherwise, looking for -*-",
"else",
":",
"if",
"not",
"(",
"scaffold",
"[",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"scaffold",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"and",
"scaffold",
"[",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
"!=",
"scaffold",
"[",
"i",
"]",
"[",
"0",
"]",
")",
":",
"new_scaffold",
".",
"append",
"(",
"scaffold",
"[",
"i",
"]",
")",
"else",
":",
"# Can't remove insertions if 2 bins or less",
"new_scaffold",
"=",
"copy",
".",
"deepcopy",
"(",
"scaffold",
")",
"new_scaffolds",
"[",
"name",
"]",
"=",
"new_scaffold",
"return",
"new_scaffolds"
] | 35.423077 | 0.000352 | [
"def remove_spurious_insertions(scaffolds):\n",
"\n",
" \"\"\"Remove all bins whose left and right neighbors belong to the same,\n",
" different scaffold.\n",
"\n",
" Example with three such insertions in two different scaffolds:\n",
"\n",
" >>> scaffolds = {\n",
" ... \"scaffold1\": [\n",
" ... [\"contig1\", 0, 0, 100, 1],\n",
" ... [\"contig1\", 1, 100, 200, 1],\n",
" ... [\"contig23\", 53, 1845, 2058, -1], # <-- insertion\n",
" ... [\"contig1\", 4, 254, 408, 1],\n",
" ... [\"contig1\", 7, 805, 1253, 1],\n",
" ... [\"contig5\", 23, 1500, 1605, -1],\n",
" ... [\"contig65\", 405, 32145, 45548, -1], # <-- insertion\n",
" ... [\"contig5\", 22, 1385, 1499, -1],\n",
" ... ],\n",
" ... \"scaffold2\": [\n",
" ... [\"contig8\", 0, 0, 250, 1],\n",
" ... [\"contig17\", 2454, 8754, -1], # <-- insertion\n",
" ... [\"contig8\", 2, 320, 480, 1],\n",
" ... ],\n",
" ... }\n",
"\n",
" >>> new_scaffolds = remove_spurious_insertions(scaffolds)\n",
" >>> for my_bin in new_scaffolds['scaffold1']:\n",
" ... print(my_bin)\n",
" ...\n",
" ['contig1', 0, 0, 100, 1]\n",
" ['contig1', 1, 100, 200, 1]\n",
" ['contig1', 4, 254, 408, 1]\n",
" ['contig1', 7, 805, 1253, 1]\n",
" ['contig5', 23, 1500, 1605, -1]\n",
" ['contig5', 22, 1385, 1499, -1]\n",
"\n",
" >>> for my_bin in new_scaffolds['scaffold2']:\n",
" ... print(my_bin)\n",
" ...\n",
" ['contig8', 0, 0, 250, 1]\n",
" ['contig8', 2, 320, 480, 1]\n",
"\n",
"\n",
" \"\"\"\n",
"\n",
" scaffolds = format_info_frags(scaffolds)\n",
" new_scaffolds = {}\n",
" for name, scaffold in scaffolds.items():\n",
" new_scaffold = []\n",
" if len(scaffold) > 2:\n",
" for i in range(len(scaffold)):\n",
" # First take care of edge cases: *-- or --*\n",
" if i == 0:\n",
" if not (\n",
" scaffold[i][0] != scaffold[i + 1][0]\n",
" and scaffold[i + 1][0] == scaffold[i + 2][0]\n",
" ):\n",
" new_scaffold.append(scaffold[i])\n",
" elif i == len(scaffold) - 1:\n",
" if not (\n",
" scaffold[i][0] != scaffold[i - 1][0]\n",
" and scaffold[i - 1][0] == scaffold[i - 2][0]\n",
" ):\n",
" new_scaffold.append(scaffold[i])\n",
" # Otherwise, looking for -*-\n",
" else:\n",
" if not (\n",
" scaffold[i - 1][0] == scaffold[i + 1][0]\n",
" and scaffold[i - 1][0] != scaffold[i][0]\n",
" ):\n",
" new_scaffold.append(scaffold[i])\n",
" else:\n",
" # Can't remove insertions if 2 bins or less\n",
" new_scaffold = copy.deepcopy(scaffold)\n",
"\n",
" new_scaffolds[name] = new_scaffold\n",
"\n",
" return new_scaffolds"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664
] | 78 | 0.000534 |
def operational(ctx, commands, format, xpath):
""" Execute operational mode command(s).
This function will send operational mode commands to a Junos
device. jaide.utils.clean_lines() is used to determine how we are
receiving commands, and ignore comment lines or blank lines in
a command file.
    @param ctx: The click context parameter, for receiving the object dictionary
| being manipulated by other previous functions. Needed by any
| function with the @click.pass_context decorator.
@type ctx: click.Context
@param commands: The op commands to send to the device. Can be one of
| four things:
| 1. A single op command as a string.
| 2. A string of comma separated op commands.
| 3. A python list of op commands.
| 4. A filepath of a file with op commands on each
| line.
@type commands: str
@param format: String specifying what format to request for the
| response from the device. Defaults to 'text', but
| also accepts 'xml'.
@type format: str
@param xpath: An xpath expression on which we should filter the results.
| This enforces 'xml' for the format of the response.
@type xpath: str
@returns: None. Functions part of click relating to the command group
| 'main' do not return anything. Click handles passing context
              | between the functions and maintaining command order and chaining.
"""
mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
for ip in ctx.obj['hosts']:
mp_pool.apply_async(wrap.open_connection, args=(ip,
ctx.obj['conn']['username'],
ctx.obj['conn']['password'],
wrap.command, [commands, format, xpath],
ctx.obj['out'],
ctx.obj['conn']['connect_timeout'],
ctx.obj['conn']['session_timeout'],
ctx.obj['conn']['port']), callback=write_out)
mp_pool.close()
mp_pool.join() | [
"def",
"operational",
"(",
"ctx",
",",
"commands",
",",
"format",
",",
"xpath",
")",
":",
"mp_pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"*",
"2",
")",
"for",
"ip",
"in",
"ctx",
".",
"obj",
"[",
"'hosts'",
"]",
":",
"mp_pool",
".",
"apply_async",
"(",
"wrap",
".",
"open_connection",
",",
"args",
"=",
"(",
"ip",
",",
"ctx",
".",
"obj",
"[",
"'conn'",
"]",
"[",
"'username'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'conn'",
"]",
"[",
"'password'",
"]",
",",
"wrap",
".",
"command",
",",
"[",
"commands",
",",
"format",
",",
"xpath",
"]",
",",
"ctx",
".",
"obj",
"[",
"'out'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'conn'",
"]",
"[",
"'connect_timeout'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'conn'",
"]",
"[",
"'session_timeout'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'conn'",
"]",
"[",
"'port'",
"]",
")",
",",
"callback",
"=",
"write_out",
")",
"mp_pool",
".",
"close",
"(",
")",
"mp_pool",
".",
"join",
"(",
")"
] | 49.5 | 0.00045 | [
"def operational(ctx, commands, format, xpath):\n",
" \"\"\" Execute operational mode command(s).\n",
"\n",
" This function will send operational mode commands to a Junos\n",
" device. jaide.utils.clean_lines() is used to determine how we are\n",
" receiving commands, and ignore comment lines or blank lines in\n",
" a command file.\n",
"\n",
" @param ctx: The click context paramter, for receiving the object dictionary\n",
" | being manipulated by other previous functions. Needed by any\n",
" | function with the @click.pass_context decorator.\n",
" @type ctx: click.Context\n",
" @param commands: The op commands to send to the device. Can be one of\n",
" | four things:\n",
" | 1. A single op command as a string.\n",
" | 2. A string of comma separated op commands.\n",
" | 3. A python list of op commands.\n",
" | 4. A filepath of a file with op commands on each\n",
" | line.\n",
" @type commands: str\n",
" @param format: String specifying what format to request for the\n",
" | response from the device. Defaults to 'text', but\n",
" | also accepts 'xml'.\n",
" @type format: str\n",
" @param xpath: An xpath expression on which we should filter the results.\n",
" | This enforces 'xml' for the format of the response.\n",
" @type xpath: str\n",
"\n",
" @returns: None. Functions part of click relating to the command group\n",
" | 'main' do not return anything. Click handles passing context\n",
" | between the functions and maintaing command order and chaining.\n",
" \"\"\"\n",
" mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)\n",
" for ip in ctx.obj['hosts']:\n",
" mp_pool.apply_async(wrap.open_connection, args=(ip,\n",
" ctx.obj['conn']['username'],\n",
" ctx.obj['conn']['password'],\n",
" wrap.command, [commands, format, xpath],\n",
" ctx.obj['out'],\n",
" ctx.obj['conn']['connect_timeout'],\n",
" ctx.obj['conn']['session_timeout'],\n",
" ctx.obj['conn']['port']), callback=write_out)\n",
" mp_pool.close()\n",
" mp_pool.join()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 44 | 0.001263 |
def _merge_cdicts(self, clut, exdict, separator):
"""Merge callable look-up tables from two objects."""
if not self._full_cname:
return
# Find all callables that are not in self exceptions dictionary
# and create new tokens for them
repl_dict = {}
for key, value in _sorted_keys_items(clut):
otoken = self._clut.get(key, None)
if not otoken:
otoken = str(len(self._clut))
self._clut[key] = otoken
repl_dict[value] = otoken
    # Update the other dictionaries so they map to self's
# exceptions dictionary
for fdict in exdict.values():
for entry in fdict.values():
olist = []
for item in entry["function"]:
if item is None:
# Callable name is None when callable is
# part of exclude list
olist.append(None)
else:
itokens = item.split(separator)
itokens = [repl_dict.get(itoken) for itoken in itokens]
olist.append(separator.join(itokens))
entry["function"] = olist | [
"def",
"_merge_cdicts",
"(",
"self",
",",
"clut",
",",
"exdict",
",",
"separator",
")",
":",
"if",
"not",
"self",
".",
"_full_cname",
":",
"return",
"# Find all callables that are not in self exceptions dictionary",
"# and create new tokens for them",
"repl_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"_sorted_keys_items",
"(",
"clut",
")",
":",
"otoken",
"=",
"self",
".",
"_clut",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"not",
"otoken",
":",
"otoken",
"=",
"str",
"(",
"len",
"(",
"self",
".",
"_clut",
")",
")",
"self",
".",
"_clut",
"[",
"key",
"]",
"=",
"otoken",
"repl_dict",
"[",
"value",
"]",
"=",
"otoken",
"# Update other dictionaries to the mapping to self",
"# exceptions dictionary",
"for",
"fdict",
"in",
"exdict",
".",
"values",
"(",
")",
":",
"for",
"entry",
"in",
"fdict",
".",
"values",
"(",
")",
":",
"olist",
"=",
"[",
"]",
"for",
"item",
"in",
"entry",
"[",
"\"function\"",
"]",
":",
"if",
"item",
"is",
"None",
":",
"# Callable name is None when callable is",
"# part of exclude list",
"olist",
".",
"append",
"(",
"None",
")",
"else",
":",
"itokens",
"=",
"item",
".",
"split",
"(",
"separator",
")",
"itokens",
"=",
"[",
"repl_dict",
".",
"get",
"(",
"itoken",
")",
"for",
"itoken",
"in",
"itokens",
"]",
"olist",
".",
"append",
"(",
"separator",
".",
"join",
"(",
"itokens",
")",
")",
"entry",
"[",
"\"function\"",
"]",
"=",
"olist"
] | 39.892857 | 0.000874 | [
"def _merge_cdicts(self, clut, exdict, separator):\n",
" \"\"\"Merge callable look-up tables from two objects.\"\"\"\n",
" if not self._full_cname:\n",
" return\n",
" # Find all callables that are not in self exceptions dictionary\n",
" # and create new tokens for them\n",
" repl_dict = {}\n",
" for key, value in _sorted_keys_items(clut):\n",
" otoken = self._clut.get(key, None)\n",
" if not otoken:\n",
" otoken = str(len(self._clut))\n",
" self._clut[key] = otoken\n",
" repl_dict[value] = otoken\n",
" # Update other dictionaries to the mapping to self\n",
" # exceptions dictionary\n",
" for fdict in exdict.values():\n",
" for entry in fdict.values():\n",
" olist = []\n",
" for item in entry[\"function\"]:\n",
" if item is None:\n",
" # Callable name is None when callable is\n",
" # part of exclude list\n",
" olist.append(None)\n",
" else:\n",
" itokens = item.split(separator)\n",
" itokens = [repl_dict.get(itoken) for itoken in itokens]\n",
" olist.append(separator.join(itokens))\n",
" entry[\"function\"] = olist"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703
] | 28 | 0.000965 |
def set_autocamera(self,mode='density'):
"""
- set_autocamera(mode='density'): By default, Scene defines its
        own Camera. However, there is no general way of doing so. Scene
uses a density criterion for getting the point of view. If this is
not a good option for your problem, you can choose among:
        |'minmax'|'density'|'median'|'mean'|. If none of the previous methods
work well, you may define the camera params by yourself.
"""
self.Camera.set_autocamera(self._Particles,mode=mode)
self._camera_params = self.Camera.get_params()
self._x, self._y, self._hsml, self._kview = self.__compute_scene()
self._m = self._Particles._mass[self._kview] | [
"def",
"set_autocamera",
"(",
"self",
",",
"mode",
"=",
"'density'",
")",
":",
"self",
".",
"Camera",
".",
"set_autocamera",
"(",
"self",
".",
"_Particles",
",",
"mode",
"=",
"mode",
")",
"self",
".",
"_camera_params",
"=",
"self",
".",
"Camera",
".",
"get_params",
"(",
")",
"self",
".",
"_x",
",",
"self",
".",
"_y",
",",
"self",
".",
"_hsml",
",",
"self",
".",
"_kview",
"=",
"self",
".",
"__compute_scene",
"(",
")",
"self",
".",
"_m",
"=",
"self",
".",
"_Particles",
".",
"_mass",
"[",
"self",
".",
"_kview",
"]"
] | 56.230769 | 0.009421 | [
"def set_autocamera(self,mode='density'):\n",
" \"\"\"\n",
" - set_autocamera(mode='density'): By default, Scene defines its \n",
" own Camera. However, there is no a general way for doing so. Scene \n",
" uses a density criterion for getting the point of view. If this is \n",
" not a good option for your problem, you can choose among:\n",
" |'minmax'|'density'|'median'|'mean'|. If None of the previous methods\n",
" work well, you may define the camera params by yourself.\n",
" \"\"\"\n",
" self.Camera.set_autocamera(self._Particles,mode=mode)\n",
" self._camera_params = self.Camera.get_params()\n",
" self._x, self._y, self._hsml, self._kview = self.__compute_scene()\n",
" self._m = self._Particles._mass[self._kview]"
] | [
0.024390243902439025,
0.08333333333333333,
0.0136986301369863,
0.013157894736842105,
0.013157894736842105,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0.019230769230769232
] | 13 | 0.014084 |
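
An illustrative call pattern for set_autocamera; the Scene construction and the Camera attribute are only assumed from the surrounding API, and the snippet needs the rest of the library plus particle data to actually run.

scene = Scene(particles)              # `particles` assumed to be a Particles object
scene.set_autocamera(mode='minmax')   # or 'density', 'median', 'mean'
params = scene.Camera.get_params()    # inspect the camera chosen by that mode
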
def create(self):
""" Creates the directory and all its parent directories if it does not
exist yet
"""
if self.dirname and not os.path.exists(self.dirname):
os.makedirs(self.dirname) | [
"def",
"create",
"(",
"self",
")",
":",
"if",
"self",
".",
"dirname",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"dirname",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"dirname",
")"
] | 37 | 0.008811 | [
"def create(self):\n",
" \"\"\" Creates the directory and all its parent directories if it does not\n",
" exist yet\n",
" \"\"\"\n",
" if self.dirname and not os.path.exists(self.dirname):\n",
" os.makedirs(self.dirname)"
] | [
0,
0.0125,
0,
0,
0,
0.02702702702702703
] | 6 | 0.006588 |
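
The same create-parents-if-missing behaviour, sketched with only the standard library so it can run directly; the path below is a throwaway example rather than anything from the record.

import os
import tempfile

dirname = os.path.join(tempfile.gettempdir(), 'demo', 'nested', 'path')
if dirname and not os.path.exists(dirname):
    os.makedirs(dirname)
print(os.path.isdir(dirname))   # True
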
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Poll request payload and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
"""
super(PollRequestPayload, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(
enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE,
local_stream
):
self._asynchronous_correlation_value = primitives.ByteString(
tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE
)
self._asynchronous_correlation_value.read(
local_stream,
kmip_version=kmip_version
)
self.is_oversized(local_stream) | [
"def",
"read",
"(",
"self",
",",
"input_stream",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"PollRequestPayload",
",",
"self",
")",
".",
"read",
"(",
"input_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_stream",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_stream",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"ASYNCHRONOUS_CORRELATION_VALUE",
",",
"local_stream",
")",
":",
"self",
".",
"_asynchronous_correlation_value",
"=",
"primitives",
".",
"ByteString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"ASYNCHRONOUS_CORRELATION_VALUE",
")",
"self",
".",
"_asynchronous_correlation_value",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"is_oversized",
"(",
"local_stream",
")"
] | 36.611111 | 0.001478 | [
"def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n",
" \"\"\"\n",
" Read the data encoding the Poll request payload and decode it into\n",
" its constituent parts.\n",
"\n",
" Args:\n",
" input_stream (stream): A data stream containing encoded object\n",
" data, supporting a read method; usually a BytearrayStream\n",
" object.\n",
" kmip_version (KMIPVersion): An enumeration defining the KMIP\n",
" version with which the object will be decoded. Optional,\n",
" defaults to KMIP 1.0.\n",
"\n",
" Raises:\n",
" ValueError: Raised if the data attribute is missing from the\n",
" encoded payload.\n",
" \"\"\"\n",
" super(PollRequestPayload, self).read(\n",
" input_stream,\n",
" kmip_version=kmip_version\n",
" )\n",
" local_stream = utils.BytearrayStream(input_stream.read(self.length))\n",
"\n",
" if self.is_tag_next(\n",
" enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE,\n",
" local_stream\n",
" ):\n",
" self._asynchronous_correlation_value = primitives.ByteString(\n",
" tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE\n",
" )\n",
" self._asynchronous_correlation_value.read(\n",
" local_stream,\n",
" kmip_version=kmip_version\n",
" )\n",
"\n",
" self.is_oversized(local_stream)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564
] | 36 | 0.003027 |
def rand_crop(*args, padding_mode='reflection', p:float=1.):
"Randomized version of `crop_pad`."
return crop_pad(*args, **rand_pos, padding_mode=padding_mode, p=p) | [
"def",
"rand_crop",
"(",
"*",
"args",
",",
"padding_mode",
"=",
"'reflection'",
",",
"p",
":",
"float",
"=",
"1.",
")",
":",
"return",
"crop_pad",
"(",
"*",
"args",
",",
"*",
"*",
"rand_pos",
",",
"padding_mode",
"=",
"padding_mode",
",",
"p",
"=",
"p",
")"
] | 56.333333 | 0.023392 | [
"def rand_crop(*args, padding_mode='reflection', p:float=1.):\n",
" \"Randomized version of `crop_pad`.\"\n",
" return crop_pad(*args, **rand_pos, padding_mode=padding_mode, p=p)"
] | [
0.04918032786885246,
0,
0.014285714285714285
] | 3 | 0.021155 |
def elcm_session_delete(irmc_info, session_id, terminate=False):
"""send an eLCM request to remove a session from the session list
:param irmc_info: node info
:param session_id: session id
:param terminate: a running session must be terminated before removing
:raises: ELCMSessionNotFound if the session does not exist
:raises: SCCIClientError if SCCI failed
"""
    # Terminate the session first if needed
if terminate:
# Get session status to check
session = elcm_session_get_status(irmc_info, session_id)
status = session['Session']['Status']
# Terminate session if it is activated or running
if status == 'running' or status == 'activated':
elcm_session_terminate(irmc_info, session_id)
# Send DELETE request to the server
resp = elcm_request(irmc_info,
method='DELETE',
path='/sessionInformation/%s/remove' % session_id)
if resp.status_code == 200:
return
elif resp.status_code == 404:
raise ELCMSessionNotFound('Session "%s" does not exist' % session_id)
else:
raise scci.SCCIClientError(('Failed to remove session '
'"%(session)s" with error code %(error)s' %
{'session': session_id,
'error': resp.status_code})) | [
"def",
"elcm_session_delete",
"(",
"irmc_info",
",",
"session_id",
",",
"terminate",
"=",
"False",
")",
":",
"# Terminate the session first if needs to",
"if",
"terminate",
":",
"# Get session status to check",
"session",
"=",
"elcm_session_get_status",
"(",
"irmc_info",
",",
"session_id",
")",
"status",
"=",
"session",
"[",
"'Session'",
"]",
"[",
"'Status'",
"]",
"# Terminate session if it is activated or running",
"if",
"status",
"==",
"'running'",
"or",
"status",
"==",
"'activated'",
":",
"elcm_session_terminate",
"(",
"irmc_info",
",",
"session_id",
")",
"# Send DELETE request to the server",
"resp",
"=",
"elcm_request",
"(",
"irmc_info",
",",
"method",
"=",
"'DELETE'",
",",
"path",
"=",
"'/sessionInformation/%s/remove'",
"%",
"session_id",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"return",
"elif",
"resp",
".",
"status_code",
"==",
"404",
":",
"raise",
"ELCMSessionNotFound",
"(",
"'Session \"%s\" does not exist'",
"%",
"session_id",
")",
"else",
":",
"raise",
"scci",
".",
"SCCIClientError",
"(",
"(",
"'Failed to remove session '",
"'\"%(session)s\" with error code %(error)s'",
"%",
"{",
"'session'",
":",
"session_id",
",",
"'error'",
":",
"resp",
".",
"status_code",
"}",
")",
")"
] | 41.757576 | 0.000709 | [
"def elcm_session_delete(irmc_info, session_id, terminate=False):\n",
" \"\"\"send an eLCM request to remove a session from the session list\n",
"\n",
" :param irmc_info: node info\n",
" :param session_id: session id\n",
" :param terminate: a running session must be terminated before removing\n",
" :raises: ELCMSessionNotFound if the session does not exist\n",
" :raises: SCCIClientError if SCCI failed\n",
" \"\"\"\n",
" # Terminate the session first if needs to\n",
" if terminate:\n",
" # Get session status to check\n",
" session = elcm_session_get_status(irmc_info, session_id)\n",
" status = session['Session']['Status']\n",
"\n",
" # Terminate session if it is activated or running\n",
" if status == 'running' or status == 'activated':\n",
" elcm_session_terminate(irmc_info, session_id)\n",
"\n",
" # Send DELETE request to the server\n",
" resp = elcm_request(irmc_info,\n",
" method='DELETE',\n",
" path='/sessionInformation/%s/remove' % session_id)\n",
"\n",
" if resp.status_code == 200:\n",
" return\n",
" elif resp.status_code == 404:\n",
" raise ELCMSessionNotFound('Session \"%s\" does not exist' % session_id)\n",
" else:\n",
" raise scci.SCCIClientError(('Failed to remove session '\n",
" '\"%(session)s\" with error code %(error)s' %\n",
" {'session': session_id,\n",
" 'error': resp.status_code}))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385
] | 33 | 0.000466 |
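
A hedged usage sketch for elcm_session_delete. It needs a reachable iRMC and the surrounding scciclient package, so it is illustrative only; the node details are dummies and the irmc_info key names are an assumption about what the library expects.

irmc_info = {
    'irmc_address': '192.0.2.10',   # dummy management address
    'irmc_username': 'admin',
    'irmc_password': 'secret',
}

# Remove session "5", terminating it first if it is still running.
elcm_session_delete(irmc_info, '5', terminate=True)
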