content (string) | sha1 (string, 40 chars) | id (int64)
---|---|---|
def pytest_funcarg__testname(request):
"""
The testname as string, or ``None``, if no testname is known.
This is the parameter added by the test generation hook, or ``None`` if no
parameter was set, because test generation didn't add a call for this test.
"""
return getattr(request, 'param', None) | 87444cda36635b21c27d260835f96670d6b2d215 | 2,000 |
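A minimal companion sketch of the generation side (hypothetical hook, legacy funcarg API assumed): indirect parametrization is what populates the `request.param` read above.
def pytest_generate_tests(metafunc):
    # Hypothetical hook: parametrizing "testname" indirectly stores each value
    # in request.param, which pytest_funcarg__testname then returns.
    if "testname" in metafunc.fixturenames:
        metafunc.parametrize("testname", ["case_a", "case_b"], indirect=True)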
import pathlib
import json
import logging
import logging.config
def try_log_conf_file(file_path: pathlib.Path) -> bool:
"""Try to open and apply a JSON logging configuration file.
file_path: path to the logging configuration file
return: True if the configuration was loaded successfully, False otherwise
"""
global logger
try:
with file_path.open() as f:
logger_conf = json.load(f)
logging.config.dictConfig(logger_conf)
logger = logging.getLogger(__name__)
logger.debug("logger started from %s", str(pathlib.Path.cwd()))
logger.info("%s found", str(file_path))
return True
except FileNotFoundError as e:
logger.info("%s not found: %s", str(file_path), str(e))
return False | c24d4d15fc43870639acac575562d0e1487bfcf5 | 2,001 |
def notes_to_editor_view(notes):
"""Convert notes object content to more readble view
Args:
notes (list): list of note object
Returns:
list: list of note object
"""
for note in notes:
note.content = to_editor(note.content)
return notes | 44dfa40fb0bf3c5c3c2aafb2731583b6e13d8853 | 2,002 |
def normalization(arr, normalize_mode, norm_range = [0,1]):
"""
Helper function: Normalizes the image based on the specified mode and range
Args:
arr: numpy array
normalize_mode: either "whiten", "normalize_clip", or "normalize" representing the type of normalization to use
norm_range: (Optional) Specifies the range for the numpy array values
Returns:
A normalized array based on the specifications
"""
if normalize_mode == "whiten":
return whiten(arr)
elif normalize_mode == "normalize_clip":
return normalize_clip(arr, norm_range = norm_range)
elif normalize_mode == "normalize":
return minmax_normalize(arr, norm_range = norm_range)
else:
raise NotImplementedError("Please use the supported modes.") | 8400419db77c2f76ba63999ecae89eb3fbdfae6d | 2,003 |
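`whiten`, `normalize_clip`, and `minmax_normalize` are defined elsewhere in the original module; a minimal sketch of what the min-max variant could look like (assumed behaviour, not the original code):
import numpy as np

def minmax_normalize(arr, norm_range=[0, 1]):
    # Linearly rescale arr so its minimum maps to norm_range[0] and its maximum to norm_range[1].
    lo, hi = norm_range
    return lo + (arr - arr.min()) * (hi - lo) / (arr.max() - arr.min())

x = np.array([2.0, 4.0, 6.0])
print(minmax_normalize(x))           # [0.  0.5 1. ]
print(minmax_normalize(x, [-1, 1]))  # [-1.  0.  1.]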
def draw_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=0):
"""Visualizes a single binary mask."""
was_pil = isinstance(img, (Image.Image))
img = np.array(img)
img = img.astype(np.float32)
idx = np.nonzero(mask)
img[idx[0], idx[1], :] *= 1.0 - alpha
img[idx[0], idx[1], :] += alpha * col
if border_thick:
contours, hierarchy = cv2.findContours(
mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img, contours, -1, _WHITE, border_thick, cv2.LINE_AA)
img = img.astype(np.uint8)
return Image.fromarray(img) if was_pil else img | 047bfc2f26ed38c28ff31f46746542a5d56182c4 | 2,004 |
def build_md_page(page_info: parser.PageInfo) -> str:
"""Given a PageInfo object, return markdown for the page.
Args:
page_info: Must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
`parser.ModulePageInfo`.
Returns:
Markdown for the page
Raises:
ValueError: if `page_info` is an instance of an unrecognized class
"""
if isinstance(page_info, parser.ClassPageInfo):
return ClassPageBuilder(page_info).build()
if isinstance(page_info, parser.FunctionPageInfo):
return FunctionPageBuilder(page_info).build()
if isinstance(page_info, parser.ModulePageInfo):
return ModulePageBuilder(page_info).build()
if isinstance(page_info, parser.TypeAliasPageInfo):
return TypeAliasPageBuilder(page_info).build()
raise ValueError(f'Unknown Page Info Type: {type(page_info)}') | 86ed4f8e1b9b733f45e827c65b067295a9a2ff06 | 2,005 |
from typing import Optional
def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node:
"""Return a node which transposes the data in the input tensor.
@param data: The input tensor to be transposed
@param input_order: Permutation of axes to be applied to the input tensor
@return Transpose node
"""
return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order)) | bc84792893352cdd235efd9e33fdc53cadd6521f | 2,006 |
def find_opposite_reader(card_reader_list, find):
"""Returns the card reader on the opposite side of the door for the card reader in find"""
for c in card_reader_list:
if c.room_a == find.room_b and c.room_b == find.room_a:
return c
raise Exception("No reader on opposite side found") | 8a70b9b35174be62f3ca816f385b4c29a6ebebe8 | 2,007 |
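Usage sketch with a hypothetical reader type (the real card-reader class is not part of this snippet):
from collections import namedtuple

CardReader = namedtuple("CardReader", ["room_a", "room_b"])
readers = [CardReader("lobby", "lab"), CardReader("lab", "lobby")]
print(find_opposite_reader(readers, readers[0]))  # CardReader(room_a='lab', room_b='lobby')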
def tag_from_clark(name):
"""Get a human-readable variant of the XML Clark notation tag ``name``.
For a given name using the XML Clark notation, return a human-readable
variant of the tag name for known namespaces. Otherwise, return the name as
is.
"""
match = CLARK_TAG_REGEX.match(name)
if match and match.group("namespace") in NAMESPACES_REV:
args = {"ns": NAMESPACES_REV[match.group("namespace")], "tag": match.group("tag")}
return "%(ns)s:%(tag)s" % args
return name | 948ea17b017926353a37d2ceab031751146e445a | 2,008 |
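`CLARK_TAG_REGEX` and `NAMESPACES_REV` are module-level in the original; a self-contained sketch with assumed definitions (WebDAV/CalDAV prefixes shown for illustration):
import re

CLARK_TAG_REGEX = re.compile(r"{(?P<namespace>[^}]*)}(?P<tag>.+)")
NAMESPACES_REV = {"DAV:": "D", "urn:ietf:params:xml:ns:caldav": "C"}

print(tag_from_clark("{DAV:}getetag"))    # D:getetag
print(tag_from_clark("{unknown:ns}foo"))  # {unknown:ns}foo  (unknown namespace, returned as is)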
import numpy as np
def build_k_indices(y, k_fold, seed):
"""
Randomly partitions the indices of the data set into k groups
Args:
y: labels, used for indexing
k_fold: number of groups after the partitioning
seed: the random seed value
Returns:
k_indices: an array of k sub-indices that are randomly partitioned
"""
num_rows = y.shape[0]
interval = int(num_rows / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_rows)
k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
return np.array(k_indices) | 3d5684ef59bc1ac0abeca243c394499258be5b54 | 2,009 |
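Usage sketch (numpy assumed imported as `np`, as in the body above):
import numpy as np

y = np.arange(12)                     # 12 labelled samples
k_indices = build_k_indices(y, k_fold=4, seed=1)
print(k_indices.shape)                # (4, 3): 4 folds of 3 row indices each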
def get_parent(obj, default=_marker):
"""Returns the container the object was traversed via.
Returns None if the object is a containment root.
Raises TypeError if the object doesn't have enough context to get the
parent.
"""
if IRoot.providedBy(obj):
return None
parent = aq_parent(aq_inner(obj))
if parent is not None:
return parent
if default != _marker:
return default
raise TypeError("Not enough context information to get parent", obj) | a6c53ddd4a8bfb81f211737edf1da12688a3f4e2 | 2,010 |
import numpy
def MRR(logits, target):
"""
Compute mean reciprocal rank.
:param logits: 2d array [batch_size x rel_docs_per_query]
:param target: 2d array [batch_size x rel_docs_per_query]
:return: mean reciprocal rank [a float value]
"""
assert logits.shape == target.shape
indices = numpy.argsort(-logits, 1)
reciprocal_rank = 0
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
if target[i, indices[i, j]] == 1:
reciprocal_rank += 1.0 / (j + 1)
break
return reciprocal_rank / indices.shape[0] | eb9249bf0e3942aeb01b148a0db28c3e5f9dd00a | 2,011 |
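A tiny worked example, with exactly one relevant document per row:
import numpy
logits = numpy.array([[0.1, 0.9, 0.3],
                      [0.8, 0.2, 0.5]])
target = numpy.array([[0, 0, 1],
                      [1, 0, 0]])
# Row 0: relevant doc ranked 2nd -> 1/2; row 1: ranked 1st -> 1/1; mean = 0.75
print(MRR(logits, target))  # 0.75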
def range(starts,
limits=None,
deltas=1,
dtype=None,
name=None,
row_splits_dtype=dtypes.int64):
"""Returns a `RaggedTensor` containing the specified sequences of numbers.
Each row of the returned `RaggedTensor` contains a single sequence:
```python
ragged.range(starts, limits, deltas)[i] ==
tf.range(starts[i], limits[i], deltas[i])
```
If `starts[i] >= limits[i]` and `deltas[i] > 0`, then `output[i]` will be an
empty list. Similarly, if `starts[i] <= limits[i]` and `deltas[i] < 0`, then
`output[i]` will be an empty list. This behavior is consistent with the
Python `range` function, but differs from the `tf.range` op, which returns
an error for these cases.
Examples:
>>> tf.ragged.range([3, 5, 2]).to_list()
[[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list()
[[0, 1, 2], [], [8, 9, 10, 11]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list()
[[0, 2], [], [8, 10]]
The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
The vector inputs must all have the same size. Scalar inputs are broadcast
to match the size of the vector inputs.
Args:
starts: Vector or scalar `Tensor`. Specifies the first entry for each range
if `limits` is not `None`; otherwise, specifies the range limits, and the
first entries default to `0`.
limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for
each range.
deltas: Vector or scalar `Tensor`. Specifies the increment for each range.
Defaults to `1`.
dtype: The type of the elements of the resulting tensor. If not specified,
then a value is chosen based on the other args.
name: A name for the operation.
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if limits is None:
starts, limits = 0, starts
with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:
starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')
limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')
deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')
# infer dtype if not explicitly provided
if dtype is None:
starts, limits, deltas = _infer_matching_dtype(
[starts, limits, deltas],
[dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
result = gen_ragged_math_ops.ragged_range(
starts, limits, deltas, Tsplits=row_splits_dtype, name=name)
return ragged_tensor.RaggedTensor.from_row_splits(
result.rt_dense_values, result.rt_nested_splits, validate=False) | 177c956844596b5125c288db8859a38ecf4e8b80 | 2,012 |
from math import radians, sin, cos
def ecef2enuv(u, v, w, lat0, lon0, deg=True):
"""
Rotate an ECEF vector (i.e. the difference between two points) into local ENU components.
input
-----
u, v, w: ECEF vector components [meters]
lat0, lon0: observer geodetic latitude and longitude (degrees if deg=True, radians otherwise)
output
------
uEast, vNorth, wUp: ENU vector components [meters]
"""
if deg:
lat0 = radians(lat0)
lon0 = radians(lon0)
t = cos(lon0) * u + sin(lon0) * v
uEast = -sin(lon0) * u + cos(lon0) * v
wUp = cos(lat0) * t + sin(lat0) * w
vNorth = -sin(lat0) * t + cos(lat0) * w
return uEast, vNorth, wUp | b9b6adb9232407043927cdbc0c2cec4f0b9b50a2 | 2,013 |
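A quick scalar sanity check: at lat0 = lon0 = 0 the ECEF x-axis points radially outward, so a unit x vector maps to "up" in ENU:
print(ecef2enuv(1.0, 0.0, 0.0, lat0=0.0, lon0=0.0))  # (0.0, 0.0, 1.0) -> East, North, Up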
def interpolate_ray_dist(ray_dists, order='spline'):
""" interpolate ray distances
:param [float] ray_dists:
:param str order: degree of interpolation
:return [float]:
>>> vals = np.sin(np.linspace(0, 2 * np.pi, 20)) * 10
>>> np.round(vals).astype(int).tolist()
[0, 3, 6, 8, 10, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -8, -6, -3, 0]
>>> vals[3:7] = -1
>>> vals[16:] = -1
>>> vals_interp = interpolate_ray_dist(vals, order=3)
>>> np.round(vals_interp).astype(int).tolist()
[0, 3, 6, 9, 10, 10, 8, 7, 5, 2, -2, -5, -7, -9, -10, -10, -10, -8, -4, 1]
>>> vals_interp = interpolate_ray_dist(vals, order='spline')
>>> np.round(vals_interp).astype(int).tolist()
[0, 3, 6, 8, 9, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -9, -7, -5, -3]
>>> vals_interp = interpolate_ray_dist(vals, order='cos')
>>> np.round(vals_interp).astype(int).tolist()
[0, 3, 6, 8, 10, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -8, -6, -3, 0]
"""
x_space = np.arange(len(ray_dists))
ray_dists = np.array(ray_dists)
missing = ray_dists == -1
x_train = x_space[ray_dists != -1]
x_train_ext = np.hstack((x_train - len(x_space),
x_train,
x_train + len(x_space)))
y_train = ray_dists[ray_dists != -1]
y_train_ext = np.array(y_train.tolist() * 3)
if isinstance(order, int):
# model = pipeline.make_pipeline(preprocessing.PolynomialFeatures(order),
# linear_model.Ridge())
# model.fit(x_space[ray_dists != -1], ray_dists[ray_dists != -1])
# ray_dists[ray_dists == -1] = model.predict(x_space[ray_dists == -1])
z = np.polyfit(x_train, y_train, order)
fn_interp = np.poly1d(z)
ray_dists[missing] = fn_interp(x_space[missing])
elif order == 'spline':
uinterp_us = interpolate.InterpolatedUnivariateSpline(x_train_ext,
y_train_ext)
ray_dists[missing] = uinterp_us(x_space[missing])
elif order == 'cos':
def _fn_cos(x, t):
return x[0] + x[1] * np.sin(x[2] + x[3] * t)
def _fn_cos_residual(x, t, y):
return _fn_cos(x, t) - y
x0 = np.array([np.mean(y_train), (y_train.max() - y_train.min()) / 2.,
0, len(x_space) / np.pi])
lsm_res = optimize.least_squares(_fn_cos_residual, x0, gtol=1e-1,
# loss='soft_l1', f_scale=0.1,
args=(x_train, y_train))
ray_dists[missing] = _fn_cos(lsm_res.x, x_space[missing])
return ray_dists | f1ef1906fd2871e995355a7dd8818a946eefe1e3 | 2,014 |
def distance(left, right, pairwise=pairwise['prod'], distance_function=None):
"""
Calculate the distance between two *k*-mer profiles.
:arg left, right: Profiles to calculate the distance between.
:arg pairwise: Pairwise distance function used by the multiset distance.
:arg distance_function: Optional custom distance function; if given, it is
used instead of the multiset distance.
:return: The distance between `left` and `right`.
:rtype: float
"""
if not distance_function:
return multiset(left, right, pairwise)
return distance_function(left, right) | 1be9b2777cf58bf52e2e33d6c39ed3655edc2354 | 2,015 |
def _rec_get_all_imports_exports(fips_dir, proj_dir, result) :
"""recursively get all imported projects, their exported and
imported modules in a dictionary object:
project-1:
url: git-url (not valid for first, top-level project)
exports:
header-dirs: [ ]
conditional-header-dirs:
dir: cmake-if condition string
lib-dirs: [ ]
defines:
def-key: def-val
...
modules :
mod: dir
mod: dir
...
imports:
name:
git: [git-url]
branch: [optional: branch or tag]
cond: [optional: cmake-if condition string conditionally including the dependency]
name:
...
...
...
:param fips_dir: absolute fips directory
:param proj_dir: absolute project directory
:param result: in/out current result
:returns: bool success, and modified result dictionary
"""
success = True
ws_dir = util.get_workspace_dir(fips_dir)
proj_name = util.get_project_name_from_dir(proj_dir)
if proj_name not in result :
imports = get_imports(fips_dir, proj_dir)
exports = get_exports(proj_dir)
for dep_proj_name in imports :
if dep_proj_name not in result :
dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)
dep_url = imports[dep_proj_name]['git']
success, result = _rec_get_all_imports_exports(fips_dir, dep_proj_dir, result)
# break recursion on error
if not success :
return success, result
result[proj_name] = {}
result[proj_name]['proj_dir'] = proj_dir
result[proj_name]['imports'] = imports
result[proj_name]['exports'] = exports
# done
return success, result | 66c0d25d27559e6841bcfced49646f5a711bfeb3 | 2,016 |
from typing import Optional
from typing import Sequence
def get_database_cluster(name: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseClusterResult:
"""
Provides information on a DigitalOcean database cluster resource.
## Example Usage
```python
import pulumi
import pulumi_digitalocean as digitalocean
example = digitalocean.get_database_cluster(name="example-cluster")
pulumi.export("databaseOutput", example.uri)
```
:param str name: The name of the database cluster.
:param Sequence[str] tags: A list of tags applied to the database cluster.
"""
__args__ = dict()
__args__['name'] = name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getDatabaseCluster:getDatabaseCluster', __args__, opts=opts, typ=GetDatabaseClusterResult).value
return AwaitableGetDatabaseClusterResult(
database=__ret__.database,
engine=__ret__.engine,
host=__ret__.host,
id=__ret__.id,
maintenance_windows=__ret__.maintenance_windows,
name=__ret__.name,
node_count=__ret__.node_count,
password=__ret__.password,
port=__ret__.port,
private_host=__ret__.private_host,
private_network_uuid=__ret__.private_network_uuid,
private_uri=__ret__.private_uri,
region=__ret__.region,
size=__ret__.size,
tags=__ret__.tags,
uri=__ret__.uri,
urn=__ret__.urn,
user=__ret__.user,
version=__ret__.version) | edc9d4e0264e90a1491a809c40e2cf2961699d80 | 2,017 |
import sys
from inspect import getmodule
def caller_name(skip=2):
"""Get a name of a caller module
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
References:
--------
https://gist.github.com/techtonik/2151727
"""
def stack_(frame):
framelist = []
while frame:
framelist.append(frame)
frame = frame.f_back
return framelist
stack = stack_(sys._getframe(1))
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start]
module = getmodule(parentframe)
if module:
ret_name = module.__name__
else:
ret_name = __name__
return ret_name | 8823f3a896d4eefcb3626a571250cbc926431c9c | 2,018 |
import os
def get_dataset():
"""Summary
Returns
-------
TYPE
Description
"""
stms = []
for dirpath, dirnames, filenames in os.walk('TEDLIUM_release2'):
for f in filenames:
if f.endswith('stm'):
stms.append(os.path.join(dirpath, f))
data = []
for stm_i in stms:
with open(stm_i, 'r') as fp:
lines = fp.readlines()
for line_i in lines:
sp = line_i.split()
data.append({
'id': sp[0],
'num': sp[1],
'id2': sp[2],
'start_time': sp[3],
'end_time': sp[4],
'ch': 'wideband' if 'f0' in sp[5] else 'telephone',
'sex': 'male' if 'male' in sp[5] else 'female',
'text': " ".join(
sp[6:]) if sp[6] != 'ignore_time_segment_in_scoring' else ''})
for max_duration in range(30):
durations = []
for stm_i in stms:
with open(stm_i, 'r') as fp:
lines = fp.readlines()
for line_i in lines:
sp = line_i.split()
dur = float(sp[4]) - float(sp[3])
if dur < max_duration:
durations.append(dur)
return data, durations | 1ab0e272d5fc9c8be797b418b3f826e58a0b8904 | 2,019 |
def tesla_loadhook(h, *args, **kwargs):
"""
Converts a load hook into an application processor.
>>> app = auto_application()
>>> def f(*args, **kwargs): "something done before handling request"
...
>>> app.add_processor(tesla_loadhook(f, *args, **kwargs))
"""
def processor(handler):
h(*args, **kwargs)
return handler()
return processor | 65743cd9220ddef40294cde0f4f6566ae9235772 | 2,020 |
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): #pragma: no cover
"""
Force a string to be unicode.
If strings_only is True, don't convert (some) non-string-like objects.
Originally copied from the Django source code, further modifications have
been made.
Original copyright and license:
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
if strings_only and is_protected_type(s):
return s
if not isinstance(s, str,):
if hasattr(s, '__unicode__'):
s = str(s)
else:
try:
s = str(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, str):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
return s | 61992707364bfbb3e714bb52005a417387f8d7de | 2,021 |
def extractYoloInfo(yolo_output_format_data):
""" Extract box, objectness, class from yolo output format data """
box = yolo_output_format_data[..., :6]
conf = yolo_output_format_data[..., 6:7]
category = yolo_output_format_data[..., 7:]
return box, conf, category | ff28a5ce5490c61722ca06b0e09b9bd85ee7e111 | 2,022 |
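Shape-wise the split is straightforward; a sketch with a dummy prediction tensor (6 box values, 1 objectness score, 3 class scores assumed):
import numpy as np

pred = np.zeros((8, 8, 10))   # e.g. an 8x8 grid with 6 + 1 + 3 channels per cell
box, conf, category = extractYoloInfo(pred)
print(box.shape, conf.shape, category.shape)  # (8, 8, 6) (8, 8, 1) (8, 8, 3)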
def bbox_diou(bboxes1, bboxes2):
"""
Distance-IoU (DIoU)
@param bboxes1: (a, b, ..., 4)
@param bboxes2: (A, B, ..., 4)
x:X is 1:n or n:n or n:1
@return (max(a,A), max(b,B), ...)
ex) (4,):(3,4) -> (3,)
(2,1,4):(2,3,4) -> (2,3)
"""
bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]
bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]
bboxes1_coor = tf.concat(
[
bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,
bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,
],
axis=-1,
)
bboxes2_coor = tf.concat(
[
bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,
bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,
],
axis=-1,
)
left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = bboxes1_area + bboxes2_area - inter_area
iou = tf.math.divide_no_nan(inter_area, union_area)
enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
enclose_right_down = tf.maximum(
bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]
)
enclose_section = enclose_right_down - enclose_left_up
c_2 = enclose_section[..., 0] ** 2 + enclose_section[..., 1] ** 2
center_diagonal = bboxes2[..., :2] - bboxes1[..., :2]
rho_2 = center_diagonal[..., 0] ** 2 + center_diagonal[..., 1] ** 2
diou = iou - tf.math.divide_no_nan(rho_2, c_2)
return diou | f32e4a289f437494fd738c1128d6e7c7a8e02c7e | 2,023 |
def showp1rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
first parent, or -1 if the changeset has no parents. (DEPRECATED)"""
ctx = context.resource(mapping, b'ctx')
return ctx.p1().rev() | 2c843d5476a8e5b43fa8ac31351de633c5fa3d6c | 2,024 |
import numpy as np
def erp_pretax(t,ma,st,ra,par):
""" early retirement pension (efterløn) pretax"""
# initialize
ERP = np.zeros(1)
# pre two year period
if par.T_erp <= t < par.T_two_year:
if ra == 1:
priv = priv_pension(ma,st,par)
ERP[:] = np.maximum(0,par.ERP_high - 0.6*0.05*np.maximum(0, priv - par.ERP_low))
# two year period
elif par.T_two_year <= t < par.T_oap:
# two year rule is satisfied
if ra == 0:
ERP[:] = par.ERP_2
# two year rule not satisfied
elif ra == 1:
priv = priv_pension(ma,st,par)
ERP[:] = np.maximum(0,par.ERP_high - 0.6*0.05*np.maximum(0, priv - par.ERP_low))
# return
return ERP | d9a3142236aa942f8c86db1c484e57e4fc7ee278 | 2,025 |
import os
import subprocess
def main(gen5tt_algo, fs_file, num_tracts, participant_label, session_label, t1_file, eddy_file, bvec_file, bval_file, template_file, atlas_file, output_dir):
"""Console script for tractify."""
work_dir = os.path.join(output_dir, "scratch")
# Set parameters based on CLI, pass through object
parameters = Parameters(
t1_file=t1_file,
fs_file=fs_file,
eddy_file=eddy_file,
bval_file=bval_file,
bvec_file=bvec_file,
work_dir=work_dir,
output_dir=output_dir,
template_file=template_file,
atlas_file=atlas_file,
gen5tt_algo=gen5tt_algo,
num_tracts=num_tracts
)
if (gen5tt_algo == 'freesurfer'):
try:
os.environ["SUBJECTS_DIR"]
except KeyError:
print("No SUBJECTS_DIR environment variable found for"
" freesurfer, using '" + os.path.dirname(fs_file) + "' instead")
os.environ["SUBJECTS_DIR"] = os.path.dirname(fs_file)
wf = init_single_ses_wf(participant_label, session_label, parameters)
wf.base_dir = parameters.work_dir
wf.write_graph(graph2use="colored")
wf.config["execution"]["remove_unnecessary_outputs"] = False
wf.config["execution"]["keep_inputs"] = True
wf.run()
# Output the sse file to a text output
# Get string of sse output value
sse_node = next(node.replace('.', '/') for node in wf.list_node_names() if 'dtifit' in node)
# Build the path to the dtifit SSE output inside the work directory
subject_session_base = 'single_subject_' + participant_label + '_wf'
sse_file = os.path.join(work_dir, subject_session_base, sse_node, 'dtifit__sse.nii.gz')
# If the sse was generated
if (os.path.isfile(sse_file)):
sse_txt_base = 'sub_' + participant_label + '_ses_' + session_label + '_sse.txt'
sse_txt_scratch = os.path.join(work_dir, subject_session_base, sse_node, sse_txt_base)
# Run the fslstats command on the sse and redirect it to a text output
sse_dtifit_value_command = ['fslstats' , sse_file, '-M']
my_env = os.environ.copy()
my_env["PATH"] = "/usr/sbin:/sbin:" + my_env["PATH"]
sse_txt_file = open(sse_txt_scratch, "w")
subprocess.call(sse_dtifit_value_command, stdout=sse_txt_file)
sse_txt_file.close()
print('Output sse text value is in ' + sse_txt_scratch)
else:
print("SSE wasn't generated, will not output merged text value")
return 0 | 2ad0568612559c3811d1e6aba4c16d11d855c54e | 2,026 |
def add_missing_cmd(command_list):
"""Adds missing cmd tags to the given command list."""
# E.g.: given:
# ['a', '0', '0', '0', '0', '0', '0', '0',
# '0', '0', '0', '0', '0', '0', '0']
# Converts to:
# [['a', '0', '0', '0', '0', '0', '0', '0'],
# ['a', '0', '0', '0', '0', '0', '0', '0']]
# And returns a list of these per-command lists.
cmd_tag = command_list[0]
args = command_list[1:]
final_cmds = []
for arg_batch in grouper(args, NUM_ARGS[cmd_tag]):
final_cmds.append([cmd_tag] + list(arg_batch))
if not final_cmds:
# command has no args (e.g.: 'z')
final_cmds = [[cmd_tag]]
return final_cmds | 190884575d0110f06088b9be70008da56c279344 | 2,027 |
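`grouper` and `NUM_ARGS` live elsewhere in the original module; a self-contained sketch with assumed values (7 arguments for the SVG arc command 'a'):
NUM_ARGS = {'a': 7, 'l': 2, 'z': 0}   # assumed args-per-command table (SVG path commands)

def grouper(iterable, n):
    # Chunk iterable into groups of n (itertools-recipe style; no padding needed here).
    args = [iter(iterable)] * n
    return zip(*args)

print(add_missing_cmd(['a'] + ['0'] * 14))
# [['a', '0', '0', '0', '0', '0', '0', '0'], ['a', '0', '0', '0', '0', '0', '0', '0']]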
def replace_umlauts(s: str) -> str:
"""
Replace special symbols with the letters with umlauts (ä, ö and ü)
:param s: string with the special symbols (::)
:return: edited string
"""
out = s.replace('A::', 'Ä').replace('O::', 'Ö').replace('U::', 'Ü').replace('a::', 'ä').replace('o::', 'ö') \
.replace('u::', 'ü')
return out | 8fad1f1017a3fd860d7e32fd191dd060b75a7bb8 | 2,028 |
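Example:
print(replace_umlauts("Scho::ne Gru::sse aus O::sterreich"))  # Schöne Grüsse aus Österreich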
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow | f3515fdfa8c719c8b91a8f76a04d468e545d6f23 | 2,029 |
import os
def get_modules():
"""Returns the list of module names
"""
def listdir(dir):
def clean(name):
name = os.path.basename(name)
if name[-4:] == '.zip':
name = name[:-4]
return name
def is_really_module(name):
for mname in MANIFEST_NAMES:
if os.path.isfile(opj(dir, name, mname)):
return True
return map(clean, filter(is_really_module, os.listdir(dir)))
plist = []
initialize_sys_path()
for ad in ad_paths:
plist.extend(listdir(ad))
return list(set(plist)) | 7d61468330704167b6a7a93787533e80dc78d0a0 | 2,030 |
def resnet_50(num_classes, data_format='channels_first', pruning_method=None):
"""Returns the ResNet model for a given size and number of output classes."""
return resnet_50_generator(
block_fn=bottleneck_block_,
lst_layers=[3, 4, 6, 3],
num_classes=num_classes,
pruning_method=pruning_method,
data_format=data_format) | 4962f9a4cf4aaaf0052941279c8156e29b2cb639 | 2,031 |
import json
import base64
def read_amuselabs_data(s):
"""
Read in an amuselabs string, return a dictionary of data
"""
# Data might be base64'd or not
try:
data = json.loads(s)
except json.JSONDecodeError:
s1 = base64.b64decode(s)
data = json.loads(s1)
ret = {}
# metadata
# technically these can be codewords but i've never seen one
kind = "crossword"
width, height = data['w'], data['h']
ret['metadata'] = {
'width': width
, 'height': height
, 'kind': kind
, 'author': data.get('author')
, 'title': data.get('title')
, 'copyright': data.get('copyright')
, 'noClueCells': True
# no notepad?
}
# grid
grid = []
box = data['box']
cellInfos = data.get('cellInfos', [])
# Reshape cellInfos to make lookup easier
markup = {}
for c in cellInfos:
markup[(c['x'], c['y'])] = c
for y in range(height):
for x in range(width):
cell = {'x': x, 'y': y, 'value': None}
if box[x][y] == '\x00':
cell['isBlock'] = True
else:
cell['solution'] = box[x][y]
style = {}
if markup.get((x, y)):
thisMarkup = markup[(x, y)]
if thisMarkup.get('isCircled'):
style['shapebg'] = 'circle'
if thisMarkup.get('isVoid'):
cell['isBlock'] = False
cell['isVoid'] = True
bar_string = ''
for letter, side in {'B': 'bottom', 'R': 'right'}.items():
if thisMarkup.get(f'{side}Wall'):
bar_string += letter
if bar_string:
style['barred'] = bar_string
cell['style'] = style
grid.append(cell)
ret['grid'] = grid
# clues
placed_words = data['placedWords']
across_words = [word for word in placed_words if word['acrossNotDown']]
down_words = [word for word in placed_words if not word['acrossNotDown']]
# sorting is probably unnecessary
across_words = sorted(across_words, key=lambda x: (x['y'], x['x']))
down_words = sorted(down_words, key=lambda x: (x['y'], x['x']))
across_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in across_words]
down_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in down_words]
ret['clues'] = [{'title': 'Across', 'clues': across_clues}, {'title': 'Down', 'clues': down_clues}]
return ret | f9c2fb2807d1003261bec7b58e4ba025aac65a6a | 2,032 |
def calinski_harabasz(dataset_values:DatasetValues):
"""Calinski, T.; Harabasz, J. (1974). A dendrite method for cluster analysis.
Communications in Statistics - Theory and Methods, v.3, n.1, p.1�27.
The objective is maximize value [0, +Inf]"""
if dataset_values.K == 1:
return 0
return calinski_harabasz_score(dataset_values.data, dataset_values.cluster_labels) | c8231971350d22d1067056c53838f0536ae03e77 | 2,033 |
from re import match, IGNORECASE
def parse_version(version):
"""
input version string of the form:
'Major.Minor.Patch+CommitHash'
like:
'0.1.5+95ffef4'
------ or ------
'0.1.0'
returns version_info tuple of the form:
(major,minor,patch,hash)
like:
(0, 1, 5, '95ffef4')
-------- or --------
(0, 1, 0, '')
"""
matches = match(
r'(?P<major>[0-9]+)\.(?P<minor>[0-9]+)\.(?P<patch>[0-9]+)(g(?P<hash>[a-z0-9]*))?',
version,
IGNORECASE
)
if matches:
major = int(matches.group('major'))
minor = int(matches.group('minor'))
patch = int(matches.group('patch'))
hash = matches.group('hash') or ''
return (major,minor,patch,hash)
else:
raise ValueError("Version string, '%s' could not be parsed. It should be of the form: 'Major.Minor.Patch+CommitHash'." % version) | cc9b326e498991092a494458d4f98cce7bbb28f9 | 2,034 |
def _location_sensitive_score(W_query, W_fil, W_keys):
"""Impelements Bahdanau-style (cumulative) scoring function.
This attention is described in:
J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-
gio, “Attention-based models for speech recognition,” in Ad-
vances in Neural Information Processing Systems, 2015, pp.
577–585.
#############################################################################
hybrid attention (content-based + location-based)
f = F * α_{i-1}
energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))
#############################################################################
Args:
W_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.
W_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]'
W_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.
Returns:
A '[batch_size, max_time]' attention score (energy)
"""
# Get the number of hidden units from the trailing dimension of keys
dtype = W_query.dtype
num_units = W_keys.shape[-1].value or array_ops.shape(W_keys)[-1]
v_a = tf.get_variable(
"attention_variable_projection",
shape=[num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(),
trainable=True,
)
print(v_a)
b_a = tf.get_variable(
"attention_bias",
shape=[num_units],
dtype=dtype,
initializer=tf.zeros_initializer(),
)
return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2]) | f3daa106f6ac819ef5037a221e2cd768d6810642 | 2,035 |
def get_streamdecks():
"""
Retrieves all connected streamdecks
"""
streamdecks = DeviceManager().enumerate()
return streamdecks | f649fe4404ec6be71cdb4f9cd5805738e1d0b823 | 2,036 |
import six
def clean_string(text):
"""
Remove Lucene reserved characters from query string
"""
if isinstance(text, six.string_types):
return text.translate(UNI_SPECIAL_CHARS).strip()
return text.translate(None, STR_SPECIAL_CHARS).strip() | 5387d76d4dc47997eac751538670cc426d854449 | 2,037 |
def convert_single_example(example_index, example, label_size, max_seq_length,
tokenizer, max_qa_length):
"""Loads a data file into a list of `InputBatch`s."""
# RACE is a multiple choice task. To perform this task using ALBERT,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given RACE example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
if isinstance(example, classifier_utils.PaddingInputExample):
return classifier_utils.InputFeatures(
example_id=0,
input_ids=[[0] * max_seq_length] * label_size,
input_mask=[[0] * max_seq_length] * label_size,
segment_ids=[[0] * max_seq_length] * label_size,
label_id=0,
is_real_example=False)
else:
context_tokens = tokenizer.tokenize(example.context_sentence)
if example.start_ending is not None:
start_ending_tokens = tokenizer.tokenize(example.start_ending)
all_input_tokens = []
all_input_ids = []
all_input_mask = []
all_segment_ids = []
for ending in example.endings:
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
if example.start_ending is not None:
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
else:
ending_tokens = tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
ending_tokens = ending_tokens[- max_qa_length:]
if len(context_tokens_choice) + len(ending_tokens) > max_seq_length - 3:
context_tokens_choice = context_tokens_choice[: (
max_seq_length - 3 - len(ending_tokens))]
tokens = ["[CLS]"] + context_tokens_choice + (
["[SEP]"] + ending_tokens + ["[SEP]"])
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (
len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
all_input_tokens.append(tokens)
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_segment_ids.append(segment_ids)
label = example.label
if example_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("id: {}".format(example.example_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in \
enumerate(zip(all_input_tokens, all_input_ids, all_input_mask, all_segment_ids)):
tf.logging.info("choice: {}".format(choice_idx))
tf.logging.info("tokens: {}".format(" ".join(tokens)))
tf.logging.info(
"input_ids: {}".format(" ".join(map(str, input_ids))))
tf.logging.info(
"input_mask: {}".format(" ".join(map(str, input_mask))))
tf.logging.info(
"segment_ids: {}".format(" ".join(map(str, segment_ids))))
tf.logging.info("label: {}".format(label))
return classifier_utils.InputFeatures(
example_id=example.example_id,
input_ids=all_input_ids,
input_mask=all_input_mask,
segment_ids=all_segment_ids,
label_id=label
) | 385f5f2801a41e0216e8a8c22d089e986bb55588 | 2,038 |
def GetAllProperties(layers='core'):
"""Return all properties in the graph."""
global Utc
KEY = "AllProperties:%s" % layers
if DataCache.get(KEY,Utc):
#logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
#logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Thing")
props = GetSources(Unit.GetUnit("rdf:type", True), Unit.GetUnit("rdf:Property", True), layers=EVERYLAYER)
res = []
for prop in props:
if inLayer(layers,prop):
res.append(prop)
sorted_all_properties = sorted(res, key=lambda u: u.id)
DataCache.put(KEY,sorted_all_properties,Utc)
return sorted_all_properties | f3bf05ce6a4497e036cd12e4a05db603f10ca9e6 | 2,039 |
from typing import Tuple
from typing import Optional
def _single_optimal_block(x: NDArray) -> Tuple[float, float]:
"""
Compute the optimal window length for a single series
Parameters
----------
x : ndarray
The data to use in the optimal window estimation
Returns
-------
stationary : float
Estimated optimal window length for stationary bootstrap
circular : float
Estimated optimal window length for circular bootstrap
"""
nobs = x.shape[0]
eps = x - x.mean(0)
b_max = np.ceil(min(3 * np.sqrt(nobs), nobs / 3))
kn = max(5, int(np.log10(nobs)))
m_max = int(np.ceil(np.sqrt(nobs))) + kn
# Find first collection of kn autocorrelations that are insignificant
cv = 2 * np.sqrt(np.log10(nobs) / nobs)
acv = np.zeros(m_max + 1)
abs_acorr = np.zeros(m_max + 1)
opt_m: Optional[int] = None
for i in range(m_max + 1):
v1 = eps[i + 1 :] @ eps[i + 1 :]
v2 = eps[: -(i + 1)] @ eps[: -(i + 1)]
cross_prod = eps[i:] @ eps[: nobs - i]
acv[i] = cross_prod / nobs
abs_acorr[i] = np.abs(cross_prod) / np.sqrt(v1 * v2)
if i >= kn:
if np.all(abs_acorr[i - kn : i] < cv) and opt_m is None:
opt_m = i - kn
m = 2 * max(opt_m, 1) if opt_m is not None else m_max
m = min(m, m_max)
g = 0.0
lr_acv = acv[0]
for k in range(1, m + 1):
lam = 1 if k / m <= 1 / 2 else 2 * (1 - k / m)
g += 2 * lam * k * acv[k]
lr_acv += 2 * lam * acv[k]
d_sb = 2 * lr_acv ** 2
d_cb = 4 / 3 * lr_acv ** 2
b_sb = ((2 * g ** 2) / d_sb) ** (1 / 3) * nobs ** (1 / 3)
b_cb = ((2 * g ** 2) / d_cb) ** (1 / 3) * nobs ** (1 / 3)
b_sb = min(b_sb, b_max)
b_cb = min(b_cb, b_max)
return b_sb, b_cb | 7de0221ddc654d4f9e8ddd56d65f688c096a7784 | 2,040 |
def predict(params, X):
"""
Using the learned parameters, predicts a class for each example in X
Arguments:
params -- python dictionary containing the learned parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
A2, cache = forward_propagation(X, params)
predictions = np.round(A2)
return predictions | c647114ad415b2ae6c75f2fe2e207bf279775131 | 2,041 |
def response(request):
"""
返回相应对象
:param request:
:return:
"""
json_str = '{"name": "张三", "age": 18}' # 整体是个字符串
response = HttpResponse(json_str,
content_type="application/json",
status=200)
response["dev"] = "aGrass0825" # 向响应头中添加内容
return response | a44b35682ff8f5de168711730a10056653319512 | 2,042 |
def nest_to_flat_dict(nest):
"""Convert a nested structure into a flat dictionary.
Args:
nest: A nested structure.
Returns:
flat_dict: A dictionary with strings keys that can be converted back into
the original structure via `flat_dict_to_nest`.
"""
flat_sequence = tf.nest.flatten(nest)
return {str(k): v for k, v in enumerate(flat_sequence)} | f74308fc4f7c0b97d6524faea65915263a8ced9b | 2,043 |
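Round-trip sketch (TensorFlow assumed available); the inverse `flat_dict_to_nest` mentioned in the docstring is shown here only as a hypothetical helper built on `tf.nest.pack_sequence_as`:
import tensorflow as tf

nest = {"a": 1, "b": [2, 3]}
flat = nest_to_flat_dict(nest)
print(flat)  # {'0': 1, '1': 2, '2': 3}

def flat_dict_to_nest(flat_dict, structure):
    # Hypothetical inverse: reorder values by key and pack them back into the structure.
    flat_sequence = [flat_dict[str(i)] for i in range(len(flat_dict))]
    return tf.nest.pack_sequence_as(structure, flat_sequence)

print(flat_dict_to_nest(flat, nest))  # {'a': 1, 'b': [2, 3]}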
import sys
def _live_tensors(f, attr_name="inputs"):
"""Returns the indices of the used inputs.
Note: This currently only handles direct index accesses e.g. op.inputs[1].
If the function has slicing or list comprehension on attr_name then returns
_ALL. This ensure that this is correct even if inefficient.
Args:
f: A grad function, taking the op as first argument.
attr_name: op attr to track. "inputs" or "outputs".
Returns:
Either one of:
* set of integers representing individual indices of inputs used
* the value _ALL, if indices are used but cannot be determined which
* empty set, if no inputs are used
"""
node, _ = parser.parse_entity(f, ())
entity_info = transformer.EntityInfo(
name=f.__name__,
source_code=None,
source_file=None,
future_features=(),
namespace=sys.modules[f.__module__].__dict__)
ctx = transformer.Context(entity_info, None, None)
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
node = reaching_fndefs.resolve(node, ctx, graphs)
node = liveness.resolve(node, ctx, graphs)
op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)
op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)
special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name,))
node = special_tracker.visit(node)
live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)
inputs_outputs_used_qns = set()
for v in special_tracker.complex_reads:
# Complicated patterns like op.inputs[:3]. Could be smarter about them
# if they matter much.
if v == op_inputs_outputs_name:
return _ALL
for v in live_vars_in:
if v in special_tracker.reads:
if (v.has_subscript() and v.parent == op_inputs_outputs_name):
inputs_outputs_used_qns.add(v)
elif v == op_inputs_outputs_name:
# When op.{attr_name} is used directly, assume all tensors are
# used for now. In that case, no point digging further.
# TODO(mdan): We can descend into tuple expansions.
return _ALL
function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)
node = function_calls_tracker.visit(node)
input_output_indices = set()
for called_f in function_calls_tracker.calls:
child_indices = _live_tensors(called_f, attr_name=attr_name)
if child_indices is _ALL:
return _ALL
input_output_indices |= child_indices
for v in inputs_outputs_used_qns:
assert v.has_subscript()
_, subscript = v.qn
if not subscript.is_simple():
# Not a number, assuming it can be anything.
return _ALL
subscript_val, = subscript.qn
if (not isinstance(subscript_val, qual_names.Literal) and
not isinstance(subscript_val.value, int)):
# Not a number, assuming it can be anything.
return _ALL
input_output_indices.add(subscript_val.value)
return input_output_indices | 6521965fe10f0c7ca76ea867c4b7478d138b9f41 | 2,044 |
import os
import sys
import configparser
def update_site_config(site_name, parameters):
"""Update the site config to establish the database settings"""
site_directory = os.path.join('web', 'sites', site_name)
if not os.path.isdir(site_directory):
print('site directory {} missing'.format(site_directory))
sys.exit(-1)
config_filename = os.path.join(site_directory, 'site.ini')
if os.path.exists(config_filename):
existing_config = configparser.ConfigParser()
existing_config.read(config_filename)
if existing_config.has_section('database'):
print('database settings already exist in {}'.format(
config_filename
))
print(existing_config.options('database'))
sys.exit(-1)
new_config = configparser.RawConfigParser()
new_config.add_section('database')
for key, value in parameters.items():
if key == 'database':
key = 'name'
new_config.set('database', key, value)
with open(config_filename, 'a') as configfile:
new_config.write(configfile)
return new_config | 8dce45257189cb5c4830f18fc1bcad388a193252 | 2,045 |
def plot_with_front(gen, front, title, fname):
"""
plot_with_front: Plot the generation gen together with front,
highlighting front as the Pareto front on the graph.
Parameters:
gen: The generation to plot.
front: The pareto front extracted from generation gen
title: Plot Title
fname: path to output file for plot image.
"""
fig, ax = subplots()
plot_inds(ax,gen,'Non-Dominant')
plot_inds(ax,front,'Dominant')
ax.set_title(title)
ax.legend()
fig.savefig(fname)
return [fig, ax] | 6556a22c6484e4c96f79a14a770cca934f50e274 | 2,046 |
def find_closest_positive_divisor(a, b):
"""Return non-trivial integer divisor (bh) of (a) closest to (b) in abs(b-bh) such that a % bh == 0"""
assert a>0 and b>0
if a<=b:
return a
for k in range(0, a-b+1):
bh = b + k
if bh>1 and a % bh == 0:
return bh
bh = b - k
if bh>1 and a % bh == 0:
return bh
return a | 1a68e1767680f82db232095806adfe1c27fb956e | 2,047 |
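Quick examples (ties between equally distant divisors resolve upward, since b + k is tried before b - k):
print(find_closest_positive_divisor(12, 5))   # 6   (candidates 4 and 6 are equally close)
print(find_closest_positive_divisor(100, 7))  # 5
print(find_closest_positive_divisor(7, 3))    # 7   (prime, so only a itself divides it)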
def simplify_stl_names(decl):
"""Take common STL/Standard Library names and simplify them to help make the
stack trace look more readable and less like the graphics in the matrix.
"""
p = simplify_template_call(decl)
if p == []:
return decl
return p[0] + '<' + ', '.join(p[1:-1]) + '>::' + p[-1] | 53ea9c18e47ce4a7d922db74efdc45646441ea49 | 2,048 |
from typing import Sequence
from typing import Union
from typing import Callable
from typing import Optional
from typing import Tuple
def sample_switching_models(
models: Sequence,
usage_seq: Sequence,
X: Union[None, Sequence, Callable] = None,
initial_conditions: Optional[Tuple[Sequence, Sequence]] = None,
return_input: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
""" Sample from a non-stationary stochastic processes that switches between
different ARMA models at given times.
This functions sets the models' `history_` attribute appropriately to ensure
consistency across time.
Parameters
----------
models
Sequence of models to use.
usage_seq
Sequence identifying the model to use at each time steps. Models are
labeled from `0` to `len(models) - 1`.
X
If given, this overrides the input source for the models. If it is a
sequence, it should be at least as long as `len(usage_seq)`.
initial_conditions
A tuple, `(initial_y, initial_x)`, of recent samples of the output and
input sequences used to seed the simulation. If these are not provided,
they are assumed equal to zero.
return_input
If true, returns both output and input. If false (the default), returns only
the output.
Returns a sequence `Y` of generated samples. If `return_input` is true,
returns a tuple `(Y, X)` of generated output samples and input samples. If
the `X` parameter was given as a sequence, the returned `X` simply mirrors
the input.
"""
# check the inputs
if len(models) == 0:
raise ValueError("No models given.")
if np.min(usage_seq) < 0 or np.max(usage_seq) >= len(models):
raise ValueError("Invalid entry in usage_seq vector.")
# handle vector X
if X is not None and not callable(X):
if len(X) < len(usage_seq):
raise ValueError("Not enough input values in X.")
X_ret = X
X = sources.Stream(X)
have_X_ret = True
else:
X_ret = np.zeros(len(usage_seq))
have_X_ret = False
# handle default initial conditions
if initial_conditions is None:
initial_conditions = ([], [])
# generate the samples
Y_ret = np.zeros(len(usage_seq))
usage_rle = rle_encode(usage_seq)
ptr = 0
for model_id, n_samples in usage_rle:
model = models[model_id]
# ensure proper history
if ptr >= model.p:
history_y = np.copy(Y_ret[ptr - model.p : ptr])
else:
n_left = model.p - ptr
if len(initial_conditions[0]) >= n_left:
history_y = np.hstack((initial_conditions[0][-n_left:], Y_ret[:ptr]))
else:
history_y = np.hstack(
(
np.zeros(n_left - len(initial_conditions[0])),
initial_conditions[0],
Y_ret[:ptr],
)
)
if ptr >= model.q:
history_x = np.copy(X_ret[ptr - model.q : ptr])
else:
n_left = model.q - ptr
if len(initial_conditions[1]) >= n_left:
history_x = np.hstack((initial_conditions[1][-n_left:], X_ret[:ptr]))
else:
history_x = np.hstack(
(
np.zeros(n_left - len(initial_conditions[1])),
initial_conditions[1],
X_ret[:ptr],
)
)
model.history_ = (history_y, history_x)
# generate and store the samples from this model
crt_y, crt_x = model.transform(n_samples, X=X, return_input=True)
Y_ret[ptr : ptr + n_samples] = crt_y
if not have_X_ret:
X_ret[ptr : ptr + n_samples] = crt_x
ptr += n_samples
if return_input:
return Y_ret, X_ret
else:
return Y_ret | 472e20968fe835b01da57c4a0abab376c006094b | 2,049 |
def eval_per_class(c_dets, c_truths, overlap_thresh=0.5, eval_phrase=False):
""" Evaluation for each class.
Args:
c_dets: A dictionary of all detection results.
c_truths: A dictionary of all ground-truth annotations.
overlap_thresh: A float of the threshold used in IoU matching.
Returns:
scores_all: A list of numpy float arrays collecting the confidence scores
of both true positives and false positives in each image.
tp_fp_labels_all: A list of numpy bool arrays collecting the true
positive (=1) and false positive (=0) labels in each image.
num_gt_all: An integer of the total number of valid ground-truth boxes.
img_keys: A list of image keys, ordered consistently with the lists above.
"""
num_gt_all = sum([len(c_truths[l]) for l in c_truths])
scores_all = []
tp_fp_labels_all = []
img_keys = []
for key in c_dets:
img_keys.append(key)
img_det = c_dets[key]
num_det = len(img_det)
scores = np.array([det['score'] for det in img_det])
tp_fp_labels = np.zeros(num_det, dtype=bool)
if key not in c_truths or all(scores<0):
# detections not in ground truth or detections have negative image level label, classified as false positives
scores_all.append(scores)
tp_fp_labels_all.append(tp_fp_labels)
continue
img_gt = c_truths[key]
if eval_phrase:
ious = np.array([[IoU(d['rect'], g['rect']) for g in img_gt] for d in img_det])
else:
ious = np.array([[min(IoU(d['subject_rect'], g['subject_rect']), IoU(d['object_rect'], g['object_rect'])) for g in img_gt] for d in img_det])
if ious.shape[1] > 0:
max_overlap_gt_ids = np.argmax(ious, axis=1)
is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool)
for i in range(num_det):
gt_id = max_overlap_gt_ids[i]
if ious[i, gt_id] >= overlap_thresh:
if not is_gt_box_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_box_detected[gt_id] = True
# if ious.shape[1] > 0:
# max_overlap_gt_ids = np.argsort(-1*ious, axis=1)
# is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool)
# for i in range(num_det):
# for gt_id in max_overlap_gt_ids[i, :]:
# if ious[i, gt_id] >= overlap_thresh:
# if not is_gt_box_detected[gt_id]:
# tp_fp_labels[i] = True
# is_gt_box_detected[gt_id] = True
# break
# else:
# break
# num_gt = len(img_gt)
# if ious.shape[1] > 0:
# max_overlap_det_ids = np.argsort(-1*ious, axis=0)
# is_det_box_used = np.zeros(ious.shape[0], dtype=bool)
# for i in range(num_gt):
# for det_id in max_overlap_det_ids[:, i]:
# if ious[det_id, i] >= overlap_thresh:
# if not is_det_box_used[det_id]:
# tp_fp_labels[det_id] = True
# is_det_box_used[det_id] = True
# break
# else:
# break
scores_all.append(scores)
tp_fp_labels_all.append(tp_fp_labels)
return scores_all, tp_fp_labels_all, num_gt_all, img_keys | 7884255c6fb45d6cb01b88edd5017d134f0344b0 | 2,050 |
def define_components(mod):
"""
Adds components to a Pyomo abstract model object to describe
unit commitment for projects. Unless otherwise stated, all power
capacity is specified in units of MW and all sets and parameters
are mandatory.
-- Commit decision, limits, and headroom --
CommitProject[(proj, t) in PROJ_DISPATCH_POINTS] is a decision
variable of how much capacity (MW) from each project to commit in
each timepoint. By default, this operates in continuous mode.
Include the project.unitcommit.discrete module to force this to
operate with discrete unit commitment.
proj_max_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS]
describes the maximum commit level as a fraction of available
capacity (capacity that is built and expected to be available for
commitment; derated by annual expected outage rate). This has
limited use cases, but could be used to simulate outages (scheduled
or non-scheduled) in a production-cost simulation. This optional
parameter has a default value of 1.0, indicating that all available
capacity can be committed. If you wish to have discrete unit
commitment, I advise overriding the default behavior and specifying
a more discrete treatment of outages.
proj_min_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS]
describes the minimum commit level as a fraction of available
capacity. This is useful for describing must-run plants that ensure
reliable grid operations, and for forcing hydro plants to operate at
some minimal level to maintain streamflow. This can also be used to
specify baseload plants that must be run year-round. This optional
parameter will default to proj_max_commit_fraction for generation
technologies marked baseload and 0 for all other generators.
CommitLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
that describes the minimum capacity that must be committed. This is
derived from installed capacity and proj_min_commit_fraction.
CommitUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
that describes the maximum capacity available for commitment. This
is derived from installed capacity and proj_max_commit_fraction.
Enforce_Commit_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
Enforce_Commit_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
constraints that limit CommitProject to the upper and lower bounds
defined above.
CommitLowerLimit <= CommitProject <= CommitUpperLimit
CommitSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
that describes the amount of additional capacity available for
commitment: CommitUpperLimit - CommitProject
CommitSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
that describes the amount of committed capacity that could be taken
offline: CommitProject - CommitLowerLimit
-- Startup and Shutdown --
The capacity started up or shut down is completely determined by
the change in CommitProject from one hour to the next, but we can't
calculate these directly within the linear program because
linear programs don't have if statements. Instead, we'll define extra
decision variables that are tightly constrained. Since startup incurs
costs and shutdown does not, the linear program will not simultaneously
set both of these to non-zero values.
Startup[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable
describing how much additional capacity was brought online in a given
timepoint. Committing additional capacity incurs startup costs for
fossil plants from fuel requirements as well as additional O&M
costs.
Shutdown[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable
describing how much committed capacity to take offline in a given
timepoint.
Commit_Startup_Shutdown_Consistency[(proj, t) in
PROJ_DISPATCH_POINTS] is a constraint that forces consistency
between commitment decision from one hour to the next with startup
and shutdown.
g_startup_fuel[g in FUEL_BASED_GEN] describes fuel
requirements of starting up additional generation capacity expressed
in units of MMBTU / MW. This optional parameter has a default value
of 0.
proj_startup_fuel[proj in FUEL_BASED_PROJECTS] is the same as
g_startup_fuel except on a project basis. This optional parameter
defaults to g_startup_fuel.
g_startup_om[g in GENERATION_TECHNOLOGIES] describes operations and
maintenance costs incurred from starting up additional generation
capacity expressed in units of $base_year / MW. This could represent
direct maintenance requirements or some overall depreciation rate
from accelerated wear and tear. This optional parameter has a
default value of 0.
proj_startup_om[proj in PROJECTS] is the same as g_startup_om except
on a project basis. This optional parameter defaults to g_startup_om.
Total_Startup_OM_Costs[t in TIMEPOINTS] is an expression for passing
total startup O&M costs to the sys_cost module.
-- Dispatch limits based on committed capacity --
g_min_load_fraction[g] describes the minimum loading level of a
generation technology as a fraction of committed capacity. Many
fossil plants - especially baseload - have a minimum run level which
should be stored here. Note that this is only applied to committed
capacity. This is an optional parameter that defaults to 1 for
generation technologies marked baseload and 0 for all other
generators. This parameter is only relevant when considering unit
commitment so it is defined here rather than the gen_tech module.
proj_min_cap_factor[(proj, t) in PROJ_DISPATCH_POINTS] describes the
minimum loading level for each project and timepoint as a fraction
of committed capacity. This is an optional parameter that defaults
to g_min_load_fraction, which in turn defaults to 0. You may wish to
vary this by timepoint to establish minimum flow rates for
hydropower, to specify thermal demand for a cogeneration project, or
specify must-run reliability constraints in a geographically or
temporally detailed model. This could also be used to constrain
dispatch of distributed solar resources that cannot be curtailed by
the system operator.
DispatchLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] and
DispatchUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] are
expressions that define the lower and upper bounds of dispatch.
    Lower bounds are calculated as CommitProject * proj_min_load_fraction,
and upper bounds are calculated relative to committed capacity and
renewable resource availability.
Enforce_Dispatch_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
Enforce_Dispatch_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
constraints that limit DispatchProj to the upper and lower bounds
defined above.
DispatchLowerLimit <= DispatchProj <= DispatchUpperLimit
DispatchSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the amount of additional committed capacity available
for dispatch: DispatchUpperLimit - DispatchProj
DispatchSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an
expression that describes the amount by which dispatch could be
lowered, that is how much downramp potential each project has
in each timepoint: DispatchProj - DispatchLowerLimit
"""
# Commitment decision, bounds and associated slack variables
mod.CommitProject = Var(
mod.PROJ_DISPATCH_POINTS,
within=NonNegativeReals)
mod.proj_max_commit_fraction = Param(
mod.PROJ_DISPATCH_POINTS,
within=PercentFraction,
default=lambda m, proj, t: 1.0)
mod.proj_min_commit_fraction = Param(
mod.PROJ_DISPATCH_POINTS,
within=PercentFraction,
default=lambda m, proj, t: (
m.proj_max_commit_fraction[proj, t]
if proj in m.BASELOAD_PROJECTS
else 0.0))
mod.CommitLowerLimit = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, proj, t: (
m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
m.proj_min_commit_fraction[proj, t]))
mod.CommitUpperLimit = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, proj, t: (
m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
m.proj_max_commit_fraction[proj, t]))
mod.Enforce_Commit_Lower_Limit = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.CommitLowerLimit[proj, t] <= m.CommitProject[proj, t]))
mod.Enforce_Commit_Upper_Limit = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.CommitProject[proj, t] <= m.CommitUpperLimit[proj, t]))
mod.CommitSlackUp = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, proj, t: (
m.CommitUpperLimit[proj, t] - m.CommitProject[proj, t]))
mod.CommitSlackDown = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, proj, t: (
m.CommitProject[proj, t] - m.CommitLowerLimit[proj, t]))
# Startup & Shutdown
mod.Startup = Var(
mod.PROJ_DISPATCH_POINTS,
within=NonNegativeReals)
mod.Shutdown = Var(
mod.PROJ_DISPATCH_POINTS,
within=NonNegativeReals)
mod.Commit_Startup_Shutdown_Consistency = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, pr, t: (
m.CommitProject[pr, m.tp_previous[t]] +
m.Startup[pr, t] - m.Shutdown[pr, t] == m.CommitProject[pr, t]))
mod.g_startup_fuel = Param(mod.FUEL_BASED_GEN, default=0.0)
mod.g_startup_om = Param(mod.GENERATION_TECHNOLOGIES, default=0.0)
mod.proj_startup_fuel = Param(
mod.FUEL_BASED_PROJECTS,
default=lambda m, pr: m.g_startup_fuel[m.proj_gen_tech[pr]])
mod.proj_startup_om = Param(
mod.PROJECTS,
default=lambda m, pr: m.g_startup_om[m.proj_gen_tech[pr]])
# Startup costs need to be divided over the duration of the
# timepoint because it is a one-time expenditure in units of $
# but cost_components_tp requires an hourly cost rate in $ / hr.
mod.Total_Startup_OM_Costs = Expression(
mod.TIMEPOINTS,
initialize=lambda m, t: sum(
m.proj_startup_om[proj] * m.Startup[proj, t] / m.tp_duration_hrs[t]
for (proj, t2) in m.PROJ_DISPATCH_POINTS
if t == t2))
mod.cost_components_tp.append('Total_Startup_OM_Costs')
# Dispatch limits relative to committed capacity.
mod.g_min_load_fraction = Param(
mod.GENERATION_TECHNOLOGIES,
within=PercentFraction,
default=lambda m, g: 1.0 if m.g_is_baseload[g] else 0.0)
mod.proj_min_load_fraction = Param(
mod.PROJ_DISPATCH_POINTS,
default=lambda m, pr, t: m.g_min_load_fraction[m.proj_gen_tech[pr]])
mod.DispatchLowerLimit = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, pr, t: (
m.CommitProject[pr, t] * m.proj_min_load_fraction[pr, t]))
def DispatchUpperLimit_expr(m, pr, t):
if pr in m.VARIABLE_PROJECTS:
return m.CommitProject[pr, t] * m.prj_max_capacity_factor[pr, t]
else:
return m.CommitProject[pr, t]
mod.DispatchUpperLimit = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=DispatchUpperLimit_expr)
mod.Enforce_Dispatch_Lower_Limit = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.DispatchLowerLimit[proj, t] <= m.DispatchProj[proj, t]))
mod.Enforce_Dispatch_Upper_Limit = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.DispatchProj[proj, t] <= m.DispatchUpperLimit[proj, t]))
mod.DispatchSlackUp = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, proj, t: (
m.DispatchUpperLimit[proj, t] - m.DispatchProj[proj, t]))
mod.DispatchSlackDown = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=lambda m, proj, t: (
m.DispatchProj[proj, t] - m.DispatchLowerLimit[proj, t])) | 4ad0aae0df9a3953309138dfbc138f944efba74e | 2,051 |
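A hand-worked illustration of the Commit_Startup_Shutdown_Consistency bookkeeping described in the docstring above (plain Python arithmetic with made-up MW values, not part of the Pyomo model):
# CommitProject[prev] + Startup[t] - Shutdown[t] == CommitProject[t]
commit_prev, commit_now = 300.0, 450.0          # MW committed in consecutive timepoints
startup = max(commit_now - commit_prev, 0.0)    # 150 MW brought online
shutdown = max(commit_prev - commit_now, 0.0)   # 0 MW taken offline
assert commit_prev + startup - shutdown == commit_now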
from functools import partial
import pandas as pd
import statsmodels.api as sm
def adjustwithin(df, pCol, withinCols, method='holm'):
"""Apply multiplicity adjustment to a "stacked"
pd.DataFrame, adjusting within groups defined by
combinations of unique values in withinCols
Parameters
----------
df : pd.DataFrame
Stacked DataFrame with one column of pvalues
and other columns to define groups for adjustment.
pCol : str
Column containing pvalues.
withinCols : list
Columns used to define subgroups/families for adjustment.
method : str
An adjustment method for sm.stats.multipletests.
Use 'holm' for Holm-Bonferroni FWER-adj and
'fdr_bh' for Benjamini and Hochberg FDR-adj
Returns
-------
adjSeries : pd.Series
Same shape[0] as df containing adjusted pvalues/adjpvalues."""
def _transformFunc(ser, method):
nonNan = ~ser.isnull()
if nonNan.sum() >= 1:
rej, adjp, alphas, alphab = sm.stats.multipletests(ser.loc[nonNan].values, method=method)
out = ser.copy(deep=True)
out.loc[nonNan] = adjp
return out
else:
return ser
if not len(withinCols) == 0:
gby = df[[pCol] + withinCols].groupby(withinCols)
adjDf = gby.transform(partial(_transformFunc, method=method))
# adjDf = df.drop(pCol, axis=1).join(adjDf)
else:
adjDf = pd.Series(adjustnonnan(df.loc[:, pCol], method=method), index=df.index, name='adjusted-pvalue')
return adjDf | 4040c53def07ce5353c111036887b5df4666684c | 2,052 |
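A usage sketch for adjustwithin with made-up p-values: two families of two tests each, Holm-adjusted within each family.
import pandas as pd
df = pd.DataFrame({'pvalue': [0.01, 0.04, 0.03, 0.20],
                   'family': ['A', 'A', 'B', 'B']})
adj = adjustwithin(df, pCol='pvalue', withinCols=['family'], method='holm')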
from urllib.parse import urlparse, parse_qsl
def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query | 252d2ccfb2fb15db041e97908c982dae9bf3c1ef | 2,053 |
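A usage sketch with hypothetical URLs: OAuth-style responses carry their parameters in the fragment, ordinary links in the query string.
parse_url_query_params('https://example.com/cb#access_token=abc&state=xyz')
# -> {'access_token': 'abc', 'state': 'xyz'}
parse_url_query_params('https://example.com/page?q=test&page=2', fragment=False)
# -> {'q': 'test', 'page': '2'}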
import torch
import math
def sample_random_lightdirs(num_rays, num_samples, upper_only=False):
"""Randomly sample directions in the unit sphere.
Args:
num_rays: int or tensor shape dimension. Number of rays.
num_samples: int or tensor shape dimension. Number of samples per ray.
upper_only: bool. Whether to sample only on the upper hemisphere.
Returns:
lightdirs: [R, S, 3] float tensor. Random light directions sampled from the unit
sphere for each sampled point.
"""
if upper_only:
min_z = 0
else:
min_z = -1
phi = torch.rand(num_rays, num_samples) * (2 * math.pi) # [R, S]
cos_theta = torch.rand(num_rays, num_samples) * (1 - min_z) + min_z # [R, S]
theta = torch.acos(cos_theta) # [R, S]
x = torch.sin(theta) * torch.cos(phi)
y = torch.sin(theta) * torch.sin(phi)
z = torch.cos(theta)
lightdirs = torch.cat((x[..., None], y[..., None], z[..., None]), dim=-1) # [R, S, 3]
return lightdirs | 7f7657ff66d0cffea6892dffdf49ba6b52b9def9 | 2,054 |
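A usage sketch: draw light directions for 4 rays with 8 samples each on the upper hemisphere; every sampled direction should have unit norm.
dirs = sample_random_lightdirs(4, 8, upper_only=True)   # shape [4, 8, 3]
assert dirs.shape == (4, 8, 3)
assert torch.allclose(dirs.norm(dim=-1), torch.ones(4, 8), atol=1e-5)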
import numpy as np
def gaussgen(sigma):
    """
    Function to generate Gaussian kernels, in 1D, 2D and 3D.
    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015
    :param sigma: Sequence of sigmas, one per dimension, for use in generating the Gaussian kernel (see defaults in generate_FSL_structure_tensor)
    :return: Gaussian kernel with as many dimensions as sigma has entries.
    """
    halfsize = np.ceil(3 * max(sigma))
    x = np.arange(-halfsize, halfsize + 1, dtype=float)
    dim = len(sigma)
    if dim == 1:
        k = np.exp(-x ** 2 / (2 * sigma[0] ** 2))
    elif dim == 2:
        [X, Y] = np.meshgrid(x, x)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2))
    elif dim == 3:
        [X, Y, Z] = np.meshgrid(x, x, x)
        X = X.transpose(0, 2, 1)  # Obtained through vigorous testing (see below...)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2)) * np.exp(
            -Z ** 2 / (2 * sigma[2] ** 2))
    else:
        raise ValueError('Only supports up to dimension 3')
    return np.divide(k, np.sum(np.abs(k))) | 7673e3fb8ddbb7bbb646331a24380581a7af9617 | 2,055
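A usage sketch: build a 2-D Gaussian kernel with sigma 1.5 in each direction; the kernel is normalized so its entries sum to one.
k2 = gaussgen([1.5, 1.5])          # 11x11 kernel (halfsize = ceil(3 * 1.5) = 5)
assert abs(k2.sum() - 1.0) < 1e-12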
import types
from typing import List, Text
def metrics_specs_from_keras(
model_name: Text,
model_loader: types.ModelLoader,
) -> List[config.MetricsSpec]:
"""Returns metrics specs for metrics and losses associated with the model."""
model = model_loader.construct_fn()
if model is None:
return []
metric_names = []
metrics = []
if hasattr(model, 'loss_functions'):
# Legacy keras metrics separate the losses from the metrics and store them
# under loss_functions. The first name in metric_names is always 'loss'
# followed by the loss_function names (prefixed by output_name if multiple
# outputs) and then followed by the metric names (also prefixed by output
# name). Note that names in loss_functions will not have any output name
# prefixes (if used) while the metrics will so we need to use the names in
# metric_names for matching with outputs not the names in the functions.
metric_names = model.metrics_names
metrics.extend(model.loss_functions)
metrics.extend(model.metrics)
if len(metric_names) > len(metrics) and metric_names[0] == 'loss':
metric_names = metric_names[1:]
elif hasattr(model, 'compiled_loss') and hasattr(model, 'compiled_metrics'):
# In the new keras metric setup the metrics include the losses (in the form
# of a metric type not a loss type) and the metrics_names align with the
# names in the metric classes. The metrics itself contains compiled_loss,
# compiled_metrics, and custom metrics (added via add_metric). Since we only
# care about compiled metrics we use these APIs instead. Note that the
# overall loss metric is an average of the other losses which doesn't take
# y_true, y_pred as inputs so it can't be calculated via standard inputs so
# we remove it.
metrics.extend(model.compiled_loss.metrics[1:])
metrics.extend(model.compiled_metrics.metrics)
metric_names = [m.name for m in metrics]
specs = []
# Need to check if model.output_names exists because the keras Sequential
# model doesn't always contain output_names (b/150510258).
if hasattr(model, 'output_names') and len(model.output_names) > 1:
unmatched_metrics = {m for m in metrics}
for output_name in model.output_names:
per_output_metrics = []
for (name, metric) in zip(metric_names, metrics):
if name.startswith(output_name + '_'):
per_output_metrics.append(metric)
unmatched_metrics.remove(metric)
if per_output_metrics:
specs.extend(
metric_specs.specs_from_metrics(
metrics=per_output_metrics,
model_names=[model_name],
output_names=[output_name],
include_example_count=False,
include_weighted_example_count=False))
metrics = list(unmatched_metrics)
if metrics:
specs.extend(
metric_specs.specs_from_metrics(
metrics=metrics,
model_names=[model_name],
include_example_count=False,
include_weighted_example_count=False))
return specs | fd471d20782507e983abec5610115e83c59ed7e0 | 2,056 |
def __main__(recipe, params):
"""
Main code: should only call recipe and params (defined from main)
:param recipe:
:param params:
:return:
"""
# ----------------------------------------------------------------------
# Main Code
# ----------------------------------------------------------------------
# This is just a test
if 'TEXT' in params['INPUTS']:
if params['INPUTS']['TEXT'] not in ['None', None, '']:
WLOG(params, '', params['INPUTS']['TEXT'])
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return core.return_locals(params, locals()) | 3e9fc1006457be759e1e0b05f36c00297f0c5f4c | 2,057 |
from math import log, pi
def AICrss(n, k, rss):
"""Calculate the Akaike Information Criterion value, using:
- n: number of observations
- k: number of parameters
- rss: residual sum of squares
"""
return n * log((2 * pi) / n) + n + 2 + n * log(rss) + 2 * k | 988345930a8544d2979b99d6400198d3a59fa85c | 2,058 |
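A worked example with made-up numbers: 100 observations, 3 fitted parameters, residual sum of squares 12.5.
aic = AICrss(n=100, k=3, rss=12.5)   # approximately 83.84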
import sys
import os
def CONVERT_OUT(s):
"""
convert a directory of module into a reponsed output directory
if s doesn't beneath the module, raise NotInSelfModuleError
Args:
s : a relative directory beneathed the module
Returns:
return the relative path of responsed output directory
"""
if sys.argv[0] == 'PLANISH':
return ""
env = Environment.GetCurrent()
_s = os.path.normpath(os.path.join(env.BrocDir(), s))
# to check whether _s beneathes directory of module
if env.ModulePath() not in _s:
raise NotInSelfModuleError(env.BrocDir(), _s)
return os.path.normpath(os.path.join('broc_out', env.BrocCVSDir(), s)) | d6a618ef5183a93b78d1f260bf130832b2751dd1 | 2,059 |
import numpy as np
def random_traveling_salesman(points, distmat, avg_edges=None, start=None,
max_perm_samples=2e3, end=None, debug=0):
"""
    Approximates the shortest route to visit all the cities by random sampling
    (an exhaustive search would be O(N!), so it is never attempted on long lists).
    We use a limit of max_perm_samples (default=2e3) random samples of the
    permutation space of all possible routes and then select the route with
the minimal overall route distance.
Args:
points,
distmat (np.matrix): the matrix of distances between all stops in
the field of interest.
start=None,
max_perm_samples=2e3,
end=None,
debug=0
Returns:
path (np.array): ordered points optimized according to distmat
"""
if start is None:
start = points[0]
npoints = len(points)
if avg_edges is None:
nnodes = distmat.shape[0]
nedges = sum([(~np.isinf(distmat[k, k+1:])).sum() for k in range(nnodes)])
avg_edges = int(nedges/nnodes) + 1
# attempt to estimate the number of possible routes given the average
# number of edges per node
nroutes_test = min(int(max_perm_samples), avg_edges**npoints)
if debug:
print(f'drawing {nroutes_test} random routes to test')
# construct a limited set of random permutations
if not(isinstance(points, np.ndarray)):
points = np.asarray(points)
else:
points = points.copy()
this_perm = points
# permutes = []
best_permute = None
nvalid_found = 0
best = np.inf
while nvalid_found < nroutes_test: # len(best_permute) < nroutes_test:
np.random.shuffle(this_perm)
if this_perm[0] == start:
nvalid_found += 1
# permutes.append(this_perm.copy())
length = total_distance(this_perm, distmat)
if length < best:
best = length
best_permute = this_perm.copy()
# total_dist = np.zeros(len(permutes))
# if debug:
# print(total_dist)
# for pidx, perm in enumerate(permutes):
# total_dist[pidx] = total_distance(perm, distmat)
# path = permutes[np.argsort(total_dist)[0]]
path = best_permute
if end is not None:
path = path.tolist()
path.append(end)
return np.asarray(path)
else:
return path | 3096962cb73a30e782ee110fbf23db7aa82fdcda | 2,060 |
from typing import Optional
from jose import jwt
from jose.exceptions import ExpiredSignatureError, JWTClaimsError, JWTError
def get_username_from_access_token(token: str, secret_key: str, algorithm: str) -> Optional[str]:
"""
Decodes a token and returns the "sub" (= username) of the decoded token
:param token: JWT access token
:param secret_key: The secret key that should be used for token decoding
:param algorithm: The algorith that should be used for token decoding (like HS256)
:return: Username
"""
try:
payload = jwt.decode(token, secret_key, algorithms=[algorithm])
username: str = payload.get("sub")
if not username:
raise credentials_exception
return username
except (JWTError, ExpiredSignatureError, JWTClaimsError):
raise credentials_exception | 461ce205b43961af25c77af4d3902d1342bba32a | 2,061 |
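A round-trip sketch using python-jose (assumed to be the library behind `jwt` here); the secret is a placeholder and `credentials_exception` must already be defined in the module.
token = jwt.encode({'sub': 'alice'}, 'CHANGE_ME', algorithm='HS256')
assert get_username_from_access_token(token, 'CHANGE_ME', 'HS256') == 'alice'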
import re
from os.path import dirname, join
def _inject_getter_attrs(
metaself,
objname,
attrs,
configurable_attrs,
depc_name=None,
depcache_attrs=None,
settable_attrs=None,
aliased_attrs=None,
):
"""
Used by the metaclass to inject methods and properties into the class
inheriting from ObjectList1D
"""
if settable_attrs is None:
settable_attrs = []
settable_attrs = set(settable_attrs)
# Inform the class of which variables will be injected
metaself._settable_attrs = settable_attrs
metaself._attrs = attrs
metaself._configurable_attrs = configurable_attrs
if depcache_attrs is None:
metaself._depcache_attrs = []
else:
metaself._depcache_attrs = ['%s_%s' % (tbl, col) for tbl, col in depcache_attrs]
if aliased_attrs is not None:
metaself._attrs_aliases = aliased_attrs
else:
metaself._attrs_aliases = {}
# if not getattr(metaself, '__needs_inject__', True):
# return
attr_to_aliases = ut.invert_dict(metaself._attrs_aliases, unique_vals=False)
# What is difference between configurable and depcache getters?
# Could depcache getters just be made configurable?
    # I guess it's just an efficiency thing. Actually it's config2_-vs-config
# FIXME: rectify differences between normal / configurable / depcache
# getter
def _make_caching_setter(attrname, _rowid_setter):
def _setter(self, values, *args, **kwargs):
if self._ibs is None:
self._internal_attrs[attrname] = values
else:
if self._caching and attrname in self._internal_attrs:
self._internal_attrs[attrname] = values
_rowid_setter(self, self._rowids, values)
ut.set_funcname(_setter, '_set_' + attrname)
return _setter
def _make_caching_getter(attrname, _rowid_getter):
def _getter(self):
if self._ibs is None or (self._caching and attrname in self._internal_attrs):
data = self._internal_attrs[attrname]
else:
data = _rowid_getter(self, self._rowids)
if self._caching:
self._internal_attrs[attrname] = data
return data
ut.set_funcname(_getter, '_get_' + attrname)
return _getter
# make default version use implicit rowids and another
# that takes explicit rowids.
def _make_setters(objname, attrname):
ibs_funcname = 'set_%s_%s' % (objname, attrname)
def _rowid_setter(self, rowids, values, *args, **kwargs):
ibs_callable = getattr(self._ibs, ibs_funcname)
ibs_callable(rowids, values, *args, **kwargs)
ut.set_funcname(_rowid_setter, '_rowid_set_' + attrname)
_setter = _make_caching_setter(attrname, _rowid_setter)
return _rowid_setter, _setter
# ---
def _make_getters(objname, attrname):
ibs_funcname = 'get_%s_%s' % (objname, attrname)
def _rowid_getter(self, rowids):
ibs_callable = getattr(self._ibs, ibs_funcname)
data = ibs_callable(rowids)
if self._asarray:
data = np.array(data)
return data
ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname)
_getter = _make_caching_getter(attrname, _rowid_getter)
return _rowid_getter, _getter
def _make_cfg_getters(objname, attrname):
ibs_funcname = 'get_%s_%s' % (objname, attrname)
def _rowid_getter(self, rowids):
ibs_callable = getattr(self._ibs, ibs_funcname)
data = ibs_callable(rowids, config2_=self._config)
if self._asarray:
data = np.array(data)
return data
ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname)
_getter = _make_caching_getter(attrname, _rowid_getter)
return _rowid_getter, _getter
def _make_depc_getters(depc_name, attrname, tbl, col):
def _rowid_getter(self, rowids):
depc = getattr(self._ibs, depc_name)
data = depc.get(tbl, rowids, col, config=self._config)
if self._asarray:
data = np.array(data)
return data
ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname)
_getter = _make_caching_getter(attrname, _rowid_getter)
return _rowid_getter, _getter
# Collect setter / getter functions and properties
rowid_getters = []
getters = []
setters = []
properties = []
for attrname in attrs:
_rowid_getter, _getter = _make_getters(objname, attrname)
if attrname in settable_attrs:
_rowid_setter, _setter = _make_setters(objname, attrname)
setters.append(_setter)
else:
_setter = None
prop = property(fget=_getter, fset=_setter)
rowid_getters.append((attrname, _rowid_getter))
getters.append(_getter)
properties.append((attrname, prop))
for attrname in configurable_attrs:
_rowid_getter, _getter = _make_cfg_getters(objname, attrname)
prop = property(fget=_getter)
rowid_getters.append((attrname, _rowid_getter))
getters.append(_getter)
properties.append((attrname, prop))
if depcache_attrs is not None:
for tbl, col in depcache_attrs:
attrname = '%s_%s' % (tbl, col)
_rowid_getter, _getter = _make_depc_getters(depc_name, attrname, tbl, col)
prop = property(fget=_getter, fset=None)
rowid_getters.append((attrname, _rowid_getter))
getters.append(_getter)
properties.append((attrname, prop))
aliases = []
# Inject all gathered information
for attrname, func in rowid_getters:
funcname = ut.get_funcname(func)
setattr(metaself, funcname, func)
# ensure aliases have rowid getters
for alias in attr_to_aliases.get(attrname, []):
alias_funcname = '_rowid_get_' + alias
setattr(metaself, alias_funcname, func)
for func in getters:
funcname = ut.get_funcname(func)
setattr(metaself, funcname, func)
for func in setters:
funcname = ut.get_funcname(func)
setattr(metaself, funcname, func)
for attrname, prop in properties:
setattr(metaself, attrname, prop)
for alias in attr_to_aliases.pop(attrname, []):
aliases.append((alias, attrname))
setattr(metaself, alias, prop)
if ut.get_argflag('--autogen-core'):
# TODO: turn on autogenertion given a flag
def expand_closure_source(funcname, func):
source = ut.get_func_sourcecode(func)
closure_vars = [
(k, v.cell_contents)
for k, v in zip(func.func_code.co_freevars, func.func_closure)
]
source = ut.unindent(source)
for k, v in closure_vars:
source = re.sub('\\b' + k + '\\b', ut.repr2(v), source)
source = re.sub(r'def .*\(self', 'def ' + funcname + '(self', source)
source = ut.indent(source.strip(), ' ') + '\n'
return source
explicit_lines = []
# build explicit version for jedi?
        for func in getters:
            source = expand_closure_source(ut.get_funcname(func), func)
            explicit_lines.append(source)
        # build explicit version for jedi?
        for func in setters:
            source = expand_closure_source(ut.get_funcname(func), func)
            explicit_lines.append(source)
for attrname, prop in properties:
getter_name = None if prop.fget is None else ut.get_funcname(prop.fget)
setter_name = None if prop.fset is None else ut.get_funcname(prop.fset)
source = ' %s = property(%s, %s)' % (attrname, getter_name, setter_name)
explicit_lines.append(source)
for alias, attrname in aliases:
source = ' %s = %s' % (alias, attrname)
explicit_lines.append(source)
explicit_source = (
'\n'.join(
[
'from wbia import _wbia_object',
'',
'',
'class _%s_base_class(_wbia_object.ObjectList1D):',
' __needs_inject__ = False',
'',
]
)
% (objname,)
)
explicit_source += '\n'.join(explicit_lines)
explicit_fname = '_autogen_%s_base.py' % (objname,)
ut.writeto(join(dirname(__file__), explicit_fname), explicit_source + '\n')
if attr_to_aliases:
raise AssertionError('Unmapped aliases %r' % (attr_to_aliases,)) | 48511de7639dd6de9e6f42c9e4779466d03315fe | 2,062 |
def date_handler(obj):
"""make datetime object json serializable.
Notes
-----
Taken from here: https://tinyurl.com/yd84fqlw
"""
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError | 741867e05e1b5f3e9d0e042b3b1576fb61ab0219 | 2,063 |
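Typical use is as the `default` hook for json.dumps so datetime values serialize to ISO-8601 strings.
import json
from datetime import datetime
json.dumps({'created': datetime(2021, 5, 1, 12, 30)}, default=date_handler)
# -> '{"created": "2021-05-01T12:30:00"}'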
def has_type(typestmt, names):
"""Return type with name if `type` has name as one of its base types,
and name is in the `names` list. otherwise, return None."""
if typestmt.arg in names:
return typestmt
for t in typestmt.search('type'): # check all union's member types
r = has_type(t, names)
if r is not None:
return r
typedef = getattr(typestmt, 'i_typedef', None)
if typedef is not None and getattr(typedef, 'i_is_circular', None) is False:
t = typedef.search_one('type')
if t is not None:
return has_type(t, names)
return None | d534331df62f76efdcbb93be52eb57ee600a7783 | 2,064 |
def generic_ecsv(file_name, column_mapping=None, **kwargs):
"""
Read a spectrum from an ECSV file, using generic_spectrum_from_table_loader()
to try to figure out which column is which.
The ECSV columns must have units, as `generic_spectrum_from_table_loader`
depends on this to determine the meaning of the columns. For manual
control over the column to spectrum mapping, use the ASCII loader.
Parameters
----------
file_name: str
The path to the ECSV file.
column_mapping : dict
A dictionary describing the relation between the ECSV file columns
and the arguments of the `Spectrum1D` class, along with unit
information. The dictionary keys should be the ECSV file column names
while the values should be a two-tuple where the first element is the
associated `Spectrum1D` keyword argument, and the second element is the
unit for the ECSV file column::
            column_mapping = {'FLUX': ('flux', 'Jy')}
Returns
-------
data: Spectrum1D
The spectrum that is represented by the data in this table.
"""
table = Table.read(file_name, format='ascii.ecsv')
if column_mapping is None:
return generic_spectrum_from_table(table, **kwargs)
return spectrum_from_column_mapping(table, column_mapping) | 0c9ac3a8d31a449e698907e02ad4715868844403 | 2,065 |
def parse_valuation_line(s, encoding=None):
"""
Parse a line in a valuation file.
Lines are expected to be of the form::
noosa => n
girl => {g1, g2}
chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
:param s: input line
:type s: str
:param encoding: the encoding of the input string, if it is binary
:type encoding: str
:return: a pair (symbol, value)
:rtype: tuple
"""
if encoding is not None:
s = s.decode(encoding)
pieces = _VAL_SPLIT_RE.split(s)
symbol = pieces[0]
value = pieces[1]
# check whether the value is meant to be a set
if value.startswith('{'):
value = value[1:-1]
tuple_strings = _TUPLES_RE.findall(value)
# are the set elements tuples?
if tuple_strings:
set_elements = []
for ts in tuple_strings:
ts = ts[1:-1]
element = tuple(_ELEMENT_SPLIT_RE.split(ts))
set_elements.append(element)
else:
set_elements = _ELEMENT_SPLIT_RE.split(value)
value = set(set_elements)
return symbol, value | aebd7ca9e4e321069a04536f281230b5cd23cceb | 2,066 |
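A usage sketch mirroring the docstring (assumes the module-level regexes _VAL_SPLIT_RE, _TUPLES_RE and _ELEMENT_SPLIT_RE are defined as in the original module):
parse_valuation_line('noosa => n')          # -> ('noosa', 'n')
parse_valuation_line('girl => {g1, g2}')    # -> ('girl', {'g1', 'g2'})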
import requests
from bs4 import BeautifulSoup
import datetime
import pandas as pd
def scrape_dailykos(keywords=KEYWORDS):
"""
Scrapes news article titles from dailykos.com
"""
dk_request = requests.get('https://www.dailykos.com')
dk_homepage = dk_request.content
dk_soup = BeautifulSoup(dk_homepage, 'html.parser')
dk_tags = dk_soup.find_all('div', class_='cell-wrapper')
dk_links = ['https://www.dailykos.com' + tag.find('a')['href'] for tag in dk_tags]
dk_links = [link for link in dk_links if any(keyword in link for keyword in keywords)]
# get article titles and dates
dk_titles = []
dk_dates = []
for link in dk_links:
# prep article content
article = requests.get(link)
article_content = article.content
soup_article = BeautifulSoup(article_content, 'html5lib')
# get article title
dk_titles.append(soup_article.find('title').get_text())
# get publication date
date = str(soup_article.find('span', class_='timestamp'))
dk_dates.append(date[len(date) - 21:-7])
# format dates
dk_dates = [datetime.datetime.strptime(date, '%B %d, %Y').strftime('%Y-%m-%d') for date in dk_dates]
# assembling data
dailykos_data = pd.DataFrame.from_dict({
'publisher': 'dailykos',
'date': dk_dates,
'link': dk_links,
'article_title': dk_titles
})
dailykos_data.drop_duplicates(inplace=True)
return dailykos_data | a6b5cbffce87f75c7561bc8939247f80bb10ae11 | 2,067 |
def parse_rows(m: utils.Matrix[str]) -> pd.DataFrame:
"""Parse rows to DataFrame, expecting specific columns and types."""
if len(m) < 2:
logger.error('More than one line expected in {}'.format(str(m)))
return pd.DataFrame()
# parse data rows and add type casting
cols = len(m[0])
df = pd.DataFrame([row for row in m[1:] if len(row) == cols],
columns=m[0])
pairs = (('Market Value', utils.str_to_float),
('Weight (%)', utils.str_to_float),
('Notional Value', utils.str_to_float),
('Shares', utils.str_to_int),
('Price', utils.str_to_float),
('FX Rate', utils.str_to_float),
('Accrual Date', utils.parse_date_name)
)
for col, f in pairs:
try:
df[col] = df[col].apply(f)
except Exception as e:
logger.error('Error when casting {}: {}'.format(col, e))
return df | 46749bccf7af71256e1f1d490e1a2f241ed0c4d9 | 2,068 |
import base64
import struct
def tiny_id(page_id):
"""Return *tiny link* ID for the given page ID."""
return base64.b64encode(struct.pack('<L', int(page_id)).rstrip(b'\0'), altchars=b'_-').rstrip(b'=').decode('ascii') | 1a37b814ff9845949c3999999b61f79b26dacfdc | 2,069 |
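A usage sketch: the Confluence-style tiny-link ID for a numeric page ID (little-endian packed, base64 with '_-' alternate chars, '=' padding stripped).
tiny_id(12345)   # -> 'OTA'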
def invert_color(color: str, *, black_or_white: bool = False) -> str:
"""Return a color with opposite red, green and blue values.
Example: ``invert_color('white')`` is ``'#000000'`` (black).
This function uses tkinter for converting the color to RGB. That's
why a tkinter root window must have been created, but *color* can be
any Tk-compatible color string, like a color name or a ``'#rrggbb'``
string. The return value is always a ``'#rrggbb`` string (also compatible
with Tk).
If ``black_or_white=True`` is set, then the result is always ``"#000000"``
(black) or ``"#ffffff"`` (white), depending on whether the color is bright
or dark.
"""
if black_or_white:
return "#000000" if is_bright(color) else "#ffffff"
widget = porcupine.get_main_window() # any widget would do
# tkinter uses 16-bit colors, convert them to 8-bit
r, g, b = (value >> 8 for value in widget.winfo_rgb(color))
return "#%02x%02x%02x" % (0xFF - r, 0xFF - g, 0xFF - b) | cf6a84957489cba046aebc01457bfd6453bc90b6 | 2,070 |
import numpy as np
def pcaImageCube(ref, mask = None, pcNum = None, cube=True, ref3D=True, outputEval = False):
"""Principal Component Analysis,
Input:
ref: Cube of references, 3D;
if ref3D==False, 2D (Flattened and Normalized, with maksked region excluded.)
mask: mask, 2D or 1D;
pcNum: how many principal components are needed;
cube: output as a cube? Otherwise a flattend 2D component array will be returned.
ref3D: Ture by default.
outputEval: whether to return the eigen values, False by default.
Output:
The principal components, either cube (3D) or flattend (2D)."""
if mask is None:
mask = np.ones(ref[0].shape)
if pcNum is None:
pcNum = ref.shape[0]
if ref3D:
mask_flat = mask.flatten()
ref_flat = np.zeros((ref.shape[0], np.where(mask_flat == 1)[0].shape[0]))
for i in range(ref_flat.shape[0]):
ref_flat[i], std = flattenAndNormalize(ref[i], mask)
else:
ref_flat = ref
if np.shape(mask.shape)[0] == 1: #1D mask, already flattened
mask_flat = mask
elif np.shape(mask.shape)[0] == 2: #2D mask, need flatten
mask_flat = mask.flatten()
covMatrix = np.dot(ref_flat, np.transpose(ref_flat))
eVal, eVec = np.linalg.eig(covMatrix)
index = (-eVal).argsort()[:pcNum]
eVec = eVec[:,index]
components_flatten = np.dot(np.transpose(eVec), ref_flat)
pc_flat = np.zeros((pcNum, mask_flat.shape[0]))
for i in range(pc_flat.shape[0]):
pc_flat[i][np.where(mask_flat==1)] = components_flatten[i]/np.sqrt(np.dot(components_flatten[i], np.transpose(components_flatten[i])))
if cube == False:
return pc_flat
pc_cube = np.zeros((pcNum, mask.shape[0], mask.shape[1]))
width = mask.shape[0]
for i in range(pc_flat.shape[0]):
pc_cube[i] = np.array(np.split(pc_flat[i], width))
if not outputEval:
return pc_cube
else:
return pc_cube, eVal[index] | 96a05ef8fd6a618af91903b9c0fc9fc49cfd8130 | 2,071 |
import itertools as it
import numpy as np
import pandas as pd
def get_cross_kerr_table(epr, swp_variable, numeric):
"""
Function to re-organize the cross-Kerr results once the quantum analysis is finished
Parameters:
-------------------
epr : Object of QuantumAnalysis class
swp_variable : the variable swept in data according to which things will be sorted
numeric : Whether numerical diagonalization of the data was performed
Use notes:
-------------------
* It is assumed the epr.analyze_all_variations has already been called and analysis is finished.
"""
if numeric:
f1 = epr.results.get_frequencies_ND(vs=swp_variable)
chis = epr.get_chis(numeric=numeric,swp_variable=swp_variable)
else:
f1 = epr.results.get_frequencies_O1(vs=swp_variable)
chis = epr.get_chis(numeric=numeric,swp_variable=swp_variable)
#print(f1)
#print(chis)
swp_indices = chis.index.levels[0]
mode_indices = chis.index.levels[1]
#print(mode_indices)
mode_combinations = list(zip(mode_indices,mode_indices))
diff_mode_combinations = list(it.combinations_with_replacement(mode_indices,2))
mode_combinations.extend(diff_mode_combinations)
organized_data = pd.DataFrame({swp_variable:swp_indices})
organized_data.set_index(swp_variable,inplace=True)
for mode_indx in mode_indices:
organized_data['f_'+str(mode_indx)+'(GHz)']=np.round(f1.loc[mode_indx].values/1000,3)
for combo_indx in mode_combinations:
temp_chi_list = [chis.loc[swp_indx].loc[combo_indx] for swp_indx in swp_indices]
organized_data['chi_'+str(combo_indx[0])+str(combo_indx[1])+' (MHz)']=np.round(temp_chi_list,4)
return organized_data | 8dfa860f73c5453ee970f204d4e03d6cef93d010 | 2,072 |
def getSpectra(dataframe, indices):
""" Returns the files for training and testing
Inputs
-----------
dataframe: pd.DataFrame object from which we need to get spectra
indices: row values for which we need the spectra
Returns
-----------
spec_vals: pd.DataFrame object containing spectra values for given
indices
"""
colList = dataframe.columns
spec_inds = [index for index in range(len(colList))
if colList[index].startswith('Spectrum_')]
spec_cols = colList[spec_inds]
spec_vals = dataframe[spec_cols].iloc[indices]
return spec_vals | 606757ffdde39c0847dd0402342441931d66a081 | 2,073 |
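A usage sketch with a toy frame whose spectral columns follow the 'Spectrum_' prefix convention assumed by the function:
import pandas as pd
df = pd.DataFrame({'Spectrum_400': [0.1, 0.2], 'Spectrum_410': [0.3, 0.4], 'label': [0, 1]})
getSpectra(df, indices=[0])   # -> the Spectrum_* columns of row 0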
def config2():
"""Configure for one of the restart tests."""
return Config.load(f"""
id: cbc_binary_toolkit
version: 0.0.1
database:
_provider: tests.component.persistor_fixtures.mock_persistor.MockPersistorFactory
engine:
_provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory
name: {ENGINE_NAME}
feed_id: {FEED_ID}
type: local
Test: TestPassed
""") | ded0b43392e7e0308cca0f773d2ed687fd0818de | 2,074 |
import sys
import traceback
def _on_process(*args, **kwargs):
"""Process the given function in the current subprocess"""
try:
func = kwargs['__func__']
del kwargs['__func__']
return func(*args, **kwargs)
except KeyboardInterrupt:
sys.exit()
except Exception as e:
raise type(e)(traceback.format_exc()) | cc6b90daa3aba127f7c9ea596b0718bdefc5688b | 2,075 |
def diff_cases(couch_cases, log_cases=False):
"""Diff cases and return diff data
:param couch_cases: dict `{<case_id>: <case_json>, ...}`
:returns: `DiffData`
"""
assert isinstance(couch_cases, dict), repr(couch_cases)[:100]
assert "_diff_state" in globals()
data = DiffData()
dd_count = partial(metrics_counter, tags={"domain": get_domain()})
case_ids = list(couch_cases)
sql_case_ids = set()
for sql_case in CaseAccessorSQL.get_cases(case_ids):
case_id = sql_case.case_id
sql_case_ids.add(case_id)
couch_case, diffs, changes = diff_case(sql_case, couch_cases[case_id], dd_count)
if diffs:
dd_count("commcare.couchsqlmigration.case.has_diff")
if changes:
dd_count("commcare.couchsqlmigration.case.did_change")
data.doc_ids.append(case_id)
data.diffs.append((couch_case['doc_type'], case_id, diffs))
data.changes.append((couch_case['doc_type'], case_id, changes))
if log_cases:
log.info("case %s -> %s diffs", case_id, len(diffs))
diffs, changes = diff_ledgers(case_ids, dd_count)
data.diffs.extend(diffs)
data.changes.extend(changes)
add_missing_docs(data, couch_cases, sql_case_ids, dd_count)
return data | 545b35b7e37174f93df9e566bc0e1cd777948563 | 2,076 |
import numpy as np
def rk4(a, b, x0, y0, nu=0, F=0, xdot = x_dot, ydot = y_dot):
"""rk(a, b, x0, y0, nu=0, F=0, xdot = x_dot, ydot = y_dot)
Args:
a (float) : Lower bound, t = a*2*pi
b (float) : Upper bound, t = b*2*pi
x0 (float) : Initial position of ball
y0 (float) : Initial velocity of ball
nu (float) : Constant damping coefficient
F (float) : Constant force amplitude coefficient
xdot (function) : Part of the differential equation
ydot (function) : Part of the differential equation
Returns:
t (array) : Array over the time interval with equal dt = .001
x (array) : Array containing the position of the ball at each time in the time array
y (array) : Array containing the velocity of the ball at each time in the time array
"""
dt = 0.001
start = 2*a*np.pi
end = 2*b*np.pi
n = int(np.ceil((end-start)/dt))
t = np.linspace(start,end,n)
x = np.zeros(n)
y = np.zeros(n)
x_dot_vec = np.zeros(n)
y_dot_vec = np.zeros(n)
x[0] = x0
y[0] = y0
for k in range(n):
        x_dot_vec[k] = xdot(y[k])
y_dot_vec[k] = ydot(t[k],y[k],x[k],nu,F)
if k == n-1:
break
else:
k1y = dt*ydot(t[k],y[k],x[k],nu,F)
k2y = dt*ydot((t[k]+dt/2),(y[k]+k1y/2),x[k],nu,F)
k3y = dt*ydot((t[k]+dt/2),(y[k]+k2y/2),x[k],nu,F)
k4y = dt*ydot((t[k]+dt),(y[k]+k3y),x[k],nu,F)
rky = (k1y+(2*k2y)+(2*k3y)+k4y)/6
y[k+1] = y[k]+rky
k1x = dt*xdot(y[k])
k2x = dt*xdot(y[k]+k1x/2)
k3x = dt*xdot(y[k]+k2x/2)
k4x = dt*xdot(y[k]+k3x)
rkx = (k1x+(2*k2x)+(2*k3x)+k4x)/6
x[k+1] = x[k]+rkx
return (t,x,y) | acd97edb74bc27d03908962e52431bc3fdb7a571 | 2,077 |
from furious.async import Async
def decode_callbacks(encoded_callbacks):
"""Decode the callbacks to an executable form."""
callbacks = {}
    for event, callback in encoded_callbacks.items():
if isinstance(callback, dict):
async_type = Async
if '_type' in callback:
async_type = path_to_reference(callback['_type'])
callback = async_type.from_dict(callback)
else:
callback = path_to_reference(callback)
callbacks[event] = callback
return callbacks | 0ff066a21bb2f0c0e0979898d218add1e46da544 | 2,078 |
def create_conv_block(
use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams,
is_training, freeze_batchnorm, depth):
"""Create Keras layers for depthwise & non-depthwise convolutions.
Args:
use_depthwise: Whether to use depthwise separable conv instead of regular
conv.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters. Can be an int if both values are the same.
padding: One of 'VALID' or 'SAME'.
stride: A list of length 2: [stride_height, stride_width], specifying the
convolution stride. Can be an int if both strides are the same.
layer_name: String. The name of the layer.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
depth: Depth of output feature maps.
Returns:
A list of conv layers.
"""
layers = []
if use_depthwise:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
depth, [kernel_size, kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**kwargs))
else:
layers.append(tf.keras.layers.Conv2D(
depth,
[kernel_size, kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
return layers | 08c45a1ca62ff290d5e34e1cb544618dababaad1 | 2,079 |
import numpy as np
def select_eps_for_division(dtype):
"""Selects default values for epsilon to make divisions safe based on dtype.
This function returns an epsilon slightly greater than the smallest positive
floating number that is representable for the given dtype. This is mainly used
to prevent division by zero, which produces Inf values. However, if the
nominator is orders of magnitude greater than `1.0`, eps should also be
increased accordingly. Only floating types are supported.
Args:
dtype: The `tf.DType` of the tensor to which eps will be added.
Raises:
ValueError: If `dtype` is not a floating type.
Returns:
A `float` to be used to make operations safe.
"""
return 10.0 * np.finfo(dtype.as_numpy_dtype).tiny | 7204b2b694c6df98af4608562616655b3c198178 | 2,080 |
import os
import json
import tensorflow as tf
from tensorflow import keras
def train_freezed_model(x, y, x_tk, y_tk, freezed_comp='encoder', use_check_point=False):
"""
train the translation model and save checkpoint
:param x: Preprocessed English data
:param y: Preprocessed French data
:param x_tk: English tokenizer
:param y_tk: French tokenizer
:param freezed_comp: which component in the model is freezed
:param use_check_point: whether save model weights to file or not
"""
mode_constructor = freezed_encoder_model if freezed_comp == 'encoder' else freezed_decoder_model
model = mode_constructor(x.shape, y.shape[1],
len(x_tk.word_index) + 1,
len(y_tk.word_index) + 1)
model.summary()
checkpoint_path = f"freezed_translator_checkpoint_dir/freezed_{freezed_comp}/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
if use_check_point and os.listdir(checkpoint_dir).__len__() > 0:
latest_cp = tf.train.latest_checkpoint(checkpoint_dir)
print(f'loading last model from {latest_cp}')
model.load_weights(latest_cp)
with open(checkpoint_dir + '/' + 'summary', 'r') as f:
summary = json.load(f)
return model, summary
else:
# Create a callback that saves the model's weights
cp_callback = keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
# Train the model with the new callback
summary = model.fit(x, y,
batch_size=1024, epochs=25, validation_split=0.2,
callbacks=[cp_callback]) # Pass callback to training
with open(checkpoint_dir + '/' + 'summary', 'w') as f:
json.dump(summary.history, f)
return model, summary.history | 84ff59edc6aafa7a3ca3b634ef8a21b6ae1a44f8 | 2,081 |
def bpm_to_mspt(bpm, res=480):
"""
    Converts a tempo in beats per minute to milliseconds per tick, given a resolution in ticks per quarter note
"""
return 60000 / res / bpm | 6b962b8253eac29f52c48ca89a6dce0417adb11b | 2,082 |
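A worked example: at 120 BPM a quarter note lasts 500 ms, so with the default 480 ticks per quarter note each tick lasts about 1.04 ms.
bpm_to_mspt(120)          # -> 1.0416666...
bpm_to_mspt(120, res=1)   # -> 500.0 ms per quarter note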
import pickle
import os
import cv2
import numpy as np
import pandas as pd
from keras.layers import Input
from keras.models import Model
from keras import backend as K
def Test_frcnn(test_images_list,
network_arch,
config_filename,
preprocessing_function = None,
num_rois = None,
final_classification_threshold = 0.8):
"""
Test the object detection network
test_images_list --list: list containing path to test_images (No default)
network_arc --object: the full faster rcnn network .py file passed as an object (no default)
config_filename --str: Full path to the config_file.pickle, generated while training (No default)
preprocessing_function --function: optional image preprocessing function (Default None)
num_rois --int: (optional)The number of ROIs to process at once in the final classifier (Default None)
if not given. The number of ROIs given while training is chosen
final_classification_threshold --float: (0,1) min threshold for accepting as a detection in final classifier (Default 0.8)
OUTPUT:
returns the images with bboxes over layed using opencv, and a dataframe with data
"""
nn = network_arch
assert "list" in str(type(test_images_list)),"test_images_list must be a list of paths to the test images"
with open(config_filename, 'rb') as f_in:
C = pickle.load(f_in)
if num_rois:
C.num_rois = int(num_rois)
# turn off any data augmentation at test time
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False
def format_img_size(img, C): # utility function 1
""" formats the image size based on config """
img_min_side = float(C.im_size)
(height,width,_) = img.shape
if width <= height:
ratio = img_min_side/width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side/height
new_width = int(ratio * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
return img, ratio
def preprocess_img(img, preprocessing_function): #utility function 2
""" formats the image channels based on config """
img = img[:, :, (2, 1, 0)] #bgr to rgb
if preprocessing_function:
img = preprocessing_function(img)
#img = np.transpose(img, (2, 0, 1)) # convert to theano
img = np.expand_dims(img, axis=0)
return img
def format_img(img, C, preprocessing_function): # utility function 3
""" formats an image for model prediction based on config """
img, ratio = format_img_size(img, C)
img = preprocess_img(img, preprocessing_function)
return img, ratio
# Method to transform the coordinates of the bounding box to its original size
def get_real_coordinates(ratio, x1, y1, x2, y2): #utility function 4
real_x1 = int(round(x1 // ratio))
real_y1 = int(round(y1 // ratio))
real_x2 = int(round(x2 // ratio))
real_y2 = int(round(y2 // ratio))
return (real_x1, real_y1, real_x2 ,real_y2)
class_mapping = C.class_mapping
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
class_mapping = {v: k for k, v in class_mapping.items()}
print(class_mapping)
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
# load the models
input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
shared_layers = nn.nn_base(img_input)
num_features = shared_layers.get_shape().as_list()[3] #512 for vgg-16
feature_map_input = Input(shape=(None, None, num_features))
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, len(class_mapping))
# create a keras model
model_rpn = Model(img_input, rpn)
model_classifier = Model([feature_map_input, roi_input], classifier)
#Note: The model_classifier in training and testing are different.
# In training model_classifier and model_rpn both have the base_nn.
# while testing only model_rpn has the base_nn it returns the FM of base_nn
# Thus the model_classifier has the FM and ROI as input
# This id done to increase the testing speed
print('Loading weights from {}'.format(C.weights_all_path))
model_rpn.load_weights(C.weights_all_path, by_name=True)
model_classifier.load_weights(C.weights_all_path, by_name=True)
list_of_all_images=[]
df_list = []
for idx, filepath in enumerate(sorted(test_images_list)):
print(os.path.basename(filepath))
img = cv2.imread(filepath)
X, ratio = format_img(img, C, preprocessing_function)
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=C.rpn_nms_threshold,flag="test")
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0]//C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0]//C.num_rois:
#pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0],C.num_rois,curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = model_classifier.predict([F, ROIs])
for ii in range(P_cls.shape[1]):
if np.max(P_cls[0, ii, :]) < final_classification_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
pass
bboxes[cls_name].append([C.rpn_stride*x, C.rpn_stride*y, C.rpn_stride*(x+w), C.rpn_stride*(y+h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
probs_list = [] # new list for every image
coor_list = [] # new list for every image
classes_list = []# new list for every image
img_name_list = []# new list for ever image
for key in bboxes:
bbox = np.array(bboxes[key])
new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=C.test_roi_nms_threshold,max_boxes=C.TEST_RPN_POST_NMS_TOP_N) #0.3 default threshold from original implementation
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk,:]
(real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2)
textLabel = '{}: {}'.format(key,int(100*new_probs[jk]))
coor_list.append([real_x1,real_y1,real_x2,real_y2]) # get the coordinates
classes_list.append(key)
probs_list.append(100*new_probs[jk])
img_name_list.append(filepath)
(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1)
textOrg = (real_x1, real_y1-0)
cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2)
cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1)
cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
df = pd.DataFrame({"Image_name":img_name_list,
"classes":classes_list,
"pred_prob":probs_list,
"x1_y1_x2_y2":coor_list})
list_of_all_images.append(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))
df_list.append(df)
final_df = pd.concat(df_list,ignore_index=True)
return(list_of_all_images,final_df) | 678f874ae8c89c9d3899e839bb74cd35233b38e2 | 2,083 |
import numpy as np
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out | 5ec3dc8e66451a00d1f13f1df1df680879a16bc6 | 2,084 |
def next_hidden(s, A):
"""From a given state s, use the transition matrix A to generate the next
hidden state.
"""
return choose_idx(A[s]) | cc0b106ebeaa98ac2aeba947bd9ed0f653d233b5 | 2,085 |
import torch
def create_network_rcnn(cls, opt):
"""Separate function for rcnn, which always loads weights first, no init."""
net = cls(opt)
net.print_network()
util.load_network_path(net, opt.fastercnn_loc, strict=True, rcnn_load=True)
if len(opt.gpu_ids) > 0:
assert(torch.cuda.is_available())
net.cuda()
return net | d653aa9435435ace4f10b134d28ee474353805bb | 2,086 |
import tkinter
from tkinter import ttk
def get_board_frame(window, mqtt_sender):
"""Builds the chessboard GUI."""
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
frame_label = ttk.Label(frame, text="Board")
get_state = ttk.Button(frame, text="Get state")
get_state["command"] = lambda: handle_get_state(mqtt_sender)
mqtt_sender.state = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
box = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
frame_label.grid()
get_state.grid()
hint = {"0": "A", "1": "B", "2": "C", "3": "D", "4": "E", "5": "F", "6": "G", "7": "H"}
for k in range(8):
note = ttk.Label(frame, text=str(hint[str(k)]))
note.grid(row=0, column=k + 2)
for j in range(2):
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=1)
for k in range(8):
mqtt_sender.state[j][k] = tkinter.IntVar(value=1)
box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k])
box[j][k].grid(row=j + 1, column=k + 2)
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=10)
for j in range(2, 6):
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=1)
for k in range(8):
mqtt_sender.state[j][k] = tkinter.IntVar()
box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k])
box[j][k].grid(row=j + 1, column=k + 2)
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=10)
for j in range(6, 8):
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=1)
for k in range(8):
mqtt_sender.state[j][k] = tkinter.IntVar(value=1)
box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k])
box[j][k].grid(row=j + 1, column=k + 2)
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=10)
for k in range(8):
note = ttk.Label(frame, text=str(hint[str(k)]))
note.grid(row=10, column=k + 2)
return frame | d6f5a13312989613f8b945c8e73cedb9ee7e3851 | 2,087 |
import logging
import numpy as np
import pandas as pd
from scipy import spatial
def compute_rest_time(gps_data, radius):
"""Compute the duration during which the track stays in a given radius of each
point.
Args:
gps_data (:py:class:`~gps_data_analyzer.gps_data.PoiPoints`): The data used for
computation.
radius (float): The radius in which the rest time is computed around each point.
Returns:
``pandas.Series``: The rest time around each point.
"""
# TODO: need optimization and cleaning.
def _t_inter(current, i1, i2, max_radius, geom="geometry", t="datetime"):
d_i1 = i1[geom].distance(current[geom])
d_i2 = i2[geom].distance(current[geom])
t_i1 = i1[t]
t_i2 = i2[t]
dt = max(t_i1, t_i2) - min(t_i1, t_i2)
dd = abs(d_i1 - d_i2)
if dd == 0:
return dt
else:
return min(1.0, abs(d_i1 - max_radius) / dd) * dt
def _process_one_pt(num, points, max_radius, logger=logging):
logger.debug("{}: {}".format(num, points))
data = gps_data
pts = np.array(points)
pts.sort()
pos_i = np.argwhere(pts == num)[0][0]
diff_not_one = (pts[1:] - pts[:-1]) != 1
current = data.loc[num]
# TODO: make a function for inf and sup parts since the only difference is the
# order of diff_not_one and the limits for label_inf_m1 and label_sup_p1
# Inf part
if num > 0:
if len(diff_not_one[:pos_i]) > 0:
diff_not_one[0] = True
pos_skip_inf = pos_i - np.argmax(np.flip(diff_not_one[:pos_i]))
else:
pos_skip_inf = pos_i
label_inf = pts[pos_skip_inf]
label_inf_m1 = max(0, pts[pos_skip_inf] - 1)
inf = data.loc[label_inf]
inf_m1 = data.loc[label_inf_m1]
dt_inf = current["datetime"] - inf["datetime"]
t_inf_inter = dt_inf + _t_inter(current, inf, inf_m1, max_radius)
logger.debug("data:\n{}".format(data.loc[[num, label_inf, label_inf_m1]]))
logger.debug(
"distances = {}".format(
data.loc[[label_inf, label_inf_m1], "geometry"].distance(
current["geometry"]
)
)
)
else:
t_inf_inter = pd.Timedelta(0)
# Sup part
if num != data.index.max():
if len(diff_not_one[pos_i:]) > 0:
diff_not_one[-1] = True
pos_skip_sup = pos_i + np.argmax(diff_not_one[pos_i:])
else:
pos_skip_sup = pos_i
label_sup = pts[pos_skip_sup]
label_sup_p1 = min(data.index.max(), pts[pos_skip_sup] + 1)
sup = data.loc[label_sup]
sup_p1 = data.loc[label_sup_p1]
dt_sup = sup["datetime"] - current["datetime"]
t_sup_inter = dt_sup + _t_inter(current, sup, sup_p1, max_radius)
logger.debug("data:\n {}".format(data.loc[[num, label_sup, label_sup_p1]]))
logger.debug(
"distances = {}".format(
data.loc[[label_sup, label_sup_p1], "geometry"].distance(
current["geometry"]
)
)
)
else:
t_sup_inter = pd.Timedelta(0)
logger.debug("t_inf_inter = {}".format(t_inf_inter))
logger.debug("t_sup_inter = {}".format(t_sup_inter))
return t_inf_inter, t_sup_inter
# Get the closest points of each points
points = np.c_[gps_data.x.ravel(), gps_data.y.ravel()]
tree = spatial.KDTree(points)
points = tree.data
in_radius_pts = tree.query_ball_point(points, radius)
# Get the times when the track leave the circle with radius = radius
t_min = []
t_max = []
for num, i in enumerate(in_radius_pts):
t1, t2 = _process_one_pt(num, i, radius)
t_min.append(t1)
t_max.append(t2)
times = pd.DataFrame({"dt_min": t_min, "dt_max": t_max}, index=gps_data.index)
# Compute total time
duration = times["dt_min"] + times["dt_max"]
# Convert time in seconds
return duration.apply(pd.Timedelta.total_seconds) | 542e37c53948310e240924d729aea16a87c622b2 | 2,088 |
from dash import dcc, html
import dash_bootstrap_components as dbc
def body():
"""Get map page body.
Returns:
html.Div: dash layout
"""
graph_map = get_graph_map()
if graph_map is None:
return html.Div(
dbc.Alert("Cannot retrieve data! Try again later!", color="danger")
)
# Put everything in a dcc container and return
body = dbc.Container(
[
dbc.Row(
dbc.Col(
dbc.Card(
dbc.CardBody(
[
html.P(
"A graph of the UK rail network generated from \
individual train movements captured from the Network Rail feeds and a subset of known fixed locations. \
Each node represents a train describer 'berth' which usually, but not always, represents a signal.\
Red nodes indicate the live locations of trains on the network, \
whilst the node size indicates the frequency of usage. Hovering over each node provides additional information.\
The graph is updated every 5 seconds. \
Only the west coast mainline central signal area (around Manchester) is considered for now."
),
]
),
color="secondary",
),
width={"size": 10, "offset": 1},
)
),
dbc.Row(dbc.Col(dcc.Graph(id="graph-map", figure=graph_map))),
dcc.Interval(
id="graph-page-interval",
                interval=1 * 5000,  # in milliseconds
                n_intervals=0,
),
],
fluid=True,
)
return body | 6474602d65f71dadce26e043c62f35ec0c489a0f | 2,089 |
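# A hypothetical wiring sketch showing how body() might be mounted in a Dash app.
# The app object and stylesheet choice are assumptions, and get_graph_map() must
# be defined in the module for the layout to render.
import dash
import dash_bootstrap_components as dbc

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = body()
app.run_server(debug=True)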
import datetime
def custom_strftime(formatting: str, date: datetime.datetime) -> str:
"""Custom strftime formatting function, using fancy number suffixes (1st, 2nd, 3rd...)"""
return date.strftime(formatting).replace("{S}", str(date.day) + suffix(date.day)) | 3199f6e0590f4bb01c1792976c75c7a0d4208831 | 2,090 |
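# The function above relies on a `suffix` helper that is not shown in the snippet.
# A plausible implementation and a usage example follow; both are assumptions.
def suffix(day: int) -> str:
    """Return the English ordinal suffix ("st", "nd", "rd", "th") for a day number."""
    if 11 <= day % 100 <= 13:
        return "th"
    return {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")

# custom_strftime("{S} of %B, %Y", datetime.datetime(2021, 3, 22)) -> "22nd of March, 2021"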
import os
def make_workdir(run_dir, ccp4i2=False, MAX_WORKDIRS=100):
"""Make a work directory rooted at run_dir and return its path
Parameters
----------
run_dir : str
The path to a run directory where the job was started
    ccp4i2 : bool, optional
        Indicate if we are running under CCP4I2
    MAX_WORKDIRS : int, optional
        Maximum number of numbered work directories to try before raising an error
Returns
-------
work_dir : str
The path to the working directory
"""
if ccp4i2:
work_dir = os.path.join(run_dir, I2DIR)
else:
run_inc = 0
while True:
work_dir = os.path.join(run_dir, AMPLEDIR + str(run_inc))
if not os.path.exists(work_dir):
break
run_inc += 1
if run_inc > MAX_WORKDIRS:
raise RuntimeError("Too many work directories! {0}".format(work_dir))
if os.path.exists(work_dir):
raise RuntimeError(
"There is an existing AMPLE work directory: {0}\n" "Please delete/move it aside.".format(work_dir)
)
os.mkdir(work_dir)
return work_dir | 3aacba4f0e3158a3c828b96f3f6b2954e947de21 | 2,091 |
from twitter import OAuth, Twitter
def setup_twitter(config_file='config.py'):
    """Set up auth keys and a session with the Twitter client."""
    config = {}
    # Load credentials from a plain Python config file (Python 3 equivalent of execfile).
    with open(config_file) as f:
        exec(f.read(), config)
twitter_obj = Twitter(auth=OAuth(config["access_key"],
config["access_secret"],
config["consumer_key"],
config["consumer_secret"]))
return twitter_obj | bb811f3b6cabbe5dbf8f77d8e5217078f9a57c22 | 2,092 |
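# The config file loaded above is expected to be a plain Python file defining the
# four credential variables read by setup_twitter; a placeholder example
# (the values are not real) would look like:
#
#   access_key = "<oauth-token>"
#   access_secret = "<oauth-token-secret>"
#   consumer_key = "<app-consumer-key>"
#   consumer_secret = "<app-consumer-secret>"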
import datetime
import numpy as np
import pandas as pd
def create_datediff_test_nulls_df():
"""Create DataFrame with nulls only for DateDifferenceTransformer tests."""
df = pd.DataFrame(
{
"a": [
datetime.datetime(1993, 9, 27, 11, 58, 58),
np.NaN,
],
"b": [
np.NaN,
datetime.datetime(2019, 12, 25, 11, 58, 58),
],
},
index=[0, 1],
)
return df | 542fd3fdf6fcd93a208e3f1f9cd2a76a0c34e46b | 2,093 |
from datetime import date
import holidays
def business_days_list(start_date: date, end_date: date) -> list[date]:
    """Return the US business days (weekdays that are not US holidays) between start_date and end_date."""
us_holidays = holidays.UnitedStates()
days: list[date] = []
for the_date in get_list_of_days(start_date, end_date):
if (the_date.weekday() < 5) and (the_date not in us_holidays):
days.append(the_date)
return days | daa36fe5fda5fc0857c1b29c75d7e784cafefe93 | 2,094 |
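# `get_list_of_days` is an external helper not shown above; a plausible
# implementation (an assumption, inclusive of both endpoints) is:
from datetime import date, timedelta
from typing import Iterator

def get_list_of_days(start_date: date, end_date: date) -> Iterator[date]:
    """Yield each calendar day from start_date to end_date, inclusive."""
    for offset in range((end_date - start_date).days + 1):
        yield start_date + timedelta(days=offset)

# Example: business_days_list(date(2021, 7, 1), date(2021, 7, 7)) skips the
# weekend of 3-4 July and the observed Independence Day holiday on 5 July.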
import numpy as np
from toolz import pipe

# NOTE: `solve_fe` is the curried finite-element solver from the surrounding
# package (presumably pymks); it is not defined in this snippet.
def test_3d():
    """Test the finite-element (FE) solver in 3D."""
def setone(arr):
arr[0, :, (arr.shape[0] - 1) // 2] = 1.0
return arr
assert pipe(
5,
lambda x: np.zeros((1, x, x, x), dtype=int),
setone,
solve_fe(elastic_modulus=(1.0, 10.0), poissons_ratio=(0.0, 0.0)),
lambda x: np.allclose(
[np.mean(x["strain"][0, ..., i]) for i in range(6)],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
),
) | 32f3d5fc18a31f01b2e366a3540ec77dd0e6080f | 2,095 |
from typing import List
import xraydb
def get_xray_edges(elements: List[str], wmin: float, wmax: float):
    """
    Using xraydb, return the absorption edges for the requested elements.
Parameters
----------
elements: List[str]
A list of the element symbols from which to query absorption edges.
wmin: float
The smallest wavelength edge to return
wmax: float
The largest wavelength edge to return
Returns
-------
output_table: List[str]
A table containing absorption edges.
- Elem: the element
- Energy: the photoionisation energy
- Frequency: the frequency of the absorption edge
- Wavelength: the wavelength of the absorption edge
"""
element_absortion_edges_dicts = []
for element in elements:
edges = xraydb.xray_edges(element)
element_absortion_edges_dicts.append(edges)
output_table = []
output_table.append("Elem {:15s} {:15s} {:15s}\n".format("Energy eV", "Frequency Hz", "Wavelength AA"))
for i, edges in enumerate(element_absortion_edges_dicts):
print("-" * COL_LEN)
print("{}: \n".format(elements[i]))
print("{:15s} {:15s} {:15s}".format("Energy eV", "Frequency Hz", "Wavelength AA"))
keys = edges.keys()
prev_key = "K"
for key in keys:
# This bit will skip edges which have the same energy, I hope
if prev_key != key:
if edges[prev_key][0] == edges[key][0]:
continue
prev_key = key
energy = edges[key][0]
frequency = energy / HEV
wavelength = C / frequency / ANGSTROM
print("{:9.1f} {:1.12e} {:13.1f}".format(energy, frequency, wavelength))
if wmin < wavelength < wmax:
output_table_line = "{:4s} {:9.1f} {:1.12e} {:13.1f}\n".format(
elements[i], energy, frequency, wavelength
)
output_table.append(output_table_line)
print()
print("-" * COL_LEN)
with open("xray_edges.txt", "w") as f:
f.writelines(output_table)
return output_table | b78c0b999f4faf9e749b3b8388f0a581a5bff476 | 2,096 |
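# COL_LEN, HEV, C and ANGSTROM are module-level constants not shown in the
# snippet; plausible definitions (assumptions, in units chosen so that the
# wavelengths come out in Angstroms) are:
COL_LEN = 80                  # width of the printed separator line
HEV = 4.135667696e-15         # Planck constant in eV s
C = 2.99792458e10             # speed of light in cm / s
ANGSTROM = 1e-8               # one Angstrom expressed in cm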
import json
import urllib.request
def get_mobility_link():
"""Get Apple Mobility data link
"""
    # Fetch the index JSON, which points at the current CSV location.
    # index_url and site_url are module-level URL constants defined elsewhere.
    with urllib.request.urlopen(index_url) as url:
json_link = json.loads(url.read().decode())
base_path = json_link['basePath']
csv_path = json_link['regions']['en-us']['csvPath']
link = site_url + \
base_path + csv_path
return link | a097e9c0b787a522283d31a8a13d4d13b824b77b | 2,097 |
from datetime import datetime, timedelta
from flask import request
def active_shift(app, token, gqlClient):
    """Create an active shift via the createShift GraphQL mutation and return it."""
with app.test_request_context():
request.headers = {'authorization': token}
query = '''mutation CreateShift($Active: Boolean!, $StartTime: String) {
createShift(active: $Active, startTime: $StartTime) {
shift { id startTime active }
}
}
'''
vars = {
'StartTime': (datetime.now() - timedelta(hours=5)).strftime('%Y-%m-%d %H:%M:%S'),
'Active': True
}
res = gqlClient.execute(query, context_value=request, variables=vars)
print("query result:", res)
assert res['data']['createShift']['shift']['active']
shift = res['data']['createShift']['shift']
return shift | 345ba7f30421e28b879bc5b14409c437b9038d89 | 2,098 |
import tensorflow as tf
# `int_shape` is a small external helper that returns the static shape as a
# tuple (see the sketch after this function); it is not part of TensorFlow itself.
def get_batch_size(input):
"""
Infer the mini-batch size according to `input`.
Args:
input (tf.Tensor): The input placeholder.
Returns:
int or tf.Tensor: The batch size.
"""
if input.get_shape() is None:
batch_size = tf.shape(input)[0]
else:
batch_size = int_shape(input)[0]
if batch_size is None:
batch_size = tf.shape(input)[0]
return batch_size | 66201a3a8223ad442f54ac9551060093ee828f9b | 2,099 |
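# A plausible sketch of the `int_shape` helper (an assumption) plus a graph-mode
# usage example for get_batch_size with a dynamic batch dimension:
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # the snippet uses placeholder-style graph code

def int_shape(tensor):
    """Return the static shape of `tensor` as a tuple (entries may be None)."""
    return tuple(tensor.get_shape().as_list())

x = tf.compat.v1.placeholder(tf.float32, shape=[None, 32])
batch_size = get_batch_size(x)  # a scalar tf.Tensor, because the batch dimension is unknown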