content (string, 35 to 762k chars) | sha1 (40-char hex string) | id (int64, 0 to 3.66M)
---|---|---|
def do(ARGV):
"""Allow to check whether the exception handlers are all in place.
"""
if len(ARGV) != 3: return False
elif ARGV[1] != "<<TEST:Exceptions/function>>" \
and ARGV[1] != "<<TEST:Exceptions/on-import>>": return False
exception = ARGV[2]
if exception == "KeyboardInterrupt": raise KeyboardInterrupt()
elif exception == "AssertionError": raise AssertionError()
elif exception == "Exception": raise Exception()
# If we did not raise an exception here, we didn't do anything
print("No exception was triggered.")
return False | 56b83d119f74a00f1b557c370d75fb9ff633d691 | 1,255 |
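A minimal usage sketch, assuming the `do` function above is in scope; the argument vector is hypothetical and mirrors the expected `<<TEST:...>>` marker.

```python
try:
    do(["prog", "<<TEST:Exceptions/function>>", "AssertionError"])
except AssertionError:
    print("AssertionError was raised as requested.")
```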
def get_available_language_packs():
"""Get list of registered language packs.
:return list:
"""
ensure_autodiscover()
return [val for (key, val) in registry.registry.items()] | faf3c95ff808c1e970e49c56feb5ad1f61623053 | 1,256 |
import ctypes
def topo_star(jd_tt, delta_t, star, position, accuracy=0):
"""
Computes the topocentric place of a star at 'date', given its
catalog mean place, proper motion, parallax, and radial velocity.
Parameters
----------
jd_tt : float
TT Julian date for topocentric place.
delta_t : float
Difference TT-UT1 at 'date', in seconds of time.
star : CatEntry
Instance of CatEntry type object containing catalog data for
the object in the ICRS.
position : OnSurface
Instance of OnSurface type object specifying the position of
the observer.
accuracy : {0, 1}, optional
Code specifying the relative accuracy of the output
position.
= 0 ... full accuracy (default)
= 1 ... reduced accuracy
Returns
-------
(ra, dec) : tuple of floats
Topocentric (right ascension in hours, declination in
degrees), referred to true equator and equinox of date
'jd_tt'.
References
----------
    .. [R1] Bangert, J. et al. (2011), 'User's Guide to NOVAS
Version C3.1', C62-C63.
.. [R2] Explanatory Supplement to the Astronomical Almanac
(1992), Chapter 3.
"""
if jd_tt < 0.0:
raise ValueError(_neg_err.format(name='jd_tt'))
if accuracy not in [0, 1]:
raise ValueError(_option_err.format(name='accuracy', allowed=[0, 1]))
_topo_star = novaslib.topo_star
_topo_star.argtypes = (ctypes.c_double, ctypes.c_double,
ctypes.POINTER(CatEntry), ctypes.POINTER(OnSurface),
ctypes.c_short, ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double))
_topo_star.restype = ctypes.c_short
_topo_star.errcheck = _check_c_errors
_topo_star.c_errors = {
1: (ValueError, "from C function 'topo_star': Invalid value of 'where' in ctypes.Structure 'location'"),
11: (ValueError, "from C function 'make_object': invalid value of 'type'"),
12: (ValueError, "from C function 'make_object': 'number' out of range"),
13: (InitializationError, "from C function 'make_object': Initialization of 'cel_obj' failed (object name)."),
14: (InitializationError, "from C function 'make_object': Initialization of 'cel_obj' failed (catalog name)."),
15: (ValueError, "from C function 'make_object': 'name' is out of string bounds."),
21: (ValueError, "from C function 'place': invalid value of 'coord_sys'"),
22: (ValueError, "from C function 'place': invalid value of 'accuracy'"),
23: (ValueError, "from C function 'place': Earth is the observed object, and the observer is either at the geocenter or on the Earth's surface (not permitted)")
}
ra = ctypes.c_double()
dec = ctypes.c_double()
_topo_star(jd_tt, delta_t, ctypes.byref(star), ctypes.byref(position),
accuracy, ctypes.byref(ra), ctypes.byref(dec))
return (ra.value, dec.value) | fba937116b5f63b450fb028cc68a26e0e10305ae | 1,257 |
import numpy as np
def py_multiplicative_inverse(a, n):
"""Multiplicative inverse of a modulo n (in Python).
Implements extended Euclidean algorithm.
Args:
a: int-like np.ndarray.
n: int.
Returns:
Multiplicative inverse as an int32 np.ndarray with same shape as a.
"""
batched_a = np.asarray(a, dtype=np.int32)
n = np.asarray(n, dtype=np.int32)
batched_inverse = []
for a in np.nditer(batched_a):
inverse = 0
new_inverse = 1
remainder = n
new_remainder = a
while new_remainder != 0:
quotient = remainder // new_remainder
(inverse, new_inverse) = (new_inverse, inverse - quotient * new_inverse)
(remainder, new_remainder) = (new_remainder,
remainder - quotient * new_remainder)
if remainder > 1:
raise ValueError(
'Inverse for {} modulo {} does not exist.'.format(a, n))
if inverse < 0:
inverse += n
batched_inverse.append(inverse)
return np.asarray(batched_inverse, dtype=np.int32).reshape(batched_a.shape) | 87f4e21f9f8b5a9f10dbf4ec80128a37c1fa912c | 1,258 |
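A short worked example assuming NumPy and the function above: 3 * 5 = 15 ≡ 1 (mod 7), so the inverse of 3 modulo 7 is 5.

```python
import numpy as np

a = np.array([3, 5, 9])
inv = py_multiplicative_inverse(a, 7)
print(inv)            # [5 3 4]
print((a * inv) % 7)  # [1 1 1], every product is congruent to 1 mod 7
```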
def resample_nearest_neighbour(input_tif, extents, new_res, output_file):
"""
Nearest neighbor resampling and cropping of an image.
:param str input_tif: input geotiff file path
:param list extents: new extents for cropping
:param float new_res: new resolution for resampling
:param str output_file: output geotiff file path
:return: dst: resampled image
:rtype: ndarray
"""
dst, resampled_proj, src, _ = _crop_resample_setup(extents, input_tif,
new_res, output_file)
# Do the work
gdal.ReprojectImage(src, dst, '', resampled_proj,
gdalconst.GRA_NearestNeighbour)
return dst.ReadAsArray() | 107bcb72aff9060d024ff00d86b164cf41078630 | 1,260 |
def harvester_api_info(request, name):
"""
This function returns the pretty rendered
api help text of an harvester.
"""
harvester = get_object_or_404(Harvester, name=name)
api = InitHarvester(harvester).get_harvester_api()
response = api.api_infotext()
content = response.data[harvester.name].replace('\n', '<br>')
return HttpResponse(content, content_type='text/plain') | 6b02168d7c77414c57ca74104ff93dae1e698e30 | 1,261 |
import sqlite3
def init_db():
"""Open SQLite database, create facebook table, return connection."""
db = sqlite3.connect('facebook.sql')
cur = db.cursor()
cur.execute(SQL_CREATE)
db.commit()
cur.execute(SQL_CHECK)
parse = list(cur.fetchall())[0][0] == 0
return db, cur, parse | 61d8cc968c66aaddfc55ef27ee02dec13c4b28f2 | 1,262 |
import tensorflow as tf
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None | bf6bc2f7b0a7bb9eaa23a0c28686bfe16a8e3ced | 1,264 |
def module_for_category( category ):
"""Return the OpenGL.GL.x module for the given category name"""
if category.startswith( 'VERSION_' ):
name = 'OpenGL.GL'
else:
owner,name = category.split( '_',1)
if owner.startswith( '3' ):
owner = owner[1:]
name = 'OpenGL.GL.%s.%s'%( owner,name )
return __import__( name, {}, {}, name.split( '.' )) | 0e88467a1dd7f5b132d46a9bdc99765c274f69f3 | 1,265 |
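An illustrative call, assuming PyOpenGL is installed so the underlying `__import__` succeeds; the category names are examples of the mapping (`VERSION_*` maps to `OpenGL.GL`, a leading digit in the vendor prefix is dropped).

```python
# 'VERSION_1_1'       -> OpenGL.GL
# 'ARB_depth_texture' -> OpenGL.GL.ARB.depth_texture
# '3DFX_tbuffer'      -> OpenGL.GL.DFX.tbuffer
mod = module_for_category('ARB_depth_texture')
print(mod.__name__)   # OpenGL.GL.ARB.depth_texture
```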
from datetime import datetime as dt
def timestamp() -> str:
"""generate formatted timestamp for the invocation moment"""
return dt.now().strftime("%d-%m-%Y %H:%M:%S") | 4f5e3de7f8d0027a210055850c4fa2b4764a39b2 | 1,267 |
def sde(trains, events=None, start=0 * pq.ms, stop=None,
kernel_size=100 * pq.ms, optimize_steps=0,
minimum_kernel=10 * pq.ms, maximum_kernel=500 * pq.ms,
kernel=None, time_unit=pq.ms, progress=None):
""" Create a spike density estimation plot.
The spike density estimations give an estimate of the instantaneous
rate. Optionally finds optimal kernel size for given data.
:param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists.
:param dict events: A dictionary (with the same indices as ``trains``)
of Event objects or lists of Event objects. In case of lists,
the first event in the list will be used for alignment. The events
will be at time 0 on the plot. If None, spike trains are used
unmodified.
:param start: The desired time for the start of the first bin. It
will be recalculated if there are spike trains which start later
than this time. This parameter can be negative (which could be
useful when aligning on events).
:type start: Quantity scalar
:param stop: The desired time for the end of the last bin. It will
be recalculated if there are spike trains which end earlier
than this time.
:type stop: Quantity scalar
:param kernel_size: A uniform kernel size for all spike trains.
Only used if optimization of kernel sizes is not used (i.e.
``optimize_steps`` is 0).
:type kernel_size: Quantity scalar
:param int optimize_steps: The number of different kernel sizes tried
between ``minimum_kernel`` and ``maximum_kernel``.
If 0, ``kernel_size`` will be used.
:param minimum_kernel: The minimum kernel size to try in optimization.
:type minimum_kernel: Quantity scalar
:param maximum_kernel: The maximum kernel size to try in optimization.
:type maximum_kernel: Quantity scalar
:param kernel: The kernel function or instance to use, should accept
two parameters: A ndarray of distances and a kernel size.
The total area under the kernel function should be 1.
Automatic optimization assumes a Gaussian kernel and will
likely not produce optimal results for different kernels.
Default: Gaussian kernel
:type kernel: func or :class:`spykeutils.signal_processing.Kernel`
:param Quantity time_unit: Unit of X-Axis.
:param progress: Set this parameter to report progress.
:type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
"""
if not progress:
progress = ProgressIndicator()
start.units = time_unit
if stop:
stop.units = time_unit
kernel_size.units = time_unit
minimum_kernel.units = time_unit
maximum_kernel.units = time_unit
if kernel is None:
kernel = signal_processing.GaussianKernel(100 * pq.ms)
# Align spike trains
for u in trains:
if events:
trains[u] = rate_estimation.aligned_spike_trains(
trains[u], events)
# Calculate spike density estimation
if optimize_steps:
steps = sp.logspace(sp.log10(minimum_kernel),
sp.log10(maximum_kernel),
optimize_steps) * time_unit
sde, kernel_size, eval_points = \
rate_estimation.spike_density_estimation(
trains, start, stop,
optimize_steps=steps, kernel=kernel,
progress=progress)
else:
sde, kernel_size, eval_points = \
rate_estimation.spike_density_estimation(
trains, start, stop,
kernel_size=kernel_size, kernel=kernel,
progress=progress)
progress.done()
if not sde:
raise SpykeException('No spike trains for SDE!')
# Plot
win_title = 'Kernel Density Estimation'
win = PlotDialog(toolbar=True, wintitle=win_title)
pW = BaseCurveWidget(win)
plot = pW.plot
plot.set_antialiasing(True)
for u in trains:
if u and u.name:
name = u.name
else:
name = 'Unknown'
curve = make.curve(
eval_points, sde[u],
title='%s, Kernel width %.2f %s' %
(name, kernel_size[u], time_unit.dimensionality.string),
color=helper.get_object_color(u))
plot.add_item(curve)
plot.set_axis_title(BasePlot.X_BOTTOM, 'Time')
plot.set_axis_unit(BasePlot.X_BOTTOM, eval_points.dimensionality.string)
plot.set_axis_title(BasePlot.Y_LEFT, 'Rate')
plot.set_axis_unit(BasePlot.Y_LEFT, 'Hz')
l = make.legend()
plot.add_item(l)
win.add_plot_widget(pW, 0)
win.add_custom_curve_tools()
win.add_legend_option([l], True)
win.show()
return win | 0b045ec676a9c31f4e0f89361d5ff8c13a238624 | 1,268 |
def content(obj):
"""Strip HTML tags for list display."""
return strip_tags(obj.content.replace('</', ' </')) | 413eed5f6b9ede0f31ede6a029e111a2910cc805 | 1,269 |
import numpy as np
def flux(Q, N, ne, Ap, Am):
"""
calculates the flux between two boundary sides of
connected elements for element i
"""
# for every element we have 2 faces to other elements (left and right)
out = np.zeros((ne, N + 1, 2))
# Calculate Fluxes inside domain
for i in range(1, ne - 1):
out[i, 0, :] = Ap @ (-Q[i - 1, N, :]) + Am @ (-Q[i, 0, :])
out[i, N, :] = Ap @ (Q[i, N, :]) + Am @ (Q[i + 1, 0, :])
# Boundaries
# Left
    out[0, 0, :] = Ap @ np.array([0, 0]) + Am @ (-Q[0, 0, :])
out[0, N, :] = Ap @ (Q[0, N, :]) + Am @ (Q[1, 0, :])
# Right
out[ne - 1, 0, :] = Ap @ (-Q[ne - 2, N, :]) + Am @ (-Q[ne - 1, 0, :])
out[ne - 1, N, :] = Ap @ (Q[ne - 1, N, :]) + Am @ np.array([0, 0])
return out | decc1b84cd0f23ac7f437d2c47e76cf6ed961a28 | 1,270 |
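A shape-level sketch with made-up data, assuming the `flux` function above: three linear elements (N=1) with two state components per node, and arbitrary 2x2 matrices standing in for the characteristic splitting Ap/Am.

```python
import numpy as np

N, ne = 1, 3                              # linear elements, three of them
Q = np.arange(ne * (N + 1) * 2, dtype=float).reshape(ne, N + 1, 2)
Ap = np.array([[1.0, 0.0], [0.0, 0.0]])   # placeholder "plus" flux matrix
Am = np.array([[0.0, 0.0], [0.0, -1.0]])  # placeholder "minus" flux matrix
out = flux(Q, N, ne, Ap, Am)
print(out.shape)                          # (3, 2, 2): one flux per element face
```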
import shutil
def cp_dir(src_dir, dest_dir):
"""Function: cp_dir
Description: Copies a directory from source to destination.
Arguments:
(input) src_dir -> Source directory.
(input) dest_dir -> Destination directory.
(output) status -> True|False - True if copy was successful.
(output) err_msg -> Error message from copytree exception or None.
"""
status = True
err_msg = None
try:
shutil.copytree(src_dir, dest_dir)
# Directory permission error.
except shutil.Error as err:
err_msg = "Directory not copied. Perms Error Message: %s" % (err)
status = False
# Directory does not exist.
except OSError as err:
err_msg = "Directory not copied. Exist Error Message: %s" % (err)
status = False
return status, err_msg | 13f82a485fb46e102780c2462f0ab092f0d62df1 | 1,271 |
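A small usage sketch with throw-away temporary directories (hypothetical paths), assuming the `cp_dir` function above; `copytree` requires that the destination does not exist yet.

```python
import os
import tempfile

src = tempfile.mkdtemp()
open(os.path.join(src, "example.txt"), "w").close()
dest = os.path.join(tempfile.mkdtemp(), "copy")
status, err_msg = cp_dir(src, dest)
print(status, err_msg)   # True None on success
```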
import torch
import torch.nn.functional as F
def listnet_loss(y_i, z_i):
    """
    ListNet top-one loss.
    y_i: (n_i, 1) ground-truth relevance scores
    z_i: (n_i, 1) predicted scores
    """
P_y_i = F.softmax(y_i, dim=0)
P_z_i = F.softmax(z_i, dim=0)
    # ListNet top-one cross-entropy between the target and predicted distributions
    return - torch.sum(P_y_i * torch.log(P_z_i)) | c2b7dd9800ed591af392b17993c70b443f99524c | 1,272 |
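A toy call with made-up scores for a single three-document query, assuming PyTorch and the function above.

```python
import torch

y = torch.tensor([[3.0], [1.0], [0.0]])   # ground-truth relevance scores
z = torch.tensor([[2.5], [0.5], [0.1]])   # model scores
print(listnet_loss(y, z).item())          # a non-negative scalar loss
```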
import numpy as np
def normalize(data, **kw):
    """Calculates the normalization of the given array. The normalized
    array is returned as a new array.
    Args:
        data The data to be normalized
    Kwargs:
        upper_bound The upper bound of the normalization. It has the value
            of 1 by default.
        lower_bound The lower bound to be used for normalization. It has the
            value of 0 by default
        dtype The type of the returned ndarray. If the dtype given is an
            integer type, the returned array values will be truncated after
            being normalized.
    Returns:
        An instance of np.array with normalized values
    """
upper_bound = 1
lower_bound = 0
dtype = np.float64
if 'upper_bound' in kw:
upper_bound = kw['upper_bound']
if 'lower_bound' in kw:
lower_bound = kw['lower_bound']
if 'dtype' in kw:
dtype = kw['dtype']
check_ndarray(data)
newdata = data - data.min()
newdata = newdata / newdata.max()
newdata = newdata * (upper_bound - lower_bound)
newdata += lower_bound
return newdata.astype(dtype) | 2f6f1a28a5bac4eee221923465a022c79ec185af | 1,274 |
import numpy as np
def cc_across_time(tfx, tfy, cc_func, cc_args=()):
"""Cross correlations across time.
Args:
tfx : time-frequency domain signal 1
tfy : time-frequency domain signal 2
cc_func : cross correlation function.
cc_args : list of extra arguments of cc_func.
Returns:
cc_atime : cross correlation at different time.
Note:
If tfx and tfy are not of the same length, the result will be
truncated to the shorter one.
"""
return np.array([cc_func(x, y, *cc_args) for x, y in zip(tfx, tfy)]) | c22670b2f722884b048758dbc20df3bc58cd9b0f | 1,276 |
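A small sketch, assuming the function above: two made-up time-frequency arrays with 4 frames of 8 bins each, and a plain inner product as the per-frame correlation function.

```python
import numpy as np

rng = np.random.default_rng(0)
tfx = rng.standard_normal((4, 8))
tfy = rng.standard_normal((4, 8))
cc = cc_across_time(tfx, tfy, lambda x, y: float(np.dot(x, y)))
print(cc.shape)   # (4,), one correlation value per time frame
```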
import chardet
def predict_encoding(file_path, n_lines=20):
"""Predict a file's encoding using chardet"""
# Open the file as binary data
with open(file_path, "rb") as f:
# Join binary lines for specified number of lines
rawdata = b"".join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)["encoding"] | 1ccef9982846fe0c88124b9e583cf68be070e63a | 1,277 |
def redirect_handler(url, client_id, client_secret, redirect_uri, scope):
"""
Convenience redirect handler.
Provide the redirect url (containing auth code)
along with client credentials.
Returns a spotify access token.
"""
auth = ExtendedOAuth(
client_id, client_secret, redirect_uri, scope=scope)
code = auth.parse_response_code(url)
token = auth.get_access_token(code)
return token | c682af3d7da51afdcba9a46aa4b44dd983d3fe40 | 1,278 |
def convert_coordinate(coordinate):
"""
:param coordinate: str - a string map coordinate
    :return: tuple - the string coordinate separated into its individual components.
"""
coord = (coordinate[0], coordinate[1])
return coord | a3852f5b4e4faac066c8f71e945ed7f46fbf2509 | 1,279 |
from typing import List
def get_noun_phrases(doc: Doc) -> List[Span]:
"""Compile a list of noun phrases in sense2vec's format (without
determiners). Separated out to make it easier to customize, e.g. for
languages that don't implement a noun_chunks iterator out-of-the-box, or
use different label schemes.
doc (Doc): The Doc to get noun phrases from.
RETURNS (list): The noun phrases as a list of Span objects.
"""
trim_labels = ("advmod", "amod", "compound")
spans = []
if doc.is_parsed:
for np in doc.noun_chunks:
while len(np) > 1 and np[0].dep_ not in trim_labels:
np = np[1:]
spans.append(np)
return spans | 38d78164147b012437f7c8b8d4c7fe13eb574515 | 1,282 |
import json
from urllib.request import urlopen
def load_file_from_url(url):
"""Load the data from url."""
url_path = get_absolute_url_path(url, PATH)
response = urlopen(url_path)
contents = json.loads(response.read())
return parse_file_contents(contents, url_path.endswith(".mrsys")) | 7eaa3d666c9e1fbdd9bad57047dd1b98712bd22b | 1,283 |
from numpy import fmin  # element-wise minimum used below
def speedPunisherMin(v, vmin):
    """
    Quadratic penalty for speeds below the minimum.
    :param v: speed value(s)
    :param vmin: minimum allowed speed
    :return: squared shortfall below vmin (0 where v >= vmin)
    """
x = fmin(v - vmin, 0)
return x ** 2 | 9e6e929226ea20d70d26f6748f938981885914c7 | 1,284 |
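A quick check of the penalty shape, assuming the NumPy-based `fmin` import added above the function.

```python
import numpy as np

# 8 is 2 below the limit of 10, so the penalty is 4; 12 is above, so no penalty.
print(speedPunisherMin(np.array([8.0, 12.0]), 10.0))   # [4. 0.]
```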
def hexagonal_packing_cross_section(nseeds, Areq, insu, out_insu):
""" Make a hexagonal packing and scale the result to be Areq cross section
Parameter insu must be a percentage of the strand radius.
out_insu is the insulation thickness around the wire as meters
Returns:
(wire diameter, strand diameter, strand center points)
"""
seeds = np.linspace(-0.5, 0.5,nseeds)
dx = seeds[1]-seeds[0]
xs, ys = np.meshgrid(seeds, seeds)
if (nseeds-1) % 4 == 0:
ys[:,1::2] = ys[:,1::2] + 0.5*dx;
else:
ys[:,0::2] = ys[:,0::2] + 0.5*dx;
ys = ys*2/np.sqrt(3);
points = np.stack([xs.reshape(-1), ys.reshape(-1)], axis=1)
vor = Voronoi(points)
hexs = [v for v in vor.regions if len(v) == 6]
all_cells = vor.vertices[hexs, :]
max_dists = np.max(np.linalg.norm(all_cells, axis=2), axis=1)
cells = all_cells[max_dists < 0.5, :]
strand_cps = np.mean(cells, axis=1)
# if strand bundle is not symmetric, it will be off center so...
# move it back to center
strand_cps = strand_cps - np.mean(strand_cps, axis=0)
    # The strand diameter is the minimum of the distances from the first cell
    # center to all the others, minus the insulation thickness.
strand_diam = np.min(np.linalg.norm(strand_cps[1:]-strand_cps[0], axis=1))*(1-insu)
nstrands = len(strand_cps)
Acu = nstrands*(strand_diam/2)**2*np.pi
scale = np.sqrt(Areq/Acu)
strand_cps_scaled = scale*strand_cps
strand_diam_scaled = scale*strand_diam
wire_diameter = (np.max(np.linalg.norm(strand_cps_scaled, axis=1), axis=0)*2
+ strand_diam_scaled*(1+insu)/(1-insu)
+ out_insu)
return wire_diameter, strand_diam_scaled, strand_cps_scaled | 759cc26a9606ac327851d9b1e691052123029d66 | 1,286 |
def bk():
"""
Returns an RGB object representing a black pixel.
This function is created to make smile() more legible.
"""
return introcs.RGB(0,0,0) | 0343367302c601fce9057a8191b666a098eaec81 | 1,287 |
def autoEpochToTime(epoch):
"""
Converts a long offset from Epoch value to a DBDateTime. This method uses expected date ranges to
infer whether the passed value is in milliseconds, microseconds, or nanoseconds. Thresholds used are
TimeConstants.MICROTIME_THRESHOLD divided by 1000 for milliseconds, as-is for microseconds, and
multiplied by 1000 for nanoseconds. The value is tested to see if its ABS exceeds the threshold. E.g. a value
whose ABS is greater than 1000 * TimeConstants.MICROTIME_THRESHOLD will be treated as nanoseconds.
:param epoch: (long) - The long Epoch offset value to convert.
:return: (io.deephaven.db.tables.utils.DBDateTime) null, if the input is equal to QueryConstants.NULL_LONG, otherwise a DBDateTime based
on the inferred conversion.
"""
return _java_type_.autoEpochToTime(epoch) | 1f2ae0397044c19413544a359a1d966a4f223128 | 1,288 |
def compile_recursive_descent(file_lines, *args, **kwargs):
"""Given a file and its lines, recursively compile until no ksx statements remain"""
visited_files = kwargs.get('visited_files', set())
# calculate a hash of the file_lines and check if we have already compiled
# this one
file_hash = hash_file_contents(file_lines)
if len(visited_files) > RECURSION_DESCENT_LIMIT:
msg = (
"Compiler appears to be in a circular reference loop, "
"this is currently non-recoverable and is a known issue.\n\n"
"See: https://github.com/LeonardMH/kos-scripts/issues/7 \n\n"
"In the meantime check your library for files which import a "
"file, where that file imports the original (A->B->A).\n\n"
"You might also attempt using the 'from x import y' syntax which "
"has slightly narrower scope."
)
raise CircularImportError(msg)
if file_hash in visited_files:
# we have already compiled this file, no need to do so again
return ""
else:
# we will now compile the file, mark that it has been visited
visited_files.add(file_hash)
# compile and split back out to individual lines
file_oneline = compile_single_file_lines(file_lines, *args, **kwargs)
file_lines = file_oneline.split('\n')
# if there are no more ksx directives in the lines compiled we are done,
# return the stringified compile result
if not file_has_ksx_directive(file_lines):
return file_oneline
# if there are still more ksx directives in the lines compiled so far, run
# again
kwargs['visited_files'] = visited_files
return compile_recursive_descent(file_lines, *args, **kwargs).rstrip() + '\n' | 9e5306c2d2cc6696883ac3ec37114c13340fe1f5 | 1,289 |
import numpy as np
def majority_voting(masks, voting='hard', weights=None, threshold=0.5):
"""Soft Voting/Majority Rule mask merging; Signature based upon the Scikit-learn VotingClassifier (https://github.com/scikit-learn/scikit-learn/blob/2beed55847ee70d363bdbfe14ee4401438fba057/sklearn/ensemble/_voting.py#L141)
Parameters
----------
masks : segmentations masks to merge, ndarray
Expected shape is num_of_masks * 1 * h * w
Accepts masks in range 0-1 (i.e apply sigmoid before passing to this function)
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
threshold : for separating between the positive and negative class, default=0.5
Applied first in case of hard voting and applied last in case of soft voting
"""
assert len(masks.shape) == 4
if voting not in ('soft', 'hard'):
raise ValueError(f"Voting must be 'soft' or 'hard'; got (voting= {voting})")
for m in masks:
assert (m >= 0.).all() and (m <= 1.).all()
if voting == 'hard':
masks = (masks >= threshold).astype(np.float32)
if weights is None:
weights = np.array([1] * masks.shape[0])
else:
weights = np.array(weights)
# Broadcasting starts with the trailing (i.e. rightmost) dimensions and works its way left, therefore we move the "mask" dimension to the right
masks= np.transpose(masks, (1, 2, 3, 0))
masks = masks * weights
masks= np.transpose(masks, (3, 0, 1, 2))
masks = masks.sum(axis=0)
if voting == 'soft':
masks = (masks >= (threshold * weights.sum())).astype(np.float32)
elif voting == 'hard': # Same as doing a majority vote
masks = (masks > (0.5 * weights.sum())).astype(np.float32)
assert len(masks.shape) == 3
return masks.astype(np.float32) | 882e98bc3a0c817c225f740042eb43b3bc4734fa | 1,290 |
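A toy call, assuming the function above: three 1-channel 2x2 probability masks with made-up values; under hard voting a pixel survives when at least two of the three masks exceed the threshold.

```python
import numpy as np

masks = np.array([
    [[[0.9, 0.2], [0.8, 0.1]]],
    [[[0.6, 0.4], [0.3, 0.2]]],
    [[[0.2, 0.7], [0.9, 0.4]]],
], dtype=np.float32)            # shape (3, 1, 2, 2)
merged = majority_voting(masks, voting='hard', threshold=0.5)
# pixels (0,0) and (1,0) get two of three votes, the rest do not
print(merged.shape, merged.ravel())   # (1, 2, 2) [1. 0. 1. 0.]
```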
def animate(zdata,
xdata,
ydata,
conversionFactorArray,
timedata,
BoxSize,
timeSteps=100,
filename="particle"):
"""
Animates the particle's motion given the z, x and y signal (in Volts)
and the conversion factor (to convert between V and nm).
Parameters
----------
zdata : ndarray
Array containing the z signal in volts with time.
xdata : ndarray
Array containing the x signal in volts with time.
ydata : ndarray
Array containing the y signal in volts with time.
conversionFactorArray : ndarray
Array of 3 values of conversion factors for z, x and y (in units of Volts/Metre)
timedata : ndarray
Array containing the time data in seconds.
BoxSize : float
The size of the box in which to animate the particle - in nm
timeSteps : int, optional
Number of time steps to animate
filename : string, optional
filename to create the mp4 under (<filename>.mp4)
"""
timePerFrame = 0.203
print("This will take ~ {} minutes".format(timePerFrame * timeSteps / 60))
convZ = conversionFactorArray[0] * 1e-9
convX = conversionFactorArray[1] * 1e-9
convY = conversionFactorArray[2] * 1e-9
ZBoxStart = -BoxSize # 1/conv*(_np.mean(zdata)-0.06)
ZBoxEnd = BoxSize # 1/conv*(_np.mean(zdata)+0.06)
XBoxStart = -BoxSize # 1/conv*(_np.mean(xdata)-0.06)
XBoxEnd = BoxSize # 1/conv*(_np.mean(xdata)+0.06)
YBoxStart = -BoxSize # 1/conv*(_np.mean(ydata)-0.06)
YBoxEnd = BoxSize # 1/conv*(_np.mean(ydata)+0.06)
FrameInterval = 1 # how many timesteps = 1 frame in animation
a = 20
b = 0.6 * a
myFPS = 7
myBitrate = 1000000
fig = _plt.figure(figsize=(a, b))
ax = fig.add_subplot(111, projection='3d')
ax.set_title("{} us".format(timedata[0] * 1000000))
ax.set_xlabel('X (nm)')
ax.set_xlim([XBoxStart, XBoxEnd])
ax.set_ylabel('Y (nm)')
ax.set_ylim([YBoxStart, YBoxEnd])
ax.set_zlabel('Z (nm)')
ax.set_zlim([ZBoxStart, ZBoxEnd])
ax.view_init(20, -30)
# ax.view_init(0, 0)
def setup_plot():
XArray = 1 / convX * xdata[0]
YArray = 1 / convY * ydata[0]
ZArray = 1 / convZ * zdata[0]
scatter = ax.scatter(XArray, YArray, ZArray)
return scatter,
def animate(i):
# print "\r {}".format(i),
print("Frame: {}".format(i), end="\r")
ax.clear()
ax.view_init(20, -30)
ax.set_title("{} us".format(int(timedata[i] * 1000000)))
ax.set_xlabel('X (nm)')
ax.set_xlim([XBoxStart, XBoxEnd])
ax.set_ylabel('Y (nm)')
ax.set_ylim([YBoxStart, YBoxEnd])
ax.set_zlabel('Z (nm)')
ax.set_zlim([ZBoxStart, ZBoxEnd])
XArray = 1 / convX * xdata[i]
YArray = 1 / convY * ydata[i]
ZArray = 1 / convZ * zdata[i]
scatter = ax.scatter(XArray, YArray, ZArray)
ax.scatter([XArray], [0], [-ZBoxEnd], c='k', alpha=0.9)
ax.scatter([-XBoxEnd], [YArray], [0], c='k', alpha=0.9)
ax.scatter([0], [YBoxEnd], [ZArray], c='k', alpha=0.9)
Xx, Yx, Zx, Xy, Yy, Zy, Xz, Yz, Zz = [], [], [], [], [], [], [], [], []
for j in range(0, 30):
Xlast = 1 / convX * xdata[i - j]
Ylast = 1 / convY * ydata[i - j]
Zlast = 1 / convZ * zdata[i - j]
Alpha = 0.5 - 0.05 * j
if Alpha > 0:
ax.scatter([Xlast], [0 + j * 10], [-ZBoxEnd],
c='grey',
alpha=Alpha)
ax.scatter([-XBoxEnd], [Ylast], [0 - j * 10],
c='grey',
alpha=Alpha)
ax.scatter([0 - j * 2], [YBoxEnd], [Zlast],
c='grey',
alpha=Alpha)
Xx.append(Xlast)
Yx.append(0 + j * 10)
Zx.append(-ZBoxEnd)
Xy.append(-XBoxEnd)
Yy.append(Ylast)
Zy.append(0 - j * 10)
Xz.append(0 - j * 2)
Yz.append(YBoxEnd)
Zz.append(Zlast)
if j < 15:
XCur = 1 / convX * xdata[i - j + 1]
YCur = 1 / convY * ydata[i - j + 1]
ZCur = 1 / convZ * zdata[i - j + 1]
ax.plot([Xlast, XCur], [Ylast, YCur], [Zlast, ZCur], alpha=0.4)
ax.plot_wireframe(Xx, Yx, Zx, color='grey')
ax.plot_wireframe(Xy, Yy, Zy, color='grey')
ax.plot_wireframe(Xz, Yz, Zz, color='grey')
return scatter,
anim = _animation.FuncAnimation(fig,
animate,
int(timeSteps / FrameInterval),
init_func=setup_plot,
blit=True)
_plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
mywriter = _animation.FFMpegWriter(fps=myFPS, bitrate=myBitrate)
# , fps = myFPS, bitrate = myBitrate)
anim.save('{}.mp4'.format(filename), writer=mywriter)
return None | aa0f08481f7efc39dae725a0c5f7fbc377586261 | 1,291 |
import re
def name_of_decompressed(filename):
""" Given a filename check if it is in compressed type (any of
['.Z', '.gz', '.tar.gz', '.zip']; if indeed it is compressed return the
name of the uncompressed file, else return the input filename.
"""
dct = {
'.Z': re.compile('.Z$'),
'.tar.gz': re.compile('.tar.gz$'),
'.gz': re.compile('.gz$'),
'.zip': re.compile('.zip$')
}
ctype = find_os_compression_type(filename)
if ctype is None:
return filename
try:
return re.sub(dct[ctype], '', filename)
except:
raise RuntimeError('[ERROR] decompress:name_of_decompressed Failed!') | ee0c49edca853fbf1da8caccbba68c9cde391f6b | 1,292 |
import random
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1 | 2e8a5e2d3c8fd6770e78a6ad30afc52f63c43073 | 1,293 |
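A quick empirical check, assuming the function above: sampling many times should reproduce the distribution roughly.

```python
import random

random.seed(0)
dist = [0.1, 0.6, 0.3]
counts = [0, 0, 0]
for _ in range(10000):
    counts[sample_distribution(dist)] += 1
print(counts)   # roughly [1000, 6000, 3000]
```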
def benchmark(func):
"""Decorator to mark a benchmark."""
BENCHMARKS[func.__name__] = func
return func | 0edadb46c446ed5603434d14ab7a40cdf76651b5 | 1,294 |
import numpy as np
def do_positive_DFT(data_in, tmax):
    """
    Do a Discrete Fourier transformation and take the POSITIVE frequency component part.
    Args:
        data_in (array): input data.
        tmax (int): number of time samples.
Returns:
data_s (array): output array with POSITIVE frequency component part.
data_w (array): the Discrete Fourier Transform sample frequencies POSITIVE frequency component part.
"""
data_s = np.fft.fft(data_in)
data_w = np.fft.fftfreq(tmax)
# only take the positive frequency components
return data_w[0:tmax//2], data_s[0:tmax//2] | c3bab6b9595cf77869f65eacf6acf6d7f990ca10 | 1,295 |
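A quick sanity check, assuming the function and NumPy import above: a sine with 5 cycles over a 64-sample window peaks at the bin whose frequency is 5/64 cycles per sample.

```python
import numpy as np

tmax = 64
t = np.arange(tmax)
signal = np.sin(2 * np.pi * 5 * t / tmax)    # 5 cycles over the window
freqs, spectrum = do_positive_DFT(signal, tmax)
print(freqs[np.argmax(np.abs(spectrum))])    # 0.078125 == 5/64
```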
def service(base_app, location):
"""Service fixture."""
return base_app.extensions["invenio-records-lom"].records_service | 52ad7f4624e7d0af153f0fcaaccfb56effddb86d | 1,296 |
def check_file_content(path, expected_content):
"""Check file has expected content.
:param str path: Path to file.
:param str expected_content: Expected file content.
"""
    with open(path) as infile:
        return expected_content == infile.read() | 77bdfae956ce86f2422ed242c4afcaab19cab384 | 1,297 |
from datetime import datetime
import multiprocessing as mp
from sqlalchemy import select
def verify_apikey(payload,
raiseonfail=False,
override_authdb_path=None,
override_permissions_json=None,
config=None):
"""Checks if an API key is valid.
This version does not require a session.
Parameters
----------
payload : dict
This dict contains a single key:
- apikey_dict: the decrypted and verified API key info dict from the
frontend.
- user_id: the user ID of the person wanting to verify this key.
- user_role: the user role of the person wanting to verify this key.
raiseonfail : bool
If True, will raise an Exception if something goes wrong.
override_authdb_path : str or None
If given as a str, is the alternative path to the auth DB.
override_permissions_json : str or None
If given as a str, is the alternative path to the permissions JSON to
use. This is used to check if the user_id is allowed to actually verify
("read") an API key.
config : SimpleNamespace object or None
An object containing systemwide config variables as attributes. This is
useful when the wrapping function needs to pass in some settings
directly from environment variables.
Returns
-------
dict
The dict returned is of the form::
{'success': True if API key is OK and False otherwise,
'messages': list of str messages if any}
"""
for key in ('reqid', 'pii_salt'):
if key not in payload:
LOGGER.error(
"Missing %s in payload dict. Can't process this request." % key
)
return {
'success': False,
'failure_reason': (
"invalid request: missing '%s' in request" % key
),
'apikey': None,
'expires': None,
'messages': ["Invalid API key request."],
}
for key in ('apikey_dict', 'user_id', 'user_role'):
if key not in payload:
LOGGER.error(
'[%s] Invalid API key request, missing %s.' %
(payload['reqid'], key)
)
return {
'success': False,
'failure_reason': (
"invalid request: missing '%s' in request" % key
),
'messages': ["Some required keys are missing from payload."]
}
apikey_dict = payload['apikey_dict']
user_id = payload['user_id']
user_role = payload['user_role']
# check if the user is allowed to read the presented API key
apikey_verify_allowed = check_user_access(
{'user_id': user_id,
'user_role': user_role,
'action': 'view',
'target_name': 'apikey',
'target_owner': apikey_dict['uid'],
'target_visibility': 'private',
'target_sharedwith': None,
'reqid': payload['reqid'],
'pii_salt': payload['pii_salt']},
raiseonfail=raiseonfail,
override_permissions_json=override_permissions_json,
override_authdb_path=override_authdb_path
)
if not apikey_verify_allowed['success']:
LOGGER.error(
"[%s] Invalid API key verification request. "
"from user_id: %s, role: %s. The API key presented is "
"not readable by this user." %
(payload['reqid'],
pii_hash(user_id, payload['pii_salt']),
pii_hash(user_role, payload['pii_salt']))
)
return {
'success': False,
'failure_reason': (
"originating user is not allowed to operate on this API key"
),
'messages': ["API key verification failed. "
"You are not allowed to operate on this API key."]
}
# this checks if the database connection is live
currproc = mp.current_process()
engine = getattr(currproc, 'authdb_engine', None)
if override_authdb_path:
currproc.auth_db_path = override_authdb_path
if not engine:
currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
authdb.get_auth_db(
currproc.auth_db_path,
echo=raiseonfail
)
)
apikeys = currproc.authdb_meta.tables['apikeys_nosession']
# the apikey sent to us must match the stored apikey's properties:
# - token
# - userid
# - expired must be in the future
# - issued must be in the past
# - not_valid_before must be in the past
dt_utcnow = datetime.utcnow()
sel = select([
apikeys.c.apikey,
apikeys.c.expires,
]).select_from(apikeys).where(
apikeys.c.apikey == apikey_dict['tkn']
).where(
apikeys.c.user_id == apikey_dict['uid']
).where(
apikeys.c.user_role == apikey_dict['rol']
).where(
apikeys.c.expires > dt_utcnow
).where(
apikeys.c.issued < dt_utcnow
).where(
apikeys.c.not_valid_before < dt_utcnow
)
result = currproc.authdb_conn.execute(sel)
row = result.fetchone()
result.close()
if row is not None and len(row) != 0:
LOGGER.info(
"[%s] No-session API key verified successfully. "
"user_id: %s, role: '%s', audience: '%s', subject: '%s', "
"apiversion: %s, expires on: %s" %
(payload['reqid'],
pii_hash(apikey_dict['uid'],
payload['pii_salt']),
apikey_dict['rol'],
apikey_dict['aud'],
apikey_dict['sub'],
apikey_dict['ver'],
apikey_dict['exp'])
)
return {
'success': True,
'messages': [(
"No-session API key verified successfully. Expires: %s." %
row['expires'].isoformat()
)]
}
else:
LOGGER.error(
"[%s] No-session API key verification failed. Failed key "
"user_id: %s, role: '%s', audience: '%s', subject: '%s', "
"apiversion: %s, expires on: %s" %
(payload['reqid'],
pii_hash(apikey_dict['uid'],
payload['pii_salt']),
apikey_dict['rol'],
apikey_dict['aud'],
apikey_dict['sub'],
apikey_dict['ver'],
apikey_dict['exp'])
)
return {
'success': False,
'failure_reason': (
"key validation failed, "
"provided key does not match stored key or has expired"
),
'messages': [(
"API key could not be verified."
)]
} | f1f5d9f65b2c9b8b9175ea4729042d9bb040a0e7 | 1,299 |
def case_mc2us(x):
""" mixed case to underscore notation """
return case_cw2us(x) | 13cd638311bea75699789a2f13b7a7d854f856bd | 1,301 |
def detail_url(reteta_id):
""""Return reteta detail URL"""
return reverse('reteta:reteta-detail', args=[reteta_id]) | 4b7219b5e0d7ae32656766a08c34f54a02d1634e | 1,303 |
def load_metadata_txt(file_path):
"""
Load distortion coefficients from a text file.
Parameters
----------
file_path : str
Path to a file.
Returns
-------
tuple of floats and list
Tuple of (xcenter, ycenter, list_fact).
"""
if ("\\" in file_path):
raise ValueError(
"Please use a file path following the Unix convention")
with open(file_path, 'r') as f:
x = f.read().splitlines()
list_data = []
for i in x:
list_data.append(float(i.split()[-1]))
xcenter = list_data[0]
ycenter = list_data[1]
list_fact = list_data[2:]
return xcenter, ycenter, list_fact | 44e6319aec6d77910e15e8890bcd78ffcdca3aa4 | 1,304 |
import torch
def _output_gradient(f, loss_function, dataset, labels, out0, batch_indices, chunk):
"""
internal function
"""
x = _getitems(dataset, batch_indices)
y = _getitems(labels, batch_indices)
if out0 is not None:
out0 = out0[batch_indices]
out = []
grad = 0
loss_value = 0
for i in [slice(i, i + chunk) for i in range(0, len(x), chunk)]:
o = f(x[i])
if out0 is not None:
o = o - out0[i]
l = loss_function(o, y[i])
assert l.shape == (len(o),)
l = l.sum() / len(x)
grad += gradient(l, f.parameters())
out.append(o)
loss_value += l.item()
return torch.cat(out), grad, loss_value | 252f79065ce953eb99df17842d62786cebadee67 | 1,305 |
def __material_desc_dict(m, d):
""" Unpack positions 18-34 into material specific dict. """
return dict(zip(MD_FIELDS[m],
{"BK": __material_bk, "CF": __material_cf,
"MP": __material_mp, "MU": __material_mu,
"CR": __material_cr, "VM": __material_vm,
"MX": __material_mx}[m](d))) | 9f87ce915bd5d226fa1d1ffd5991779c9a4fbdba | 1,306 |
def toint(x):
"""Try to convert x to an integer number without raising an exception."""
try: return int(x)
except: return x | bd1a675cb3f8f5c48e36f8f405a89dc637f3f558 | 1,307 |
def obtain_time_image(x, y, centroid_x, centroid_y, psi, time_gradient, time_intercept):
"""Create a pulse time image for a toymodel shower. Assumes the time development
occurs only along the longitudinal (major) axis of the shower, and scales
linearly with distance along the axis.
Parameters
----------
x : u.Quantity[length]
X camera coordinate to evaluate the time at.
Usually the array of pixel X positions
y : u.Quantity[length]
Y camera coordinate to evaluate the time at.
Usually the array of pixel Y positions
centroid_x : u.Quantity[length]
X camera coordinate for the centroid of the shower
centroid_y : u.Quantity[length]
Y camera coordinate for the centroid of the shower
psi : convertible to `astropy.coordinates.Angle`
rotation angle about the centroid (0=x-axis)
time_gradient : u.Quantity[time/length]
Rate at which the time changes with distance along the shower axis
time_intercept : u.Quantity[time]
Pulse time at the shower centroid
Returns
-------
float or ndarray
Pulse time in nanoseconds at (x, y)
"""
longitudinal, _ = camera_to_shower_coordinates(x, y, centroid_x, centroid_y, psi)
longitudinal_m = longitudinal.to_value(u.m)
time_gradient_ns_m = time_gradient.to_value(u.ns / u.m)
time_intercept_ns = time_intercept.to_value(u.ns)
return longitudinal_m * time_gradient_ns_m + time_intercept_ns | 4a57399e041c0fd487fe039e5091986438d4b8b8 | 1,308 |
import re
def remove_comment(to_remove, infile):
"""Removes trailing block comments from the end of a string.
Parameters:
to_remove: The string to remove the comment from.
infile: The file being read from.
Returns:
The paramter string with the block comment removed (if comment was
present in string).
"""
    start_comment = re.search(r'\s*(\/\*|//)', to_remove)
    # Remove comments if they are in the matched group.
    if start_comment:
        end_comment = re.search(r'.*\*\/', to_remove)
        if end_comment or ('//' in to_remove and '/*' not in to_remove):
            removed = to_remove[:start_comment.start(0)] + '\n'
            return removed
        while not end_comment:
            to_remove = next(infile)
            end_comment = re.search(r'.*\*\/', to_remove)
        return ''
else:
removed = to_remove
return removed | 0172b295c9a023eb96fbad7a6c3a388874e106bc | 1,309 |
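Two illustrative calls assuming the function above; `infile` only needs to be an iterator over lines, so a plain `iter(...)` stands in for a file object.

```python
# A single-line // comment: everything from the comment marker onward is dropped.
print(repr(remove_comment('int x = 1; // counter\n', iter([]))))
# -> 'int x = 1;\n'

# A block comment that continues onto later lines is consumed from the iterator.
lines = iter(['b = 2; */ rest\n'])
print(repr(remove_comment('int a = 1; /* start\n', lines)))
# -> ''
```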
def generate_notification_header(obj):
"""
Generates notification header information based upon the object -- this is
used to preface the notification's context.
Could possibly be used for "Favorites" descriptions as well.
:param obj: The top-level object instantiated class.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
:returns: str with a human readable identification of the object
"""
generate_notification_header_handler = NotificationHeaderManager.get_header_handler(obj._meta['crits_type'])
if generate_notification_header_handler is not None:
return generate_notification_header_handler(obj)
else:
return "%s: %s" % (obj._meta['crits_type'], str(obj.id)) | e02c2bdd9827077a49236ed7aa813458659f453c | 1,310 |
def promptyn(msg, default=None):
""" Display a blocking prompt until the user confirms """
while True:
yes = "Y" if default else "y"
if default or default is None:
no = "n"
else:
no = "N"
        confirm = input("%s [%s/%s]" % (msg, yes, no))
confirm = confirm.lower().strip()
if confirm == "y" or confirm == "yes":
return True
elif confirm == "n" or confirm == "no":
return False
elif len(confirm) == 0 and default is not None:
return default | 1bec535462b8e859bac32c424e8500c432eb7751 | 1,311 |
def plan_launch_spec(state):
""" Read current job params, and prescribe the next training job to launch
"""
last_run_spec = state['run_spec']
last_warmup_rate = last_run_spec['warmup_learning_rate']
add_batch_norm = last_run_spec['add_batch_norm']
learning_rate = last_run_spec['learning_rate']
if last_warmup_rate / 5 >= 1e-3:
logger.info('Reducing warmup rate by 1/5')
state['history']['num_warmup_adjustments'] += 1
state['run_spec']['warmup_learning_rate'] = last_warmup_rate * 0.5
state['next_action'] = 'launch_new'
elif add_batch_norm == 0:
logger.info('Adding batch normalization layer')
state['history']['num_batch_layer_adjustments'] += 1
state['run_spec']['add_batch_norm'] = 1 # we are only changing the model by adding batch layers
# prior to ELU. But can make more tweaks here.
state['next_action'] = 'launch_new'
elif learning_rate * 0.9 > 0.001:
state['run_spec']['learning_rate'] = learning_rate * 0.9
state['history']['num_learning_rate_adjustments'] += 1
state['next_action'] = 'launch_new'
else:
state['next_action'] = 'end'
return state | 5fee797f24db05eccb49a5b10a9d88917987f905 | 1,312 |
def ssgenTxOut0():
"""
ssgenTxOut0 is the 0th position output in a valid SSGen tx used to test out the
IsSSGen function
"""
# fmt: off
return msgtx.TxOut(
value=0x00000000, # 0
version=0x0000,
pkScript=ByteArray(
[
0x6a, # OP_RETURN
0x24, # 36 bytes to be pushed
0x94, 0x8c, 0x76, 0x5a, # 32 byte hash
0x69, 0x14, 0xd4, 0x3f,
0x2a, 0x7a, 0xc1, 0x77,
0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0x52, 0xde, 0x3d, 0x7c,
0x00, 0xe3, 0x23, 0x21, # 4 byte height
]
),
)
# fmt: on | 3bee03ef9bc3a326fff381b6d2594c3ea4c909e7 | 1,313 |
def sexag_to_dec(sexag_unit):
""" Converts Latitude and Longitude Coordinates from the Sexagesimal Notation
to the Decimal/Degree Notation"""
add_to_degree = (sexag_unit[1] + (sexag_unit[2]/60))/60
return sexag_unit[0]+add_to_degree | c9c4394920d2b483332eb4a81c0f0d9010179339 | 1,314 |
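A worked conversion assuming the function above: 30 degrees, 15 minutes, 50 seconds.

```python
# 30 + (15 + 50/60) / 60
print(sexag_to_dec((30, 15, 50)))   # 30.26388...
```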
import apysc as ap
from typing import Any
from typing import Tuple
def is_immutable_type(value: Any) -> bool:
"""
Get a boolean value whether specified value is immutable
type or not.
Notes
-----
apysc's value types, such as the `Int`, are checked
as immutable since these js types are immutable.
Parameters
----------
value : Any
Target value to check.
Returns
-------
result : bool
If a specified value is immutable, then True
will be set.
"""
immutable_types: Tuple = (
int, float, bool, str, complex, tuple, range, bytes,
ap.Int, ap.Number, ap.String, ap.Boolean,
)
if isinstance(value, immutable_types):
return True
return False | 79538477528df2e13eaf806231e2f43c756abacd | 1,315 |
import numpy as np
import pandas as pd
def add_column_node_type(df: pd.DataFrame) -> pd.DataFrame:
"""Add column `node_type` indicating whether a post is a parent or a leaf node
Args:
df: The posts DataFrame with the columns `id_post` and `id_parent_post`.
Returns:
df: A copy of df, extended by `node_type`.
"""
if "node_type" not in df.columns:
df_parent_posts = pd.DataFrame({"id_post": df.query("id_parent_post == id_parent_post").id_parent_post.unique()})
df_parent_posts["node_type"] = "parent"
return df.merge(df_parent_posts, how="left", on="id_post").replace({"node_type": np.nan}, "leaf")
else:
return df.copy() | 3ad8a12f1a872d36a14257bdaa38229768714fa5 | 1,316 |
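A toy thread structure with made-up post IDs, assuming pandas, NumPy, and the function above.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "id_post": [1, 2, 3, 4],
    "id_parent_post": [np.nan, 1, 1, 2],   # post 1 is the root, post 4 replies to 2
})
print(add_column_node_type(df))
# posts 1 and 2 are "parent" (they have replies), posts 3 and 4 are "leaf"
```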
import random
def read_motifs(fmotif):
"""
create a random pool of motifs to choose from for the monte-carlo simulations
"""
motif_pool = []
for line in open(fmotif):
if not line.strip(): continue
if line[0] == "#": continue
motif, count = line.rstrip().split()
motif_pool.extend(motif * int(count))
random.shuffle(motif_pool)
return motif_pool | 168a7f82727917aa5ca1a30b9aa9df1699261585 | 1,317 |
from App import Proxys
import math
def createCone( axis=1, basePos=-1, tipPos=1, radius=1, colour=(0.6,0.6,0.6), moiScale = 1, withMesh = True, **kwargs ):
"""
Create a rigid body for a cone with the specified attributes (axis is 0:x, 1:y, 2:z). Other rigid body parameters can be specified with keyword arguments, look at
App.Proxys.RigidBody for more details on available arguments. The following arguments will not be used:
meshes, moi, cdps.
If a negative mass parameter is specified, it will be scaled by the box volume and made positive.
"""
_fixMass( kwargs, math.pi*radius*radius*math.fabs(tipPos-basePos) )
proxy = Proxys.RigidBody( **kwargs )
return _createCone( proxy, axis, basePos, tipPos, radius, colour, moiScale, withMesh ) | 43a7e0134627ed8069359c29bc53f354d70498d9 | 1,318 |
from typing import Union
import numpy as np
def ef(candles: np.ndarray, lp_per: int = 10, hp_per: int = 30, f_type: str = "Ehlers", normalize: bool = False, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
# added to definition : use_comp: bool = False, comp_intensity: float = 90.0,
"""
https://www.tradingview.com/script/kPe86Nbc-Roofing-Filter-DW/
compression function not working
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if f_type == "Ehlers":
roof = erf( source, hp_per, lp_per)
elif f_type == "Gaussian":
roof = grf( source, hp_per, lp_per)
elif f_type == "Butterworth":
roof = brf( source, hp_per, lp_per)
rms = RMS(source, roof, np.round((hp_per + lp_per)/2))
if roof[-1] > 0:
norm_roof = roof/rms
elif roof[-1] < 0:
norm_roof = -np.abs(roof)/rms
else:
norm_roof = 0
if normalize:
filt = norm_roof
else:
filt = roof
if sequential:
return filt
else:
return filt[-1] | 6dd19e9a1cb5a8f293f4ec3eebef625e2b05bcfe | 1,319 |
from typing import Dict
def parse_displays(config: Dict) -> Dict[str, QueryDisplay]:
"""Parse display options from configuration."""
display_configs = config.get("displays")
if not display_configs:
return {}
displays = {}
for name, display_config in display_configs.items():
displays[name] = QueryDisplay(
name=name,
nrql=display_config.get("nrql"),
visualization=WidgetVisualization.from_str(display_config["visualization"]),
)
return displays | a7f3c32d3ceaf6c39ea16ee7e2f7ec843036487e | 1,320 |
async def update_result(user: dict, form: dict) -> str:
"""Extract form data and update one result and corresponding start event."""
informasjon = await create_finish_time_events(user, "finish_bib", form) # type: ignore
return informasjon | b9b97f3b08f08dc35a0744f38323d76ecb0c3fba | 1,321 |
from typing import List
import numpy as np
import torch
import copy
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask | 98a35b477338f0f472d34b49f4be9f9cd0303654 | 1,322 |
def has_ao_num(trexio_file) -> bool:
"""Check that ao_num variable exists in the TREXIO file.
Parameter is a ~TREXIO File~ object that has been created by a call to ~open~ function.
Returns:
True if the variable exists, False otherwise
Raises:
- Exception from trexio.Error class if TREXIO return code ~rc~ is TREXIO_FAILURE and prints the error message using string_of_error.
- Exception from some other error (e.g. RuntimeError).
"""
try:
rc = pytr.trexio_has_ao_num(trexio_file.pytrexio_s)
if rc == TREXIO_FAILURE:
raise Error(rc)
except:
raise
if rc == TREXIO_SUCCESS:
return True
else:
return False | 6a10204cc5d64a71e991fed1e43fd9ff81a250b9 | 1,323 |
def teapot(size=1.0):
"""
Z-axis aligned Utah teapot
Parameters
----------
size : float
Relative size of the teapot.
"""
vertices, indices = data.get("teapot.obj")
xmin = vertices["position"][:,0].min()
xmax = vertices["position"][:,0].max()
ymin = vertices["position"][:,1].min()
ymax = vertices["position"][:,1].max()
zmin = vertices["position"][:,2].min()
zmax = vertices["position"][:,2].max()
# Centering
vertices["position"][:,0] -= xmin + (xmax-xmin)/2
vertices["position"][:,1] -= ymin + (ymax-ymin)/2
vertices["position"][:,2] -= zmin + (zmax-zmin)/2
# Rotation to align on Z-axis
X = vertices["position"][:,0].copy()
Y = vertices["position"][:,1].copy()
Z = vertices["position"][:,2].copy()
NX = vertices["normal"][:,0].copy()
NY = vertices["normal"][:,1].copy()
NZ = vertices["normal"][:,2].copy()
vertices["position"][:,0] = X
vertices["position"][:,1] = Z
vertices["position"][:,2] = Y
vertices["normal"][:,0] = NX
vertices["normal"][:,1] = NZ
vertices["normal"][:,2] = NY
# Scaling according to height
vertices["position"] *= 2.0*size/(zmax-zmin)
return vertices, indices | 94cef5111384599f74bfe59fb97ba417c738ca50 | 1,324 |
def f30(x, rotations=None, shifts=None, shuffles=None):
"""
Composition Function 10 (N=3)
Args:
x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
rotations (matrix): Optional rotation matrices (NxDxD). If None
(default), the official matrices from the benchmark suite will be
used.
shifts (array): Optional shift vectors (NxD). If None (default), the
official vectors from the benchmark suite will be used.
shuffles (array): Optional shuffle vectors (NxD). If None (default), the
official permutation vectors from the benchmark suite will be used.
"""
nx = len(x)
if rotations is None:
rotations = transforms.rotations_cf[nx][9]
if shifts is None:
shifts = transforms.shifts_cf[9]
if shuffles is None:
shuffles = transforms.shuffles_cf[nx][1]
N = 3
funcs = [hybrid.f15, hybrid.f18, hybrid.f19]
sigmas = np.array([10.0, 30.0, 50.0])
biases = np.array([0.0, 100.0, 200.0])
offsets = np.array(
[1500, 1800, 1900]
) # subtract F* added at the end of the functions
vals = np.zeros(N)
w = np.zeros(N)
w_sm = 0.0
for i in range(0, N):
x_shifted = x - shifts[i][:nx]
vals[i] = funcs[i](
x, rotation=rotations[i], shift=shifts[i][:nx], shuffle=shuffles[i]
)
vals[i] -= offsets[i]
w[i] = _calc_w(x_shifted, sigmas[i])
w_sm += w[i]
if w_sm != 0.0:
w /= w_sm
else:
w = np.full(N, 1 / N)
return np.sum(w * (vals + biases)) + 3000 | d2bfe7a0bba501e1d7d5bcf29475ecc36f73913b | 1,325 |
def loadNode( collada, node, localscope ):
"""Generic scene node loading from a xml `node` and a `collada` object.
Knowing the supported nodes, create the appropiate class for the given node
and return it.
"""
if node.tag == tag('node'): return Node.load(collada, node, localscope)
elif node.tag == tag('translate'): return TranslateTransform.load(collada, node)
elif node.tag == tag('rotate'): return RotateTransform.load(collada, node)
elif node.tag == tag('scale'): return ScaleTransform.load(collada, node)
elif node.tag == tag('matrix'): return MatrixTransform.load(collada, node)
elif node.tag == tag('lookat'): return LookAtTransform.load(collada, node)
elif node.tag == tag('instance_geometry'): return GeometryNode.load(collada, node)
elif node.tag == tag('instance_camera'): return CameraNode.load(collada, node)
elif node.tag == tag('instance_light'): return LightNode.load(collada, node)
elif node.tag == tag('instance_controller'): return ControllerNode.load(collada, node)
elif node.tag == tag('instance_node'): return NodeNode.load(collada, node, localscope)
elif node.tag == tag('extra'):
return ExtraNode.load(collada, node)
elif node.tag == tag('asset'):
return None
else: raise DaeUnsupportedError('Unknown scene node %s' % str(node.tag)) | 68083c4490e44e71f33d1221776837f2c1d59b69 | 1,326 |
def create_xla_tff_computation(xla_computation, type_spec):
"""Creates an XLA TFF computation.
Args:
xla_computation: An instance of `xla_client.XlaComputation`.
type_spec: The TFF type of the computation to be constructed.
Returns:
An instance of `pb.Computation`.
"""
py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
py_typecheck.check_type(type_spec, computation_types.FunctionType)
return pb.Computation(
type=type_serialization.serialize_type(type_spec),
xla=pb.Xla(
hlo_module=pack_xla_computation(xla_computation),
parameter=_make_xla_binding_for_type(type_spec.parameter),
result=_make_xla_binding_for_type(type_spec.result))) | 5a02051913026029cab95d12199eb321fa511654 | 1,327 |
def render_contact_form(context):
"""
Renders the contact form which must be in the template context.
The most common use case for this template tag is to call it in the
template rendered by :class:`~envelope.views.ContactView`. The template
tag will then render a sub-template ``envelope/contact_form.html``.
.. versionadded:: 0.7.0
"""
try:
form = context['form']
except KeyError:
raise template.TemplateSyntaxError("There is no 'form' variable in the template context.")
return {
'form': form,
} | e243502fadbf094ed7277ec5db770a3b209174e2 | 1,328 |
from typing import List
from typing import Dict
def get_basic_project(reviews: int = 0) -> List[Dict]:
"""Get basic project config with reviews."""
reviews = max(reviews, MIN_REVIEW)
reviews = min(reviews, MAX_REVIEW)
middle_stages, entry_point = _get_middle_stages(reviews, OUTPUT_NAME)
input_stage = {
"brickName": "labelset-input",
"routing": {
"nextStageName": entry_point,
},
"stageName": "Input",
"stageConfig": {},
}
output_stage = {
"brickName": "labelset-output",
"stageName": OUTPUT_NAME,
"routing": {
"nextStageName": "END",
},
"stageConfig": {},
}
temp = [input_stage] + middle_stages + [output_stage]
return temp | 14c2252dec69ebbcec04fbd00de0fa5ac6d1cdf7 | 1,329 |
import re
def choose_quality(link, name=None, selected_link=None):
"""
choose quality for scraping
Keyword Arguments:
link -- Jenitem link with sublinks
name -- Name to display in dialog (default None)
"""
if name is None:
name = xbmc.getInfoLabel('listitem.label')
if link.startswith("http") or link.startswith("plugin"):
sublinks = [link]
else:
jen_link = JenItem(link)
sublinks = jen_link.getAll("sublink")
if not sublinks:
sublinks = [jen_link]
links = []
message = get_link_message()
if selected_link is None:
default_link = ADDON.getSetting("default_link")
else:
default_link = selected_link
link_dialog = ADDON.getSetting("use_link_dialog") == "true"
direct_links = False
for sublink in sublinks:
if link_dialog and "search" in sublink:
continue
if "searchsd" in sublink:
if default_link == "SD":
return sublink
label = 'SD'
if message['SD'] != '':
label += ' (%s)' % message['SD']
new_item = (label, sublink)
elif "search" in sublink:
if default_link == "HD":
return sublink
label = 'HD'
if message['HD'] != '':
label += ' (%s)' % message['HD']
new_item = (label, sublink)
else:
direct_links = True
            match = re.findall(r"(.*?)\((.*?)\)", sublink)
if match:
new_item = ('%s' % match[0][1], match[0][0])
else:
new_item = ('Link %s' % (int(sublinks.index(sublink)) + 1),
sublink)
links.append(new_item)
if link_dialog and (not direct_links or len(sublinks) > 1):
links.append(("Search", "search"))
if len(links) == 1:
url = links[0][1]
return url
select = xbmcgui.Dialog().select(name, [i[0] for i in links])
if select == -1:
return False
else:
url = links[select][1]
return url | a75214cd0acd1c0e3ede34241baeb07342aadb1b | 1,330 |
def picp_loss(target, predictions, total = True):
"""
Calculate 1 - PICP (see eval_metrics.picp for more details)
Parameters
----------
target : torch.Tensor
The true values of the target variable
predictions : list
- predictions[0] = y_pred_upper, predicted upper limit of the target variable (torch.Tensor)
- predictions[1] = y_pred_lower, predicted lower limit of the target variable (torch.Tensor)
total : bool, default = True
- When total is set to True, return a scalar value for 1- PICP
- When total is set to False, return 1-PICP along the horizon
Returns
-------
torch.Tensor
Returns 1-PICP, either as a scalar or over the horizon
"""
return 1-picp(target, predictions, total) | a6d8d150241b1a2f8dda00c9c182ba7196c65585 | 1,331 |
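# Hedged illustration of the quantity being inverted above. It assumes eval_metrics.picp
# computes the Prediction Interval Coverage Probability, i.e. the fraction of targets
# falling inside [y_pred_lower, y_pred_upper]; the tensors below are made-up example data.
import torch

target = torch.tensor([1.0, 2.0, 3.0, 4.0])
y_pred_upper = torch.tensor([1.5, 2.5, 2.9, 4.5])
y_pred_lower = torch.tensor([0.5, 1.5, 2.5, 3.5])

covered = (target >= y_pred_lower) & (target <= y_pred_upper)
picp_value = covered.float().mean()   # 3 of 4 targets are covered -> 0.75
print(1.0 - picp_value)               # what picp_loss would return -> 0.25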
def index_wrap(data, index):
"""
    Select the element stored at position ``index`` from ``data``.
    :param data: indexable data (e.g. a list, array or dict)
    :param index: index or key (e.g. 1, 2, 3, 'account_data', ...)
    :return: the data stored at position ``index``
"""
return data[index] | 42b53f1d9edf237b904f822c15ad1f1b930aa69c | 1,333 |
import pandas as pd
from pyteomics import mzml
def mzml_to_pandas_df(filename):
"""
Reads mzML file and returns a pandas.DataFrame.
"""
cols = ["retentionTime", "m/z array", "intensity array"]
slices = []
file = mzml.MzML(filename)
    while True:
        try:
            data = next(file)
            data["retentionTime"] = data["scanList"]["scan"][0]["scan time"] / 60
            del data["scanList"]
            slices.append(pd.DataFrame(data))
        except (StopIteration, KeyError):  # end of file, or a scan missing the expected fields
            break
df = pd.concat(slices)[cols]
df_to_numeric(df)
return df | 2c6f1956d7c499c9f22bc85665bd6b5ce9ed51c3 | 1,335 |
def metadata_volumes(response: Response,
request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST),
sourcetype: str=Query(None, title=opasConfig.TITLE_SOURCETYPE, description=opasConfig.DESCRIPTION_PARAM_SOURCETYPE),
sourcecode: str=Query(None, title=opasConfig.TITLE_SOURCECODE, description=opasConfig.DESCRIPTION_SOURCECODE),
limit: int=Query(200, title=opasConfig.TITLE_LIMIT, description=opasConfig.DESCRIPTION_LIMIT),
offset: int=Query(0, title=opasConfig.TITLE_OFFSET, description=opasConfig.DESCRIPTION_OFFSET),
):
"""
## Function
<b>Return a list of volumes for a SourceCode (aka, PEPCode (e.g., IJP)) per the limit and offset parameters</b>
## Return Type
models.JournalInfoList
## Status
This endpoint is working.
## Sample Call
http://localhost:9100/v1/Metadata/Volumes/CPS/
## Notes
## Potential Errors
"""
ocd, session_info = opasAPISupportLib.get_session_info(request, response)
# Solr is case sensitive, make sure arg is upper
    try:
        source_code = sourcecode.upper()
    except AttributeError:  # sourcecode was not supplied
        source_code = None
src_exists = ocd.get_sources(source_code=source_code)
if not src_exists[0] and source_code != "*" and source_code != "ZBK" and source_code is not None: # ZBK not in productbase table without booknum
response.status_code = httpCodes.HTTP_400_BAD_REQUEST
status_message = f"Failure: Bad SourceCode {source_code}"
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX,
session_info=session_info,
params=request.url._url,
item_of_interest=f"{source_code}",
return_status_code=response.status_code,
status_message=status_message
)
raise HTTPException(
status_code=response.status_code,
detail=status_message
)
else:
try:
ret_val = opasAPISupportLib.metadata_get_volumes(source_code, source_type=sourcetype, req_url=request.url, limit=limit, offset=offset)
except Exception as e:
            response.status_code = httpCodes.HTTP_400_BAD_REQUEST
status_message = "Error: {}".format(e)
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX,
session_info=session_info,
params=request.url._url,
item_of_interest=f"{source_code}",
return_status_code=response.status_code,
status_message=status_message
)
raise HTTPException(
status_code=response.status_code,
detail=status_message
)
else:
response.status_code = httpCodes.HTTP_200_OK
status_message = opasCentralDBLib.API_STATUS_SUCCESS
# 2020-07-23 No need to log success for these, can be excessive.
#ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX,
#session_info=session_info,
#params=request.url._url,
#item_of_interest=f"{source_code}",
#return_status_code=response.status_code,
#status_message=status_message
#)
return ret_val | e8e4a686eaac21b20f2d758b8bc7de74d38571ab | 1,336 |
def do_step_right(pos: int, step: int, width: int) -> int:
"""Takes current position and do 3 steps to the
right. Be aware of overflow as the board limit
on the right is reached."""
new_pos = (pos + step) % width
return new_pos | 530f3760bab00a7b943314ca735c3a11343b87f5 | 1,337 |
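# Quick check of the wrap-around behaviour on a hypothetical 10-cell-wide board.
print(do_step_right(pos=2, step=3, width=10))   # 5, no wrap needed
print(do_step_right(pos=8, step=3, width=10))   # (8 + 3) % 10 == 1, wraps around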
def log_agm(x, prec):
"""
Fixed-point computation of -log(x) = log(1/x), suitable
for large precision. It is required that 0 < x < 1. The
algorithm used is the Sasaki-Kanada formula
-log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1]
For faster convergence in the theta functions, x should
be chosen closer to 0.
Guard bits must be added by the caller.
HYPOTHESIS: if x = 2^(-n), n bits need to be added to
account for the truncation to a fixed-point number,
and this is the only significant cancellation error.
The number of bits lost to roundoff is small and can be
considered constant.
[1] Richard P. Brent, "Fast Algorithms for High-Precision
Computation of Elementary Functions (extended abstract)",
http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf
"""
x2 = (x*x) >> prec
# Compute jtheta2(x)**2
s = a = b = x2
while a:
b = (b*x2) >> prec
a = (a*b) >> prec
s += a
s += (MPZ_ONE<<prec)
s = (s*s)>>(prec-2)
s = (s*isqrt_fast(x<<prec))>>prec
# Compute jtheta3(x)**2
t = a = b = x
while a:
b = (b*x2) >> prec
a = (a*b) >> prec
t += a
t = (MPZ_ONE<<prec) + (t<<1)
t = (t*t)>>prec
# Final formula
p = agm_fixed(s, t, prec)
return (pi_fixed(prec) << prec) // p | e873db3a45270eb077d9dc17f2951e2e791ad601 | 1,338 |
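# Numerical sanity check of the Sasaki-Kanada identity the routine above relies on,
# -log(x) = pi / agm(theta2(x)^2, theta3(x)^2), using mpmath's arbitrary-precision
# theta and AGM functions instead of the fixed-point helpers (assumes mpmath is
# installed; x is an arbitrary test value with 0 < x < 1).
from mpmath import mp, mpf, jtheta, agm, pi, log

mp.dps = 50                       # work with 50 decimal digits
x = mpf("0.01")
lhs = -log(x)
rhs = pi / agm(jtheta(2, 0, x) ** 2, jtheta(3, 0, x) ** 2)
print(lhs - rhs)                  # difference should be on the order of 1e-49 or smaller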
import unicodedata
def simplify_name(name):
"""Converts the `name` to lower-case ASCII for fuzzy comparisons."""
return unicodedata.normalize('NFKD',
name.lower()).encode('ascii', 'ignore') | a7c01471245e738fce8ab441e3a23cc0a67c71be | 1,339 |
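# Example of the fold performed above: accents are stripped and case is lowered.
# Note the result is a bytes object, so compare like with like.
print(simplify_name("Michèle"))                               # b'michele'
print(simplify_name("Michèle") == simplify_name("MICHELE"))   # True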
async def parse_regex(opsdroid, skills, message):
"""Parse a message against all regex skills."""
matched_skills = []
for skill in skills:
for matcher in skill.matchers:
if "regex" in matcher:
opts = matcher["regex"]
matched_regex = await match_regex(message.text, opts)
if matched_regex:
message.regex = matched_regex
for regroup, value in matched_regex.groupdict().items():
message.update_entity(regroup, value, None)
matched_skills.append(
{
"score": await calculate_score(
opts["expression"], opts["score_factor"]
),
"skill": skill,
"config": skill.config,
"message": message,
}
)
return matched_skills | aa3ad8ff48854b974ba90135b510074644e10028 | 1,340 |
import numpy as np
from scipy.interpolate import interp1d
def interpolate_minusones(y):
"""
    Replace -1 entries in the array with values carried forward from the nearest
    preceding valid (non -1) point; leading gaps are filled with 0.
    y is a [t] x [n] array
"""
x = np.arange(y.shape[0])
ynew = np.zeros(y.shape)
for ni in range(y.shape[1]):
idx = np.where(y[:,ni] != -1)[0]
if len(idx)>1:
last_value = y[idx[-1],ni]
interp = interp1d(x[idx],y[idx,ni], kind='previous',fill_value=(0,last_value),bounds_error = False)
ynew[:,ni] = interp(x)
elif len(idx) == 1:
last_value = y[idx[-1],ni]
ynew[:,ni] = last_value
return ynew | db3e347ba75a39f40cd3ee90481efe8392ce08ed | 1,341 |
def precision(y, yhat, positive=True):
"""Returns the precision (higher is better).
:param y: true function values
:param yhat: predicted function values
:param positive: the positive label
:returns: number of true positive predictions / number of positive predictions
"""
table = contingency_table(y, yhat, positive)
return _precision(table) | f643631781565ddb049c1c4d22c6e5ea64ce4a22 | 1,342 |
def add_posibility_for_red_cross(svg):
"""add a symbol which represents a red cross in a white circle
Arguments:
svg {Svg} -- root element
"""
symbol = Svg(etree.SubElement(svg.root,
'symbol',
{'id': 'red_cross',
'view_box': '0 0 20 20'
}))
symbol.create_circle(
[10, 10],
9,
"red_cross_circle",
fill_colour="white",
additional_arguments={'stroke': 'black'}
)
symbol.create_rectangle(
[4, 8], [12, 4], "red_cross_rect_1", fill_colour="red")
symbol.create_rectangle(
[8, 4], [4, 12], "red_cross_rect_2", fill_colour="red")
return symbol | df621fb907187a36cb3f7387047a8cda6cb42992 | 1,343 |
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testDummy"
],
"zzcomponent":
[ "testDummy"
],
"integration":
[ "testLoad"
, "testStartStop"
, "testVolume"
],
"pending":
[ "testDummy"
]
}
return TestUtils.getTestSuite(testITunes, testdict, select=select) | 529cb8d6312eaa129a52f1679294d85c1d9bfbd0 | 1,345 |
import ctypes
def dasopw(fname):
"""
Open a DAS file for writing.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopw_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int(0)
libspice.dasopw_c(fname, ctypes.byref(handle))
return handle.value | 63f164ba82e6e135763969c8823d7eb46dd52c0e | 1,346 |
import re
def is_ncname(value):
"""
BNode identifiers must be valid NCNames.
From the `W3C RDF Syntax doc <http://www.w3.org/TR/REC-rdf-syntax/#section-blank-nodeid-event>`_
"The value is a function of the value of the ``identifier`` accessor.
The string value begins with "_:" and the entire value MUST match
the `N-Triples nodeID <http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#nodeID>`_ production".
The nodeID production is specified to be a `name <http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#name>`_
name ::= [A-Za-z][A-Za-z0-9]*
>>> assert is_ncname('') == False
>>> assert is_ncname('999') == False
>>> assert is_ncname('x') == True
>>> assert is_ncname(u'x') == True
>>> assert is_ncname(u'Michèle') == True
However, vanilla uuid4s are not necessarily NCNames:
>>> assert is_ncname('6fa459ea-ee8a-3ca4-894e-db77e160355e') == False
So this has to be finessed with an appropriate prefix ...
>>> assert is_ncname("urn:uuid:"+str(uuid4())) == True
>>> from rdflib import BNode
>>> assert is_ncname(BNode(_sn_gen=bnode_uuid, _prefix="urn:uuid:")) == True
"""
    ncnameexp = re.compile("[A-Za-z][A-Za-z0-9]*")
    return ncnameexp.match(value) is not None | 78cbfe9209b9f39cd6bc90c0ed5c8e5291bc1562 | 1,347
def health_func() -> str:
"""Give the user the API health."""
return "ok" | 5c14795d9d0560ddb34b193575917ac184dbe8a3 | 1,348 |
def queue_worker(decoy: Decoy) -> QueueWorker:
"""Get a mock QueueWorker."""
return decoy.mock(cls=QueueWorker) | aec88b037e393b195abd0c2704e8f2784e9a9f8d | 1,349 |
import astra
def astra_fp_3d(volume, proj_geom):
    """
    Forward-project a 3D volume into a 3D sinogram with ASTRA (GPU code path).
    :param proj_geom: ASTRA projection geometry
    :param volume: 3D volume array, shape (slices, detector_size, detector_size)
    :return: 3D sinogram
    """
detector_size = volume.shape[1]
slices_number = volume.shape[0]
rec_size = detector_size
vol_geom = build_volume_geometry_3d(rec_size, slices_number)
sinogram_id = astra.data3d.create('-sino', proj_geom)
# Create a data object for the reconstruction
rec_id = astra.data3d.create('-vol', vol_geom, data=volume)
# Set up the parameters for a reconstruction algorithm using the GPU
cfg = astra.astra_dict('FP3D_CUDA')
cfg['VolumeDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['option'] = {}
alg_id = astra.algorithm.create(cfg)
astra.algorithm.run(alg_id, 1)
res_sino = astra.data3d.get(sinogram_id)
# Clean up. Note that GPU memory is tied up in the algorithm object,
# and main RAM in the data objects.
astra.algorithm.delete(alg_id)
astra.data3d.delete(rec_id)
astra.data3d.delete(sinogram_id)
astra.clear()
return res_sino | 7066bb61dc29fac331ffb13c6fe1432349eac185 | 1,350 |
def get_wf_neb_from_images(
parent,
images,
user_incar_settings,
additional_spec=None,
user_kpoints_settings=None,
additional_cust_args=None,
):
"""
Get a CI-NEB workflow from given images.
Workflow: NEB_1 -- NEB_2 - ... - NEB_n
Args:
parent (Structure): parent structure.
images ([Structure]): All images and two endpoints.
user_incar_settings([dict]): Additional user_incar_settings. Note that the order of the
list is set as: "parent", "ep_relax", "neb1", "neb2" etc., which contains
at least three elements. The first dict is for parent structure relaxation,
the second dict is for endpoints relaxation, and the rest are for NEB
calculations. For example, [{}, {}, {"IOPT": 7}, {"IOPT": 1}]. Besides,
user_incar_settings is used to determine how many NEB rounds will be. Default
is [{}, {}, {}].
additional_spec (dict): User spec settings to overwrite default_spec.
user_kpoints_settings ([dict]): Additional user_kpoints_settings, which contains at at
least three elements, which is similar to user_incar_settings. For example,
[{}, {}, {"grid_density": 100}] for the workflow from the parent structure
relaxation, then the endpoint relaxation followed by one-round NEB simulation.
Default values depend on the selected VaspInputSet.
additional_cust_args ([dict]): Optional parameters for RunVaspCustodian, same structure
with user_incar_settings and user_kpoints_settings.
Returns:
Workflow
"""
spec = _update_spec(additional_spec)
spec["parent"] = parent.as_dict()
assert isinstance(images, list) and len(images) >= 3
spec["neb"] = [[s.as_dict() for s in images]]
spec["_queueadapter"] = {
"nnodes": str(len(images) - 2),
"nodes": str(len(images) - 2),
}
if spec["neb_walltime"] is not None:
spec["_queueadapter"].update({"walltime": spec.get("neb_walltime")})
wf_name = spec["wf_name"]
# Assume one round NEB if user_incar_settings not provided.
user_incar_settings = user_incar_settings or [{}, {}, {}]
neb_round = len(user_incar_settings[2:])
user_kpoints_settings = user_kpoints_settings or [{"grid_density": 1000}] * (
neb_round + 2
)
additional_cust_args = additional_cust_args or [{}] * (neb_round + 2)
fws = []
# Get neb fireworks.
for n in range(neb_round):
fw = NEBFW(
spec=spec,
neb_label=str(n + 1),
from_images=True,
user_incar_settings=user_incar_settings[n + 2],
user_kpoints_settings=user_kpoints_settings[n + 2],
additional_cust_args=additional_cust_args[n + 2],
)
fws.append(fw)
# Build fireworks link
links_dict = {}
if neb_round >= 2:
for i in range(neb_round - 1):
links_dict[fws[i]] = [fws[i + 1]]
workflow = Workflow(fws, name=wf_name, links_dict=links_dict)
return workflow | 15ed110d3685c9d8de216733e8d87f6c07580529 | 1,351 |
def categorize_folder_items(folder_items):
"""
Categorize submission items into three lists: CDM, PII, UNKNOWN
:param folder_items: list of filenames in a submission folder (name of folder excluded)
:return: a tuple with three separate lists - (cdm files, pii files, unknown files)
"""
found_cdm_files = []
unknown_files = []
found_pii_files = []
for item in folder_items:
if _is_cdm_file(item):
found_cdm_files.append(item)
elif _is_pii_file(item):
found_pii_files.append(item)
else:
if not (_is_known_file(item) or _is_string_excluded_file(item)):
unknown_files.append(item)
return found_cdm_files, found_pii_files, unknown_files | 14e840817cce4cc91ed50d6d9dcfa1c19a2bcbeb | 1,352 |
def _broadcast_all(indexArrays, cshape):
"""returns a list of views of 'indexArrays' broadcast to shape 'cshape'"""
result = []
for i in indexArrays:
if isinstance(i, NDArray) and i._strides is not None:
result.append(_broadcast(i, cshape))
else:
result.append(i)
return tuple(result) | b7b98245bc534074e408d5c9592bf68ae53f580e | 1,353 |
import numpy as np
def _none_tozero_array(inarray, refarray):
    """Replace a ``None`` input with an array of zeros shaped like ``refarray``.
    If ``inarray`` is given but is not a valid array, ``None`` is returned instead.
    Attributes
    inarray: numpy array
    refarray: numpy array
    """
if inarray is None:
if _check_ifarrays([refarray]):
inarray = np.zeros_like(refarray)
else:
if not _check_ifarrays([inarray]):
inarray = None
return inarray | 9b0852655a13b572106acc809d842ca38d24e707 | 1,354 |
def dpuGetExceptionMode():
"""
Get the exception handling mode for runtime N2Cube
Returns: Current exception handing mode for N2Cube APIs.
Available values include:
- N2CUBE_EXCEPTION_MODE_PRINT_AND_EXIT
- N2CUBE_EXCEPTION_MODE_RET_ERR_CODE
"""
return pyc_libn2cube.pyc_dpuGetExceptionMode() | fd33aba868a05f3cc196c89e3c2d428b0cce108a | 1,355 |
import re
def clean_links(links, category):
"""
clean up query fields for display as category buttons to browse by
:param links: list of query outputs
:param category: category of search from route
:return: list of cleansed links
"""
cleansedlinks = []
for item in links:
# remove blanks
if item == "" or item == "-":
continue
else:
#crop chromosome location output to eg 13p (check if substrate)
if category[:3] == 'Sub':
                item = re.search(r"[\dXY]+[pq]", item).group(0)
# remove forward slashes
item = item.replace("/", "&F&")
if item not in cleansedlinks:
cleansedlinks.append(item)
# sort the links
cleansedlinks.sort()
return cleansedlinks | f43af81a8ef8e5520726e886dd74d991c999a32d | 1,356 |
from typing import Any
from typing import Optional
def as_bool(value: Any, schema: Optional[BooleanType] = None) -> bool:
"""Parses value as boolean"""
schema = schema or BooleanType()
value = value.decode() if isinstance(value, bytes) else value
if value:
value = str(value).lower()
value = BOOLEANS.get(value)
validation.validate(schema.as_dict(), value)
return value | 7085b7bc7eccb2db95f5645b358e4940914f68f9 | 1,357 |
from typing import Dict
from typing import List
from typing import Tuple
def get_raw_feature(
column: Text, value: slicer_lib.FeatureValueType,
boundaries: Dict[Text, List[float]]
) -> Tuple[Text, slicer_lib.FeatureValueType]:
"""Get raw feature name and value.
Args:
column: Raw or transformed column name.
value: Raw or transformed column value.
boundaries: Dictionary containing quantile boundaries of features keyed by
column name.
Returns:
Tuple of raw column name and raw column value.
"""
if column.startswith(auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX):
raw_feature = column[len(auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX
):]
(start, end) = auto_slice_key_extractor.get_bucket_boundary(
value, boundaries[raw_feature])
return (raw_feature, _format_boundary(start, end))
return (column, value) | 29323b8e1a7ef32f19ff94f31efca20567780aa4 | 1,358 |
from typing import Union
def ndmi(nir: Union[xr.DataArray, np.ndarray, float, int],
swir1: Union[xr.DataArray, np.ndarray, float, int]) -> \
Union[xr.DataArray, np.ndarray, float, int]:
"""
Normalized difference moisture index.
Sentinel-2: B8A, B11
Parameters
----------
nir : xr.DataArray or np.ndarray or float or int
Near infrared band acquisition.
swir1 : xr.DataArray or np.ndarray or float or int
Short wave infrared band acquisition.
Returns
-------
same as input:
Normalised difference moisture index.
"""
return utils.normalized_difference(nir, swir1) | f66a68cd75d9c030c0257e1d543c2caf9efcf652 | 1,359 |
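# The index is just a normalized difference; a minimal numpy sketch of what
# utils.normalized_difference is assumed to compute for the call above
# (the reflectance values are made up).
import numpy as np

nir = np.array([0.45, 0.50])     # e.g. Sentinel-2 B8A
swir1 = np.array([0.30, 0.20])   # e.g. Sentinel-2 B11
print((nir - swir1) / (nir + swir1))   # NDMI values, bounded in [-1, 1]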
def _collect_data_and_enum_definitions(parsed_models: dict) -> dict[str, dict]:
"""
Collect all data and enum definitions that are referenced as interface messages or as a nested type within an interface message.
Args:
parsed_models: A dict containing models parsed from an AaC yaml file.
Returns:
A dict of data message type keys to data message parsed model values
"""
def collect_nested_types(interface_data_message_types: list[str]):
nested_types = []
for message_type in interface_data_message_types:
data_model = parsed_models[message_type]["data"]
for field in data_model.get("fields"):
field_type = field.get("type")
if field_type in parsed_models:
nested_types.append(field_type)
return list(set(nested_types))
def collect_behaviors(model_with_behaviors):
return util.search(model_with_behaviors, ["model", "behavior"])
def convert_behavior_io_to_data_type(behavior_io_model):
return behavior_io_model.get("type")
def collect_data_message_types(behavior_model):
inputs = behavior_model.get("input") or []
outputs = behavior_model.get("output") or []
return list(map(convert_behavior_io_to_data_type, inputs + outputs))
model_definitions = util.get_models_by_type(parsed_models, "model")
behaviors = list(flatten(map(collect_behaviors, model_definitions.values())))
interface_data_message_types = list(set(flatten(map(collect_data_message_types, behaviors))))
all_definitions_types_to_generate = interface_data_message_types + collect_nested_types(interface_data_message_types)
return {data_message_type: parsed_models[data_message_type] for data_message_type in all_definitions_types_to_generate} | 0d561003c8cdbe7d2eb7df2f03d5939f70d81467 | 1,360 |
def _list_goals(context, message):
"""Show all installed goals."""
context.log.error(message)
# Execute as if the user had run "./pants goals".
return Phase.execute(context, 'goals') | 5e823770528e97b4254e426a2d99113d119368b0 | 1,361 |
def values(df, varname):
"""Values and counts in index order.
df: DataFrame
    varname: string column name
returns: Series that maps from value to frequency
"""
return df[varname].value_counts().sort_index() | ea548afc8e0b030e441baa54abad32318c9c007f | 1,362 |
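# Usage sketch on a tiny DataFrame: counts come back ordered by the index
# (the sorted column values), not by frequency.
import pandas as pd

df = pd.DataFrame({"grade": ["B", "A", "B", "C"]})
print(values(df, "grade"))   # A -> 1, B -> 2, C -> 1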
def get_or_none(l, n):
"""Get value or return 'None'"""
try:
return l[n]
except (TypeError, IndexError):
return 'None' | c46a0f4c8edc9286b0122f1643e24a04113a5bfc | 1,363 |
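# Usage sketch; note the fallback is the string 'None', not the None object.
print(get_or_none([10, 20, 30], 1))   # 20
print(get_or_none([10, 20, 30], 9))   # 'None' (IndexError swallowed)
print(get_or_none(None, 0))           # 'None' (TypeError swallowed)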
def pfam_clan_to_pdb(clan):
"""get a list of associated PDB ids for given pfam clan access key.
:param clan: pfam accession key of clan
:type clan: str
:return: List of associated PDB ids
:rettype:list"""
url='http://pfam.xfam.org/clan/'+clan+'/structures'
    pattern='/structure/[A-Z0-9]{4}'
return _xfam_to(url,pattern) | 820e8a058edfeee256ab01281020c6e38e2d7c6d | 1,364 |
def fib(n):
"""Compute the nth Fibonacci number.
>>> fib(8)
21
"""
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-2) + fib(n-1) | 0db631be60754376e1a9287a4486ceb5ad7e392f | 1,365 |
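# The recursion above recomputes subproblems and takes exponential time in n;
# an iterative sketch for comparison that returns the same values.
def fib_iter(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

assert fib_iter(8) == 21 == fib(8)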
from typing import List
from typing import Union
import numpy as np
from numpy.typing import NDArray  # assumed source of the NDArray alias used below
def score_tours_absolute(problems: List[N_TSP], tours: List[Union[int, NDArray]]) -> NDArray:
"""Calculate tour lengths for a batch of tours.
Args:
problems (List[N_TSP]): list of TSPs
tours (List[Union[int, NDArray]]): list of tours (in either index or segment format)
Returns:
NDArray: tour lengths
"""
    result = np.ndarray((len(problems),), dtype=float)
for i, (p, t) in enumerate(zip(problems, tours)):
result[i] = p.score(t)
return result | b13ad2df2bfaf58f2b6989f2f2e67d917475b5bb | 1,367 |
from typing import Any, Callable, Iterable
# Assumed aliases; the originals are not included in this snippet.
Pred = Callable[[Any], bool]
Seq = Iterable
def has(pred: Pred, seq: Seq) -> bool:
"""
Return True if sequence has at least one item that satisfy the predicate.
"""
for x in seq:
if pred(x):
return True
return False | bc41ceb21804cd273d0c2a71327f63f2269763d9 | 1,368 |
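# Usage sketch; the helper behaves like the builtin any() applied to pred over seq,
# stopping at the first satisfying item.
print(has(lambda x: x % 2 == 0, [1, 3, 5]))   # False
print(has(str.isdigit, "abc123"))             # True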
from typing import Optional
from typing import Union
from typing import List
from typing import Dict
from typing import Any
import re
import datetime
def _get_dataset_domain(
dataset_folder: str,
is_periodic: bool,
spotlight_id: Optional[Union[str, List]] = None,
time_unit: Optional[str] = "day",
):
"""
    Returns a domain for a given dataset as identified by a folder. If
    `is_periodic` is True, only the min/max dates of the domain are returned;
    otherwise ALL dates available for that dataset/spotlight will be returned.
    Params:
    ------
    dataset_folder (str): dataset folder to search within
    is_periodic (bool): whether the dataset is periodic (reduces the domain
        to its min/max dates)
    spotlight_id (Optional[str | List]): id (or list of ids) of a spotlight
        to restrict the domain search to.
    time_unit (Optional[str] - one of ["day", "month"]):
        Whether the {date} part of the S3 filenames should be matched
        as YYYY_MM_DD (day) or YYYYMM (month)
Return:
------
List[datetime]
"""
s3_keys_args: Dict[str, Any] = {"prefix": dataset_folder}
if spotlight_id:
s3_keys_args["spotlight_id"] = spotlight_id
keys = _gather_s3_keys(**s3_keys_args)
if not keys:
raise NoKeysFoundForSpotlight
dates = []
for key in keys:
# matches either dates like: YYYYMM or YYYY_MM_DD
pattern = re.compile(
r"[^a-zA-Z0-9]((?P<YEAR>\d{4})_(?P<MONTH>\d{2})_(?P<DAY>\d{2}))[^a-zA-Z0-9]"
)
if time_unit == "month":
pattern = re.compile(
r"[^a-zA-Z0-9](?P<YEAR>(\d{4}))(?P<MONTH>(\d{2}))[^a-zA-Z0-9]"
)
        result = pattern.search(key)
if not result:
continue
date = None
try:
date = datetime.datetime(
int(result.group("YEAR")),
int(result.group("MONTH")),
int(result.groupdict().get("DAY", 1)),
)
except ValueError:
# Invalid date value matched - skip date
continue
# Some files happen to have 6 consecutive digits (likely an ID of sorts)
# that sometimes gets matched as a date. This further restriction of
# matched timestamps will reduce the number of "false" positives (although
# ID's between 201011 and 203011 will slip by)
if not datetime.datetime(2010, 1, 1) < date < datetime.datetime(2030, 1, 1):
continue
dates.append(date.strftime("%Y-%m-%dT%H:%M:%SZ"))
if is_periodic and len(dates):
return [min(dates), max(dates)]
return sorted(set(dates)) | bc230145eee3f60491b4c42453fcbf5145ac7761 | 1,369 |