content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def data_check(data):
"""Check the data in [0,1]."""
return 0 <= float(data) <= 1 | b292ef07a024e53d82e706f0d88d50d6318d6593 | 1,900 |
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
def tokenize(text):
"""
Tokenization function to process text data
Args:
text: String. disaster message.
Returns:
clean_tokens: list. token list from text message.
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# get list of all urls using regex
detected_urls = re.findall(url_regex, text)
# replace each url in text string with placeholder
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
# tokenize text
tokens = word_tokenize(text)
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens | d5ee0929c0b6fad243b87c2b7e82270859b9b3f3 | 1,901 |
def get_symbol_historical(symbol_name):
"""Returns the available historical data for a symbol as a dictionary."""
# Get the data
symbol_data = get_symbol_data(symbol_name)
# Build the response
response = symbol_data.to_dict(orient="records")
return response | 37578652a13ff2b705c46185aba8cd47a73dc6e0 | 1,902 |
def guesses(word):
"""
return all of the first and second order guesses for this word
"""
result = list(known(*first_order_variants(word)))
result.sort()
return result | 9a74372e701d526d74df1df613b8648f47830202 | 1,903 |
import numpy as np
def em(X, sf, inits, K, L, n_iter=100, n_inner_iter=50, tol=1e-5, zero_inflated=True):
"""
run EM algorithm on the given init centers
return the clustering labels with the highest log likelihood
"""
# add prepare reduced data here
print("start em algorithm")
res = _em(X, sf, inits, K, L, n_iter, n_inner_iter, tol, zero_inflated)
max_idx = np.argmax([r['llf'] for r in res])
sol = res[max_idx]
em_labels = np.argmax(sol['rho'], axis=1).flatten()
sol['labels'] = em_labels
return sol | 7a53c14caf56958fed80241bf347071b84a62280 | 1,904 |
def is_disaggregate(data, raw_fuel_sectors_enduses):
"""TODO: Disaggregate fuel for sector and enduses with floor
area and GVA for sectors and enduses (IMPROVE)
#TODO: DISAGGREGATE WITH OTHER DATA
"""
is_fueldata_disagg = {}
national_floorarea_sector = 0
for region_name in data['lu_reg']:
national_floorarea_sector += sum(data['ss_sector_floor_area_by'][region_name].values())
# Iterate regions
for region_name in data['lu_reg']:
is_fueldata_disagg[region_name] = {}
# Iterate sector
for sector in data['is_sectors']:
is_fueldata_disagg[region_name][sector] = {}
# Sector specific info
reg_floorarea_sector = sum(data['ss_sector_floor_area_by'][region_name].values())
# Iterate enduse
for enduse in data['is_all_enduses']:
national_fuel_sector_by = raw_fuel_sectors_enduses[sector][enduse]
#print("national_fuel_sector_by: " + str(national_fuel_sector_by))
# ----------------------
# Disaggregating factors
# TODO: IMPROVE. SHOW HOW IS DISAGGREGATED
reg_disaggregation_factor = (1 / national_floorarea_sector) * reg_floorarea_sector
# Disaggregated national fuel
reg_fuel_sector_enduse = reg_disaggregation_factor * national_fuel_sector_by
is_fueldata_disagg[region_name][sector][enduse] = reg_fuel_sector_enduse
return is_fueldata_disagg | 64111bb23099526aae7e2bf7fa5aefcd6225fd7c | 1,905 |
import jax.numpy as jnp
def update_logger(evo_logger, x, fitness, memory, top_k, verbose=False):
""" Helper function to keep track of top solutions. """
# Check if there are solutions better than current archive
vals = jnp.hstack([evo_logger["top_values"], fitness])
params = jnp.vstack([evo_logger["top_params"], x])
concat_top = jnp.hstack([jnp.expand_dims(vals, 1), params])
sorted_top = concat_top[concat_top[:, 0].argsort()]
# Importantly: Params are stored as flat vectors
evo_logger["top_values"] = sorted_top[:top_k, 0]
evo_logger["top_params"] = sorted_top[:top_k, 1:]
evo_logger["log_top_1"].append(evo_logger["top_values"][0])
evo_logger["log_top_mean"].append(jnp.mean(evo_logger["top_values"]))
evo_logger["log_top_std"].append(jnp.std(evo_logger["top_values"]))
evo_logger["log_gen_1"].append(jnp.min(fitness))
evo_logger["log_gen_mean"].append(jnp.mean(fitness))
evo_logger["log_gen_std"].append(jnp.std(fitness))
evo_logger["log_sigma"].append(memory["sigma"])
evo_logger["log_gen"].append(memory["generation"])
if verbose:
print(evo_logger["log_gen"][-1], evo_logger["top_values"])
return evo_logger | 8efd1bbc4f0c1cde17e2ef425ae82cf3f5967df3 | 1,906 |
def ae(nb_features,
input_shape,
nb_levels,
conv_size,
nb_labels,
enc_size,
name='ae',
prefix=None,
feat_mult=1,
pool_size=2,
padding='same',
activation='elu',
use_residuals=False,
nb_conv_per_level=1,
batch_norm=None,
enc_batch_norm=None,
ae_type='conv', # 'dense', or 'conv'
enc_lambda_layers=None,
add_prior_layer=False,
add_prior_layer_reg=0,
use_logp=True,
conv_dropout=0,
include_mu_shift_layer=False,
single_model=False, # whether to return a single model, or a tuple of models that can be stacked.
final_pred_activation='softmax',
do_vae=False):
"""
Convolutional Auto-Encoder.
Optionally variational.
Optionally with a dense middle layer; "mostly" convolutional in that the inner
encoding can (optionally) be constructed via dense features.
Parameters:
do_vae (bool): whether to do a variational auto-encoder or not.
enc_lambda_layers functions to try:
K.softsign
a = 1
longtanh = lambda x: K.tanh(x) * K.log(2 + a * abs(x))
"""
# naming
model_name = name
# volume size data
ndims = len(input_shape) - 1
if isinstance(pool_size, int):
pool_size = (pool_size,) * ndims
# get encoding model
enc_model = conv_enc(nb_features,
input_shape,
nb_levels,
conv_size,
name=model_name,
feat_mult=feat_mult,
pool_size=pool_size,
padding=padding,
activation=activation,
use_residuals=use_residuals,
nb_conv_per_level=nb_conv_per_level,
conv_dropout=conv_dropout,
batch_norm=batch_norm)
# middle AE structure
if single_model:
in_input_shape = None
in_model = enc_model
else:
in_input_shape = enc_model.output.shape.as_list()[1:]
in_model = None
mid_ae_model = single_ae(enc_size,
in_input_shape,
conv_size=conv_size,
name=model_name,
ae_type=ae_type,
input_model=in_model,
batch_norm=enc_batch_norm,
enc_lambda_layers=enc_lambda_layers,
include_mu_shift_layer=include_mu_shift_layer,
do_vae=do_vae)
# decoder
if single_model:
in_input_shape = None
in_model = mid_ae_model
else:
in_input_shape = mid_ae_model.output.shape.as_list()[1:]
in_model = None
dec_model = conv_dec(nb_features,
in_input_shape,
nb_levels,
conv_size,
nb_labels,
name=model_name,
feat_mult=feat_mult,
pool_size=pool_size,
use_skip_connections=False,
padding=padding,
activation=activation,
use_residuals=use_residuals,
final_pred_activation='linear',
nb_conv_per_level=nb_conv_per_level,
batch_norm=batch_norm,
conv_dropout=conv_dropout,
input_model=in_model)
if add_prior_layer:
dec_model = add_prior(dec_model,
[*input_shape[:-1], nb_labels],
name=model_name,
prefix=model_name + '_prior',
use_logp=use_logp,
final_pred_activation=final_pred_activation,
add_prior_layer_reg=add_prior_layer_reg)
if single_model:
return dec_model
else:
return (dec_model, mid_ae_model, enc_model) | ab5bbed13e5636ab506612776920eaffa67b8b3e | 1,907 |
import os
import shlex
def read_authorized_keys(username=None):
"""Read public keys from specified user's authorized_keys file.
args:
username (str): username.
returns:
list: Authorised keys for the specified user.
"""
authorized_keys_path = '{0}/.ssh/authorized_keys'.format(os.path.expanduser('~{0}'.format(username)))
rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(username, rnd_chars)
authorized_keys = list()
copy_result = execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), authorized_keys_path, tmp_authorized_keys_path))))
result_message = copy_result[0][1].decode('UTF-8')
if 'you must have a tty to run sudo' in result_message: # pragma: no cover
raise OSError("/etc/sudoers is blocking sudo. Remove entry: 'Defaults requiretty'.")
elif 'No such file or directory' not in result_message:
execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_authorized_keys_path))))
with open(tmp_authorized_keys_path) as keys_file:
for key in keys_file:
authorized_keys.append(PublicKey(raw=key))
execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path))))
return authorized_keys | 700c61571a2526d769492ade4fea4b395ff7d6ae | 1,908 |
import os
from setuptools_scm import get_version
def get_fprime_version():
""" Gets the fprime version using setuptools_scm """
# First try to read the SCM version
try:
return get_version(root=os.sep.join([".."] * ROOT_PARENT_COUNT), relative_to=__file__)
# Fallback to a specified version when SCM is unavailable
except LookupError:
return "1.5.4" | 940ab7ffbf57d1f415b03b61b9169c1062fdb5c1 | 1,909 |
def parser_config(p):
"""JLS file info."""
p.add_argument('--verbose', '-v',
action='store_true',
help='Display verbose information.')
p.add_argument('filename',
help='JLS filename')
return on_cmd | ea9e20fd055933d7e1b1b5f92da76875f7f318e6 | 1,910 |
def decentralized_training_strategy(communication_rounds, epoch_samples, batch_size, total_epochs):
"""
Split one epoch into r rounds and perform model aggregation
:param communication_rounds: the communication rounds in training process
:param epoch_samples: the samples for each epoch
:param batch_size: the batch_size for each epoch
:param total_epochs: the total epochs for training
:return: batch_per_epoch, total_epochs with communication rounds r
"""
if communication_rounds >= 1:
epoch_samples = round(epoch_samples / communication_rounds)
total_epochs = round(total_epochs * communication_rounds)
batch_per_epoch = round(epoch_samples / batch_size)
elif communication_rounds in [0.2, 0.5]:
total_epochs = round(total_epochs * communication_rounds)
batch_per_epoch = round(epoch_samples / batch_size)
else:
raise NotImplementedError(
"Communication rounds {} is illegal; expected a value >= 1, or 0.2 or 0.5".format(communication_rounds))
return batch_per_epoch, total_epochs | 3a743208af50d7c7865d5d5f86a4f58b0ba98a4d | 1,911 |
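A hypothetical usage sketch (assuming the function above is in scope): with 2 communication rounds, 10,000 samples per epoch and a batch size of 32, each round-sized epoch covers 5,000 samples, so the schedule becomes 156 batches per epoch over 20 epochs.
# Hypothetical example values, not from the original source.
batch_per_epoch, total_epochs = decentralized_training_strategy(
    communication_rounds=2, epoch_samples=10000, batch_size=32, total_epochs=10)
print(batch_per_epoch, total_epochs)  # -> 156 20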
def create_config_file_lines():
"""Wrapper for creating the initial config file content as lines."""
lines = [
"[default]\n",
"config_folder = ~/.zettelkasten.d\n",
"\n",
"def_author = Ammon, Mathias\n",
"def_title = Config Parsed Test Title\n",
"def_location_specifier = None\n",
"\n",
"location = ~/zettelkasten\n",
"\n",
"initial_folder_structure = \n",
" lobby,\n",
" %(sources_directory)s,\n",
" _sources/audios,\n",
" _sources/images,\n",
" _sources/pdfs,\n",
" _sources/videos\n",
"\n",
"name_sep = /\n",
"\n",
"required_attributes = \n",
" uid,\n",
" category,\n",
" subcategory\n",
"\n",
"sources_directory = _sources\n",
"\n",
"styles_file = styles.cfg\n",
"\n",
"reserved_folder_names = \n",
" lobby,\n",
" %(sources_directory)s,\n",
" pytest_dir,\n",
" doctest_dir,\n",
" .zettelkasten.d\n",
"\n",
"zettelkasten_bib_file = zettelkasten.bib\n",
"\n",
"[source_file_formats]\n",
"audios = \n",
" mp3,\n",
" wav\n",
"images = \n",
" webp,\n",
" jpg,\n",
" jpeg,\n",
" png\n",
"pdfs =\n",
" pdf,\n",
" odt\n",
"videos =\n",
" mkv,\n",
" webm,\n",
" mp4\n",
"\n",
"[zettel_meta_attribute_defaults]\n",
"# required for zettel adding to work \n",
"category= None\n",
"subcategory= None\n",
"# optional\n",
"author = Mathias Ammon\n",
"topics =\n",
"tags =\n",
"doc = today\n",
"\n",
"[zettel_meta_attribute_labels]\n",
"# required for zettel adding to work\n",
"uid = #+Title:\n",
"category = #+Category:\n",
"subcategory = #+Subcategory:\n",
"# optional\n",
"author = #+Author:\n",
"doc = #+DOC:\n",
"dole = #+DOLE:\n",
"topics = #+Topics:\n",
"tags = #+Tags:\n",
]
return lines | d0d1057c3f450636279a8df9d4a39977f1eeef42 | 1,912 |
def p_planes_tangent_to_cylinder(base_point, line_vect, ref_point, dist, ):
"""find tangent planes of a cylinder passing through a given point ()
.. image:: ../images/plane_tangent_to_one_cylinder.png
:scale: 80 %
:align: center
Parameters
----------
base_point : point
point M
line_vect : vector
direction of the existing bar's axis, direction [the other pt, base_pt], **direction very important!**
ref_point : point
point Q
dist : float
cylinder radius
Returns
-------
list of two [ref_point, local_y, local_x]
local x = QB
local_y // line_vect
"""
l_vect = normalize_vector(line_vect)
tangent_pts = lines_tangent_to_cylinder(base_point, line_vect, ref_point, dist)
if tangent_pts is None:
return None
base_pt, upper_tang_pt, lower_tang_pt = tangent_pts
r1 = subtract_vectors(add_vectors(base_pt, upper_tang_pt), ref_point)
r1 = normalize_vector(r1)
r2 = subtract_vectors(add_vectors(base_pt, lower_tang_pt), ref_point)
r2 = normalize_vector(r2)
return [[ref_point, l_vect, r1], [ref_point, l_vect, r2]] | e8928e4314cadede97bef977c0348e32832157ad | 1,913 |
def BOPTools_AlgoTools3D_OrientEdgeOnFace(*args):
"""
* Get the edge <aER> from the face <aF> that is the same as the edge <aE>
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aER:
:type aER: TopoDS_Edge &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools3D_OrientEdgeOnFace(*args) | 31da8b90e4ad5838b94a0481d937104845de735c | 1,914 |
def create_store_from_creds(access_key, secret_key, region, **kwargs):
"""
Creates a parameter store object from the provided credentials.
Arguments:
access_key {string} -- The access key for your AWS account
secret_key {string} -- The secret key for you AWS account
region {string} -- The region you wish to connect to
Keyword Arguments (Optional):
session='session' {string} -- The session token you wish to use.
Returns:
Object -- An AWS parameter store object.
"""
session = kwargs.get('session') if 'session' in kwargs else ''
store = EC2ParameterStore(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session, #optional
region_name=region
)
return store | 8e0ec2a6579a6013d36b6933ee922a406730ee35 | 1,915 |
from collections import abc
import numpy as np
def are_objects_equal(object1, object2):
"""
compare two (collections of) arrays or other objects for equality. Ignores nan.
"""
if isinstance(object1, abc.Sequence):
items = zip(object1, object2)
elif isinstance(object1, dict):
items = [(value, object2[key]) for key, value in object1.items()]
else:
items = [(object1, object2)]
# equal_nan does not exist in array_equal in old numpy
npy_major_version = tuple(int(v) for v in np.__version__.split('.')[:2])
if npy_major_version < (1, 19):
fixed = [(np.nan_to_num(a1), np.nan_to_num(a2)) for a1, a2 in items]
return np.all([np.all(a1 == a2) for a1, a2 in fixed])
try:
return np.all(
[np.array_equal(a1, a2, equal_nan=True) for a1, a2 in items]
)
except TypeError:
# np.array_equal fails for arrays of type `object` (e.g: strings)
return np.all([a1 == a2 for a1, a2 in items]) | 94b4b9a9f42bc8b1dd44d5e010b422082452f649 | 1,916 |
def get_recipes_from_dict(input_dict: dict) -> dict:
"""Get recipes from dict
Attributes:
input_dict (dict): ISO_639_1 language code
Returns:
recipes (dict): collection of recipes for input language
"""
if not isinstance(input_dict, dict):
raise TypeError("Input is not type dict")
recipes = input_dict
return recipes | e710d9629d10897d4aae7bf3d5de5dbbe18196c5 | 1,917 |
def tasks_from_wdl(wdl):
"""
Return a dictionary of tasks contained in a .wdl file.
The values are task definitions within the wdl
"""
return scopes_from_wdl("task", wdl) | 24d302995dcfa274b4b04868f901f832b36ec5cd | 1,918 |
import traceback
import sys
def wrap_parse(content, args):
"""
Wraps a call to `parse` in a try/except block so that one can use a Pool
and still get decent error messages.
Arguments
---------
content: segments are strings
args: a namespace, see `parse`
Returns
-------
parse trees and time to parse
"""
if content is None or content.strip() == "":
return None
try:
trees = parse(content, args)
if len(trees)!=0:
return trees
else:
return None
except:
raise Exception(''.join(traceback.format_exception(*sys.exc_info()))) | 2e0a97c363be371fadbbabb22d83e1f4368205ad | 1,919 |
async def get_category_item_route(category_id: CategoryEnum, item_id: ObjectID,
db: AsyncIOMotorClient = Depends(get_database)) -> ItemInResponse:
"""Get the details about a particular item"""
_res = await db[category_id]["data"].find_one({"_id": item_id})
if _res:
return ItemInResponse(data=_res)
raise HTTPException(
status_code=404,
detail=f'ObjectID {item_id} not found in {category_id}') | 4feed87e3948994f8066268820355d9fdfe4999d | 1,920 |
import numpy as np
from numpy import linalg as la  # scipy.linalg provides a compatible svd as well
def weighted_SVD(matrix, error=None, full_matrices=False):
"""
Finds the most important modes of the given matrix given the weightings
given by the error.
matrix a horizontal rectangular matrix
error weighting applied to the dimension corresponding to the rows
"""
if type(error) is type(None):
error = np.ones(matrix.shape[0])
expanded_error = error[:,np.newaxis]
to_svd = matrix / expanded_error
(SVD_U, SVD_S, SVD_V_transpose) =\
la.svd(to_svd, full_matrices=full_matrices)
SVD_U = SVD_U * expanded_error
return SVD_U, SVD_S, SVD_V_transpose.T | 5ca0f54af765f0694fb572ee3b82f4d59642bb06 | 1,921 |
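A minimal sanity-check sketch, assuming the function above is in scope and that `la` refers to `numpy.linalg`: the returned factors should reconstruct the original matrix, since the error weighting is divided out before the SVD and multiplied back into U.
import numpy as np

# Hypothetical example data, not from the original source.
rng = np.random.default_rng(0)
matrix = rng.normal(size=(4, 6))                  # horizontal rectangular matrix
error = np.array([1.0, 2.0, 0.5, 1.5])            # per-row weighting
U, S, V = weighted_SVD(matrix, error)
print(np.allclose(U @ np.diag(S) @ V.T, matrix))  # -> True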
def ingredients():
"""Route to list all ingredients currently in the database.
"""
query = request.args.get("q")
ingredients = db.get_ingredient_subset_from_db(query)
return jsonify(ingredients) | 376bcb8e16c0379676f9748f4a2858ea39ca33ab | 1,922 |
import h5py
import numpy as np
import synergia
from mpi4py import MPI
def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles from an HDF-5 file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from h5 file: ", particles_file)
if myrank == 0:
#h5 = tables.open_file(particles_file)
h5 = h5py.File(particles_file)
# use explicit int conversion otherwise there seems to
# be a typepython->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = int(h5['particles'].shape[0])
if verbose:
print("Total of ", num_total_particles, " particles from file")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = h5['particles']
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 7):
raise RuntimeError("input data shape %s has incorrect number of particle coordinates" % repr(particles.shape))
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
except Exception as e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch | caaeb89920b3cc9e0b263c9b1fea5fc1615ad8b3 | 1,923 |
import logging
def readAndMapFile(path):
"""
Main file breaker - this takes a given file and breaks it into arbitrary
fragments, returning and array of fragments. For simplicity, this is breaking on
newline characters to start with. May have to be altered to work with puncuation
and/or special characters as needed.
"""
splitLines = []
def mapper(line):
strippedLine = line.strip()
if (len(strippedLine) > 0):
splitLines.append(strippedLine)
with open(path, "r", encoding=FILE_ENCODING) as f:
content = f.read()
items = content.split("\n")
for i in items:
logging.info("n-gram length = {}".format(len(i)))
mapper(i)
logging.info("Read {} lines of text from {}".format(len(splitLines), path))
return splitLines | 4a542d1a08fcd88a1660de360c15d87949eddf11 | 1,924 |
def fetch_git_logs(repo, from_date, to_date, args): # pragma: no cover
"""Fetch all logs from Gitiles for the given date range.
Gitiles does not natively support time ranges, so we just fetch
everything until the range is covered. Assume that logs are ordered
in reverse chronological order.
"""
cursor = ''
commit_date = to_date
data = []
while cursor is not None:
page = fetch_git_page(repo, cursor)
logs = page.get('log', [])
cursor = page.get('next')
for log in logs:
committer = log.get('committer', {})
commit_date = date_from_git(committer.get('time'))
if not commit_date:
continue
if commit_date > to_date:
continue
if commit_date < from_date:
break
files = set()
for entry in log.get('tree_diff', []):
files.add(entry['old_path'])
files.add(entry['new_path'])
if args.path_filter_include:
if not any(matches_path_filter(p, args.path_filter_include)
for p in files):
continue
if args.path_filter_exclude:
if any(matches_path_filter(p, args.path_filter_exclude)
for p in files):
continue
data.append({
'author': log.get('author', {}).get('email'),
'date': commit_date,
'commit-bot': bool('commit-bot' in committer.get('email', '')),
'revision': log.get('commit'),
})
if commit_date < from_date:
break
return data | 1164b373e9b8f7186165712f8ac9e5e3d1a1f10f | 1,925 |
import torch
def _gen_bfp_op(op, name, bfp_args):
"""
Do the 'sandwich'
With an original op:
out = op(x, y)
grad_x, grad_y = op_grad(grad_out)
To the following:
x_, y_ = input_op(x, y)
Where input_op(x, y) -> bfp(x), bfp(y)
and input_op_grad(grad_x, grad_y) -> bfp(grad_x), bfp(grad_y)
out_ = op(x_, y_)
out = output_op(out)
Where output_op(out) -> bfp(out)
and output_op_grad(grad_out) -> bfp(grad_out)
This way we guarantee that everything in and out of the forward and backward operations is
properly converted to bfp
"""
name = _get_op_name(name, **bfp_args)
class NewOpIn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, w):
return (float_to_bfp_batched(x, backward=False, **bfp_args), w)
@staticmethod
def backward(ctx, grad_x, grad_w):
return (grad_x, grad_w)
NewOpIn.__name__ = name + '_In'
new_op_in = NewOpIn.apply
class NewOpOut(torch.autograd.Function):
@staticmethod
def forward(ctx, op_out):
return op_out
@staticmethod
def backward(ctx, op_out_grad):
return float_to_bfp_batched(op_out_grad, backward=True, **bfp_args)
NewOpOut.__name__ = name + '_Out'
new_op_out = NewOpOut.apply
def new_op(x, w, *args, **kwargs):
x, w = new_op_in(x, w)
out = op(x, w, *args, **kwargs)
return new_op_out(out)
return new_op | d430bd9d090d0a47fa4d6a8c173c77b08e2fdb66 | 1,926 |
import numpy as np
def angleaxis_to_rotation_matrix(aa):
"""Converts the 3 element angle axis representation to a 3x3 rotation matrix
aa: numpy.ndarray with 1 dimension and 3 elements
Returns a 3x3 numpy.ndarray
"""
angle = np.sqrt(aa.dot(aa))
if angle > 1e-6:
c = np.cos(angle);
s = np.sin(angle);
u = np.array([aa[0]/angle, aa[1]/angle, aa[2]/angle]);
R = np.empty((3,3))
R[0,0] = c+u[0]*u[0]*(1-c); R[0,1] = u[0]*u[1]*(1-c)-u[2]*s; R[0,2] = u[0]*u[2]*(1-c)+u[1]*s;
R[1,0] = u[1]*u[0]*(1-c)+u[2]*s; R[1,1] = c+u[1]*u[1]*(1-c); R[1,2] = u[1]*u[2]*(1-c)-u[0]*s;
R[2,0] = u[2]*u[0]*(1-c)-u[1]*s; R[2,1] = u[2]*u[1]*(1-c)+u[0]*s; R[2,2] = c+u[2]*u[2]*(1-c);
else:
R = np.eye(3)
return R | 57d849f137684824aa23d393802dc247df987b59 | 1,927 |
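A quick hypothetical check of the Rodrigues-style construction above: an angle-axis vector of length pi/2 along z should rotate the x unit vector onto the y unit vector.
import numpy as np

aa = np.array([0.0, 0.0, np.pi / 2])   # rotation of pi/2 about the z-axis
R = angleaxis_to_rotation_matrix(aa)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # -> [0. 1. 0.]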
def sendOrderFAK(self, orderType, price, volume, symbol, exchange, stop=False):
"""发送委托"""
if self.trading:
# If stop is True, it means a local stop order is sent
req = {}
req['sid'] = self.sid
if orderType == CTAORDER_BUY:
req['direction'] = '0'
req['offset'] = '0'
elif orderType == CTAORDER_SELL:
req['direction'] = '1'
req['offset'] = '1'
elif orderType == CTAORDER_SELL_TODAY:
req['direction'] = '1'
req['offset'] = '3'
elif orderType == CTAORDER_SHORT:
req['direction'] = '1'
req['offset'] = '0'
elif orderType == CTAORDER_COVER:
req['direction'] = '0'
req['offset'] = '1'
elif orderType == CTAORDER_COVER_TODAY:
req['direction'] = '0'
req['offset'] = '3'
req['symbol'] = symbol
req['volume'] = volume
req['price'] = price
req['hedgeflag'] = '1'
req['ordertype'] = '1'
req['exchange'] = exchange
vtOrderID = ctaEngine.sendOrder(req)
return vtOrderID
else:
return None
# ---------------------------------------------------------------------- | 5b72ab3cdfa0b4412df2861d1e23a4a55f1d7206 | 1,928 |
import itertools
def unique(lst):
"""
:param lst: a list of lists
:return: a unique list of items appearing in those lists
"""
indices = sorted(list(range(len(lst))), key=lst.__getitem__)
indices = set(next(it) for k, it in
itertools.groupby(indices, key=lst.__getitem__))
return [x for i, x in enumerate(lst) if i in indices] | 0848d693681ff0f8bdbc0d0436b3d4450eee781e | 1,929 |
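A hypothetical usage sketch, assuming the function above is in scope: duplicates are dropped while one representative of each distinct inner list is kept.
print(unique([[1, 2], [3], [1, 2], [4, 5], [3]]))  # -> [[1, 2], [3], [4, 5]]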
import numpy as np
def max_frequency(sig, FS):
"""Compute max frequency along the specified axes.
Parameters
----------
sig: ndarray
input from which max frequency is computed.
FS: int
sampling frequency
Returns
-------
f_max: int
0.95 of max_frequency using cumsum.
"""
f, fs = plotfft(sig, FS, doplot=False)
t = np.cumsum(fs)
try:
ind_mag = np.where(t > t[-1]*0.95)[0][0]
except:
ind_mag = np.argmax(t)
f_max = f[ind_mag]
return f_max | 19321fb47d47b99138e1d1551f3728df4c2b7370 | 1,930 |
def split(text):
"""Turns the mobypron.unc file into a dictionary"""
map_word_moby = {}
try:
lines = text.split("\n")
for line in lines:
(word, moby) = line.split(" ", 1)
map_word_moby[word] = moby
except IOError as error:
print(f"Failed due to IOError: {error}")
return map_word_moby | ba051724f0399e918949c3e8b7fb010e2d87c9f9 | 1,931 |
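A hypothetical usage sketch with made-up pronunciation lines (the real input would come from mobypron.unc): each line is split on the first space into a word key and a pronunciation value.
sample = "cat k/'ae/t\ndog d/'o/g"   # hypothetical stand-in for the file contents
print(split(sample))                 # -> {'cat': "k/'ae/t", 'dog': "d/'o/g"}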
def report(key_name=None, priority=-1, **formatters):
""" Use this decorator to indicate what returns to include in the report and how to format it """
def tag_with_report_meta_data(cls):
# guard: prevent bad coding by catching bad return key
if key_name and key_name not in cls.return_keys:
raise Exception("Task %s does not specify %s using the @returns decorator. "
"It cannot be used in @report" % (cls.name, key_name))
report_entry = {
"key_name": key_name,
'priority': priority,
'formatters': formatters,
}
if not hasattr(cls, 'report_meta'):
cls.report_meta = []
cls.report_meta.append(report_entry)
return cls
return tag_with_report_meta_data | 3830135de40bdc2a25bd3c6b6cecc194c6dbebac | 1,932 |
import scipy
def calc_momentum_def(x_loc, X, Y, U):
""" calc_momentum_def() : Calculates the integral momentum deficit of scalar field U stored at \
locations X,Y on a vertical line that runs nearest to x_loc. """
U_line, x_line, x_idx_line = get_line_quantity(x_loc, X, Y, U)
y_line = Y[:,x_idx_line]
return scipy.integrate.trapz(U_line*(1-U_line), y_line) | 7173450ebd779c07a80cef2deb37954ddb7509be | 1,933 |
def display_unit_title(unit, app_context):
"""Prepare an internationalized display for the unit title."""
course_properties = app_context.get_environ()
template = get_unit_title_template(app_context)
return template % {'index': unit.index, 'title': unit.title} | 9d8ffbf0672388bd890aaabb8e5fbdb5e193d3d2 | 1,934 |
def load_user(user_id):
"""Load the user object from the user ID stored in the session"""
return User.objects(pk=user_id).first() | 96df8d5e21f380369ae0c6ccc404a4f7880bf000 | 1,935 |
def get_complex_replay_list():
"""
For full replays that have crashed or failed to be converted
:return:
"""
return [
'https://cdn.discordapp.com/attachments/493849514680254468/496153554977816576/BOTS_JOINING_AND_LEAVING.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496153569981104129/BOTS_NO_POSITION.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496153605074845734/ZEROED_STATS.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496180938968137749/FAKE_BOTS_SkyBot.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/497149910999891969/NEGATIVE_WASTED_COLLECTION.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/497191273619259393/WASTED_BOOST_WHILE_SUPER_SONIC.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/501630263881760798/OCE_RLCS_7_CARS.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/561300088400379905/crossplatform_party.replay'
] | ef5a75a848289ad9c129c2b73a6d6845dcd07cfe | 1,936 |
import json
def parse_registry():
""" Parses the provided registry.dat file and returns a dictionary of chunk
file names and hashes. (The registry file is just a json dictionary containing
a list of file names and hashes.) """
registry = request.values.get("registry", None)
if registry is None:
return None
try:
ret = json.loads(registry)
except ValueError:
return abort(400)
if not isinstance(ret, dict):
return abort(400)
return ret | 71d4cd0f2b9fb33b92861feb9ea882fc32ec7234 | 1,937 |
import math
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
def get_cosine_with_hard_restarts_schedule_with_warmup(optim: Optimizer,
num_warmup_step: float,
num_training_step: int,
num_cycles: float = 1.,
last_epoch: int = -1):
"""
get a scheduler with a linear warmup between ``[0, num_warmup_step)`` and then decreases it following a cosine
function with several hard restarts.
"""
def lr_lambda(current_step):
if current_step < num_warmup_step:
return float(current_step) / float(max(1.0, num_warmup_step))
progress = float(current_step - num_warmup_step) / float(max(1, num_training_step - num_warmup_step))
if progress >= 1.0:
return 0.
return max(0., .5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.))))
return LambdaLR(optim, lr_lambda, last_epoch) | 5327cb688885c8ecc271156364a06bffedd97775 | 1,938 |
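A minimal usage sketch, under the assumption that the previously missing `Optimizer` and `LambdaLR` names come from PyTorch (see the imports added above); the model and hyperparameters are placeholders.
import torch

model = torch.nn.Linear(4, 2)                       # hypothetical model
optim = torch.optim.SGD(model.parameters(), lr=0.1)
sched = get_cosine_with_hard_restarts_schedule_with_warmup(
    optim, num_warmup_step=10, num_training_step=100, num_cycles=2.0)
for _ in range(5):
    optim.step()
    sched.step()                                    # advances the warmup/cosine schedule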
import typing
def home():
"""
Render Homepage
--------------------------------------------------------------
This site should be cached, because it is the main entry point for many users.
"""
bestseller: typing.List[Device] = get_bestsellers()
specialist_manufacturers = Manufacturer.query.filter(
(Manufacturer.name == "Samsung") | (Manufacturer.name == "Huawei")
).all()
return render_template("shop/home.html", bestseller=bestseller, specialist_manufacturers=specialist_manufacturers) | ca452264e8a10af83e0cc7b5df592a9f618085ad | 1,939 |
def reject_call():
"""Ends the call when a user does not want to talk to the caller"""
resp = twilio.twiml.Response()
resp.say("I'm sorry, Mr. Baker doesn't want to talk to you. Goodbye scum.", voice='woman', language='en-GB')
resp.hangup()
return str(resp) | 743e58b230a3a63df4c3e882139755b8d2c4bc55 | 1,940 |
import pandas as pd
def table_prep(data, columns=''):
"""
Data processor for table() function.
You can call it separately as well and in
return get a non-prettified summary table.
Unless columns are defined, the three first
columns are chosen by default.
SYNTAX EXAMPLE:
df['quality_score','influence_score','reach_score']
"""
if data.shape[1] != 3:
if len(columns) != 3:
if data.shape[1] > 3:
print("showing first three columns because no columns were \
specific / data had more than 3 columns")
data = pd.DataFrame(data[data.columns[0:3]])
if data.shape[1] < 3:
print("You need at least 3 columns of data for this table")
quit()
if len(columns) == 3:
data = data[columns]
desc = pd.DataFrame({'sum': data.sum().astype('int'),
'median': data.median(),
'mean': data.mean(),
'std': data.std()})
desc = desc.round(decimals=2)
return desc | a9d3d75d2ac32ddf5ae4d5a17a10974b61c139ee | 1,941 |
def lerp(a,b,t):
""" Linear interpolation between from @a to @b as @t goes between 0 an 1. """
return (1-t)*a + t*b | 12cb8690ba5e5f2a4c08c1cd29d3497513b63438 | 1,942 |
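A hypothetical usage sketch: t=0 gives a, t=1 gives b, and t=0.25 lands a quarter of the way between them.
print(lerp(10.0, 20.0, 0.0), lerp(10.0, 20.0, 1.0), lerp(10.0, 20.0, 0.25))  # -> 10.0 20.0 12.5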
def convert_to_legacy_v3(
game_tick_packet: game_data_struct.GameTickPacket,
field_info_packet: game_data_struct.FieldInfoPacket = None):
"""
Returns a legacy packet from v3
:param game_tick_packet a game tick packet in the v4 struct format.
:param field_info_packet a field info packet in the v4 struct format. Optional. If this is not supplied,
none of the boost locations will be filled in.
"""
legacy_packet = GameTickPacket()
legacy_packet.numBoosts = game_tick_packet.num_boost
legacy_packet.numCars = game_tick_packet.num_cars
for i in range(game_tick_packet.num_cars):
convert_player_info(legacy_packet.gamecars[i], game_tick_packet.game_cars[i])
for i in range(game_tick_packet.num_boost):
convert_boost_info(legacy_packet.gameBoosts[i], game_tick_packet.game_boosts[i])
if field_info_packet is not None:
convert_vector(legacy_packet.gameBoosts[i].Location, field_info_packet.boost_pads[i].location)
convert_ball_info(legacy_packet.gameball, game_tick_packet.game_ball)
convert_game_info(legacy_packet.gameInfo, game_tick_packet.game_info)
return legacy_packet | 3e00e165233806957a010871c9218b1c02950063 | 1,943 |
import logging
import numpy as np
import tensorflow as tf
import pydub
import apache_beam as beam
def _load_audio(audio_path, sample_rate):
"""Load audio file."""
global counter
global label_names
global start
global end
logging.info("Loading '%s'.", audio_path)
try:
lbl1=Alphabet[audio_path[-6]]
lbl2 = Alphabet[audio_path[-5]]
except:
lbl1=1 + counter
lbl2=2 + counter
label_names=np.array([[lbl1,lbl2]]).astype(np.float32)
counter = counter + 1
print('label names')
print(audio_path)
#print(audio_path[-6]+audio_path[-5])
print(label_names)
beam.metrics.Metrics.counter('prepare-tfrecord', 'load-audio').inc()
with tf.io.gfile.GFile(audio_path, 'rb') as f:
audio_segment = (
pydub.AudioSegment.from_file(f)
.set_channels(1).set_frame_rate(sample_rate))
audio = np.array(audio_segment.get_array_of_samples()).astype(np.float32)
audio=audio[start:end]
audio /= 2 ** (8 * audio_segment.sample_width)
with tf.io.gfile.GFile(str(audio_path.replace("audio","audio_2")), 'rb') as sd:
audio_segment_2 = (
pydub.AudioSegment.from_file(sd)
.set_channels(1).set_frame_rate(sample_rate))
audio_2 = np.array(audio_segment_2.get_array_of_samples()).astype(np.float32)
audio_2=audio_2[start:end]
# Convert from int to float representation.
audio_2 /= 2**(8 * audio_segment_2.sample_width)
print('I am alive!')
start = start + 64000
end = end + 64000
#print(audio)
return {'audio': audio,'audio_2': audio_2} | 5e8112c79164c800965f137c83ceb720aab17bdf | 1,944 |
def generate_annotation_dict(annotation_file):
""" Creates a dictionary where the key is a file name
and the value is a list containing the
- start time
- end time
- bird class.
for each annotation in that file.
"""
annotation_dict = dict()
for line in open(annotation_file):
file_name, start_time, end_time, bird_class = line.strip().split('\t')
if file_name not in annotation_dict:
annotation_dict[file_name] = list()
annotation_dict[file_name].append([start_time, end_time, bird_class])
return annotation_dict | f40f210075e65f3dbe68bb8a594deb060a23ad8b | 1,945 |
def ishom(T, check=False, tol=100):
"""
Test if matrix belongs to SE(3)
:param T: SE(3) matrix to test
:type T: numpy(4,4)
:param check: check validity of rotation submatrix
:type check: bool
:return: whether matrix is an SE(3) homogeneous transformation matrix
:rtype: bool
- ``ishom(T)`` is True if the argument ``T`` is of dimension 4x4
- ``ishom(T, check=True)`` as above, but also checks orthogonality of the
rotation sub-matrix and validity of the bottom row.
.. runblock:: pycon
>>> from spatialmath.base import *
>>> import numpy as np
>>> T = np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
>>> ishom(T)
>>> T = np.array([[1, 1, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]]) # invalid SE(3)
>>> ishom(T) # a quick check says it is an SE(3)
>>> ishom(T, check=True) # but if we check more carefully...
>>> R = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> ishom(R)
:seealso: :func:`~spatialmath.base.transformsNd.isR`, :func:`~isrot`, :func:`~spatialmath.base.transforms2d.ishom2`
"""
return (
isinstance(T, np.ndarray)
and T.shape == (4, 4)
and (
not check
or (
base.isR(T[:3, :3], tol=tol)
and np.all(T[3, :] == np.array([0, 0, 0, 1]))
)
)
) | b4a0467d22940889e3071bf07d4a093d567409f3 | 1,946 |
import numpy as np
def _get_stp_data(step_order=STEP_ORDER, n=N_PER_STEP):
"""Returns np.array of step-type enums data for sample data.
Parameters
----------
step_order : list of (int, char)
List of (Cycle number, step type code) for steps in sample procedure.
n : int
Number of datapoints per step.
Returns
-------
stp_data : np.array(int)
"""
return np.hstack([_get_step_stp_idx_data(step_code, n=n) for _, step_code
in step_order]) | d96a2604ac67e1a84ead39e0d2d39a5c6183a5cd | 1,947 |
import tensorflow as tf
def actor_discrete_loss(actions, advantages, logits):
"""
Adapted from: http://inoryy.com/post/tensorflow2-deep-reinforcement-learning/
"""
# sparse categorical CE loss obj that supports sample_weight arg on call()
# from_logits argument ensures transformation into normalized probabilities
weighted_sparse_ce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# policy loss is defined by policy gradients, weighted by advantages
# note: we only calculate the loss on the actions we've actually taken
actions = tf.cast(actions, tf.int32)
policy_loss = weighted_sparse_ce(actions, logits, sample_weight=advantages)
# entropy loss can be calculated via CE over itself
# TODO: use this
# entropy_loss = tf.keras.losses.categorical_crossentropy(logits, logits, from_logits=True)
# here signs are flipped because optimizer minimizes
# return policy_loss - self.params['entropy']*entropy_loss
return policy_loss | a1a4cf0967d432655cc0788ad2c20bb0ca861d4f | 1,948 |
from typing import Union
from typing import List
import numpy as np
def fuse_stride_arrays(dims: Union[List[int], np.ndarray],
strides: Union[List[int], np.ndarray]) -> np.ndarray:
"""
Compute linear positions of tensor elements
of a tensor with dimensions `dims` according to `strides`.
Args:
dims: An np.ndarray of (original) tensor dimensions.
strides: An np.ndarray of (possibly permuted) strides.
Returns:
np.ndarray: Linear positions of tensor elements according to `strides`.
"""
return fuse_ndarrays([
np.arange(0, strides[n] * dims[n], strides[n], dtype=SIZE_T)
for n in range(len(dims))
]) | 06185cb0bcfccd30e7b006fa8fe4e28a6f5ae7f3 | 1,949 |
def extract_jasmine_summary(line):
"""
Example SUCCESS karma summary line:
PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 SUCCESS (0.205 secs / 0.001 secs)
Example FAIL karma summary line:
PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) ERROR (0.21 secs / 0.001 secs)
"""
# get totals
totals = line.split(' Executed ')[1].split(' ')
executed_tests, total_tests = int(totals[0]), int(totals[2])
# get failed
if 'SUCCESS' in line:
failed_tests = 0
else:
failed_tests = int(totals[3][1:])
return {
'total_tests': total_tests,
'executed_tests': executed_tests,
'failed_tests': failed_tests,
'passed_tests': executed_tests - failed_tests
} | f795ff015555cc3a2bd2d27527ae505a6dde9231 | 1,950 |
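A hypothetical usage sketch using the FAIL example line from the docstring above.
line = ("PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) "
"ERROR (0.21 secs / 0.001 secs)")
print(extract_jasmine_summary(line))
# -> {'total_tests': 1, 'executed_tests': 1, 'failed_tests': 1, 'passed_tests': 0}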
import argparse
import inspect
import re
import os
import sys
def main( argv ):
"""
Script execution entry point
@param argv Arguments passed to the script
@return Exit code (0 = success)
"""
#-------------------------------------------------------------------------
# BEGIN: Per-script Configuration
#-------------------------------------------------------------------------
# set a pattern used to match desired function names
# example of only allowing certain prefixes:
# guard_pattern = r'^demo_'
guard_pattern = None
# set up auto type-conversions for functions that expect parameters to
# be of a specified type
# note: the parameter list must be complete for any specified function.
# the default parser will pass all parameters as strings if the
# function is not listed here.
parameter_types = {
'fun1' : { 'x' : int }
}
# set up auto parameter documentation here
# note: the parameter list must be complete for any specified function.
parameter_docs = {
'fun0' : {
'a' : 'Name',
'b' : 'Desired item',
'c' : 'Number of desired items',
'd' : 'The greeting'
}
}
#-------------------------------------------------------------------------
# END: Per-script Configuration
#-------------------------------------------------------------------------
# imports when using this as a script
# note: it's probably better to put these at the top of the file, but
# we're assuming the application may not rely on these modules.
# get the name of the current function (most likely "main")
current_name = inspect.currentframe().f_code.co_name
# create a list of functions used to test each function for exposure
tests = [
# only expose functions
inspect.isfunction,
# do not expose conventionally "private" functions
lambda f: f.__name__[ : 1 ] != '_',
# do not expose the current function
lambda f: f.__name__ != current_name
]
# if there's a guard pattern, set up a regular expression to test it
if guard_pattern is not None:
tests.append(
lambda f: re.match( guard_pattern, f.__name__ ) is not None
)
# create a filter function (in a closure) to omit unwanted functions
def create_predicate( tests ):
def predicate( function ):
for test in tests:
if test( function ) == False:
return False
return True
return predicate
test = create_predicate( tests )
# get a reference to the current module
module = sys.modules[ __name__ ]
# construct a list of functions from the module's dictionary
functions = [ m[ 1 ] for m in inspect.getmembers( module, test ) ]
# standard (improved) help argument specification
helpargs = [ '-h', '--help' ]
helpkwargs = {
'default' : argparse.SUPPRESS,
'help' : 'Display this help message and exit.',
'action' : 'help'
}
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'Module Shell Script Example',
add_help = False
)
parser.add_argument( *helpargs, **helpkwargs )
parser.add_argument(
'-t',
'--test',
default = argparse.SUPPRESS,
help = 'Execute script self-test.',
action = 'store_true'
)
parser.add_argument(
'-v',
'--version',
default = argparse.SUPPRESS,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
# set up sub-command parsers
subparsers = parser.add_subparsers(
title = 'Functions',
help = 'The following functions are available.'
)
# add a sub-command parser for each function
for function in functions:
# shortcut for the function name
name = function.__name__
# use the function's docstring for helpful information
docstring = inspect.getdoc( function )
# create a sub-parser for this function
subparser = subparsers.add_parser(
name,
description = docstring,
help = docstring,
add_help = False,
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
# standard help switch
subparser.add_argument( *helpargs, **helpkwargs )
# argument specification of function
fun_args = inspect.getfullargspec( function )
num_args = len( fun_args.args )
# check for argument defaults
if fun_args.defaults is not None:
defaults = list( fun_args.defaults )
else:
defaults = []
# load arguments into this sub-parser
for arg in fun_args.args:
# keyword arguments used to create the sub-parser argument
kwargs = {}
# check for default values specified in the function
if num_args == len( defaults ):
kwargs[ 'nargs' ] = '?'
kwargs[ 'default' ] = defaults.pop( 0 )
# check for specified parameter types for this function
if name in parameter_types:
kwargs[ 'type' ] = parameter_types[ name ][ arg ]
# check for specified parameter documentation for this function
if name in parameter_docs:
kwargs[ 'help' ] = parameter_docs[ name ][ arg ]
# add the specified argument to the sub-parser
subparser.add_argument( arg, **kwargs )
# decrement number of remaining arguments to add
num_args -= 1
# set the function to be called when this sub-command is issued
subparser.set_defaults( _call = function )
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# check for self-test request
if hasattr( args, 'test' ) and args.test == True:
result = 0
script = os.path.basename( __file__ )
tests = [
( script, 'fun0', 'Bob', 'waffles' ),
( script, 'fun0', 'Bob', 'waffles', 3 ),
( script, 'fun0', 'Bob', 'waffles', 4, 'Greetings' )
]
for test in tests:
try:
result = main( *test )
except:
print( 'CAUGHT: {}'.format( sys.exc_info()[0] ) )
raise
else:
if result != 0:
return result
return result
# load arguments into a new dict instance
params = dict( vars( args ) )
# scrub things that aren't arguments to the requested function
# note: this means the function can't have parameters that begin with "_"
for key in list( params.keys() ):
if key[ : 1 ] == '_':
del params[ key ]
# call the function that was set for the specified sub-command
result = args._call( **params )
# check return for something non-shell-like
if type( result ) is not int:
print( result )
return 0
# return result
return result | 3f23b4f4ec29ed563c08432b30bf963739d4f789 | 1,951 |
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from django.utils import timezone
def serialize_item(item):
"""
Args:
item: an XBlock
Returns:
fields: a dictionary of an XBlock's field names and values
block_type: the name of the XBlock's type (i.e. 'course'
or 'problem')
"""
# convert all fields to a dict and filter out parent and children field
fields = {
field: field_value.read_from(item)
for (field, field_value) in item.fields.items()
if field not in ['parent', 'children']
}
course_key = item.scope_ids.usage_id.course_key
block_type = item.scope_ids.block_type
# set or reset some defaults
fields['edited_on'] = str(getattr(item, 'edited_on', ''))
fields['display_name'] = item.display_name_with_default
fields['org'] = course_key.org
fields['course'] = course_key.course
fields['run'] = course_key.run
fields['course_key'] = str(course_key)
fields['location'] = str(item.location)
fields['block_type'] = block_type
fields['detached'] = block_type in DETACHED_XBLOCK_TYPES
if block_type == 'course':
# prune the checklists field
if 'checklists' in fields:
del fields['checklists']
# record the time this command was run
fields['time_last_dumped_to_neo4j'] = str(timezone.now())
return fields, block_type | 426e5e83644ca2f1a81491e7e0a65a67cca26f15 | 1,952 |
def gen_outfile_name(args):
"""Generate a name for the output file based on the input args.
Parameters
----------
args : argparse
argparse object to print
"""
return args.outfile + gen_identifier(args) | 6a91c26de3ae3ec39a2095434ccc18feb9fed699 | 1,953 |
def check_vg_tags(game_id):
"""Returns a user's tags."""
if game_id:
user_id = session.get('user_id')
user_query = VgTag.query.join(Tag).filter(Tag.user_id == user_id) # Only display user's tags for a specific game.
vg_tags = user_query.filter(VgTag.game_id == game_id).all()
return vg_tags
else:
return None | 1eed3e9a58a21a79ae5502a67bde0c409af71785 | 1,954 |
import numpy as np
from astropy.io import fits
def load_fits(path):
"""
load the fits file
Parameters
----------
path: string, location of the fits file
Output
------
data: numpy array, of stokes images in (row, col, wv, pol)
header: hdul header object, header of the fits file
"""
hdul_tmp = fits.open(f'{path}')
data = np.asarray(hdul_tmp[0].data, dtype = np.float32)
header = hdul_tmp[0].header
return data, header | f0040e9ef3c8b2e7e4136f0ef7a7a2f9370a3653 | 1,955 |
def get_image_path(cfg,
metadata,
prefix='diag',
suffix='image',
metadata_id_list='default',):
"""
Produce a path to the final location of the image.
The cfg is the opened global config,
metadata is the metadata dictionary (for the individual dataset file)
"""
#####
if metadata_id_list == 'default':
metadata_id_list = ['project', 'dataset', 'mip', 'exp', 'ensemble',
'field', 'short_name', 'preprocessor',
'diagnostic', 'start_year', 'end_year', ]
path = folder(cfg['plot_dir'])
if prefix:
path += prefix + '_'
# Check that the keys are in the dict.
intersection = [va for va in metadata_id_list if va in metadata.keys()]
path += '_'.join([str(metadata[b]) for b in intersection])
if suffix:
path += '_' + suffix
image_extention = get_image_format(cfg)
if path.find(image_extention) == -1:
path += image_extention
logger.info("Image path will be: %s", path)
return path | 0c725311db7b3290923f6206cb2bb4d382644e12 | 1,956 |
def ProjectNameToBinding(project_name, tag_value, location=None):
"""Returns the binding name given a project name and tag value.
Requires binding list permission.
Args:
project_name: project name provided, fully qualified resource name
tag_value: tag value to match the binding name to
location: region or zone
Returns:
binding_name
Raises:
InvalidInputError: project not found
"""
service = ServiceFns['tagBindings']()
with endpoints.CrmEndpointOverrides(location):
req = ListResourceFns['tagBindings'](parent=project_name)
response = service.List(req)
for bn in response.tagBindings:
if bn.tagValue == tag_value:
return bn.name
raise InvalidInputError(
'Binding not found for parent [{}], tagValue [{}]'.format(
project_name, tag_value)) | 00966f8b74378b905fe5b3c4e5a6716a5d4f71bf | 1,957 |
def degrees_of_freedom(s1, s2, n1, n2):
"""
Compute the number of degrees of freedom using the Satterthwaite formula
@param s1 The sample standard deviation of the first sample
@param s2 The sample standard deviation of the second sample
@param n1 The number of observations in the first sample
@param n2 The number of observations in the second sample
"""
numerator = (s1**2/n1 + s2**2/n2)**2
denominator = ((s1**2/n1)**2)/(n1-1) + ((s2**2/n2)**2)/(n2-1)
degrees_of_freedom = numerator/denominator
return degrees_of_freedom | 5f076e33584c61dca4410b7ed47feb0043ec97cb | 1,958 |
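A hypothetical worked example, assuming the inputs are sample standard deviations (consistent with the s**2/n terms in the formula): standard deviations 2.0 and 3.0 with 15 and 20 observations give roughly 32.6 degrees of freedom.
df = degrees_of_freedom(s1=2.0, s2=3.0, n1=15, n2=20)
print(round(df, 1))  # -> 32.6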
import os
def get_requires_file(dist):
"""Get the path to the egg-info requires.txt file for a given dist."""
return os.path.join(
os.path.join(dist.location, dist.project_name + ".egg-info"),
"requires.txt",
) | f0fc66abc15fcba133240cc1783059d5694a08f6 | 1,959 |
def get_range_to_list(range_str):
"""
Takes a range string (e.g. 123-125) and return the list
"""
start = int(range_str.split('-')[0])
end = int(range_str.split('-')[1])
if start > end:
print("Your range string is wrong, the start is larger than the end!", range_str)
return range(start, end+1) | a88d9780ac2eba1d85ae70c1861f6a3c74991e5c | 1,960 |
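A hypothetical usage sketch: the range string is inclusive on both ends.
print(list(get_range_to_list("123-125")))  # -> [123, 124, 125]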
import base64
def get_saml_assertion(server, session, access_token, id_token=None):
"""
Exchange access token to saml token to connect to VC
Sample can be found at
https://github.com/vmware/vsphere-automation-sdk-python/blob/master/samples/vsphere/oauth/exchange_access_id_token_for_saml.py
"""
stub_config = StubConfigurationFactory.new_std_configuration(
get_requests_connector(
session=session,
url=HTTP_ENDPOINT.format(server)
)
)
oauth_security_context = create_oauth_security_context(access_token)
stub_config.connector.set_security_context(oauth_security_context)
token_exchange = TokenExchange(stub_config)
exchange_spec = token_exchange.ExchangeSpec(
grant_type=token_exchange.TOKEN_EXCHANGE_GRANT,
subject_token_type=token_exchange.ACCESS_TOKEN_TYPE,
actor_token_type=token_exchange.ID_TOKEN_TYPE,
requested_token_type=token_exchange.SAML2_TOKEN_TYPE,
actor_token=id_token, subject_token=access_token)
response = token_exchange.exchange(exchange_spec)
saml_token = response.access_token
# convert saml token to saml assertion
samlAssertion = etree.tostring(
etree.XML(base64.decodebytes(
bytes(saml_token, 'utf-8')
))
).decode('utf-8')
return samlAssertion | 174400720340fb831d6a62728b48555db7349b95 | 1,961 |
import sys
def insert_cluster_metadata(clconn, name, desc, cli, verbose=False):
"""
Insert the cluster metadata information in the SQL table and return its rowid.
This is the information that describes how the clusters were made.
:param clconn: the database connection
:param name: the name of the clustering approach
:param desc: a human readable description of the clustering
:param cli: the command line command used for the clustering
:param verbose: more output
:return: the clusterdefinition_rowid for this metadata
"""
if verbose:
sys.stderr.write(f"{color.GREEN}Adding the metadata{color.ENDC}\n")
clcur = clconn.cursor()
clcur.execute("INSERT INTO clusterdefinition(name, description, command) values (?,?,?)",
[name, desc, cli])
cd_rowid = clcur.lastrowid
clconn.commit()
return cd_rowid | bc7462f3911225a935183747263656d02ee761fc | 1,962 |
import html
def display_value(id, value):
"""
Display a value in a selector-like style.
Parameters
----------
id: int
Id of the value to be displayed
"""
return html.div(
{
"class": "py-3 pl-3 w-full border-[1px] sm:w-[48%] md:w-[121px] bg-nav rounded-[3px] md:mr-2 my-4 before:content-[''] before:border-[6px] before:border-[transparent] before:top-1/2 before:right-5 before:-translate-y-0.5 before:absolute xl:w-[14%]",
},
html.h3(
{"value": id},
value,
),
) | aeb3ceeeb8a2048beb8df7f5d3e6027d90df4739 | 1,963 |
import numpy as _np
def helmholtz_adjoint_double_layer_regular(
test_point, trial_points, test_normal, trial_normals, kernel_parameters
):
"""Helmholtz adjoint double layer for regular kernels."""
wavenumber_real = kernel_parameters[0]
wavenumber_imag = kernel_parameters[1]
npoints = trial_points.shape[1]
dtype = trial_points.dtype
factor_real = _np.empty(npoints, dtype=dtype)
factor_imag = _np.empty(npoints, dtype=dtype)
output_real = _np.empty(npoints, dtype=dtype)
output_imag = _np.empty(npoints, dtype=dtype)
diff = _np.empty((3, npoints), dtype=dtype)
dist = _np.zeros(npoints, dtype=dtype)
laplace_grad = _np.zeros(npoints, dtype=dtype)
m_inv_4pi = dtype.type(M_INV_4PI)
for i in range(3):
for j in range(npoints):
diff[i, j] = test_point[i] - trial_points[i, j]
dist[j] += diff[i, j] * diff[i, j]
for j in range(npoints):
dist[j] = _np.sqrt(dist[j])
for i in range(3):
for j in range(npoints):
laplace_grad[j] += diff[i, j] * test_normal[i]
for j in range(npoints):
laplace_grad[j] *= m_inv_4pi / (dist[j] * dist[j] * dist[j])
factor_real[j] = _np.cos(wavenumber_real * dist[j]) * laplace_grad[j]
factor_imag[j] = _np.sin(wavenumber_real * dist[j]) * laplace_grad[j]
if wavenumber_imag != 0:
for j in range(npoints):
factor_real[j] *= _np.exp(-wavenumber_imag * dist[j])
factor_imag[j] *= _np.exp(-wavenumber_imag * dist[j])
for j in range(npoints):
output_real[j] = (-1 - wavenumber_imag * dist[j]) * factor_real[
j
] - wavenumber_real * dist[j] * factor_imag[j]
output_imag[j] = wavenumber_real * dist[j] * factor_real[j] + factor_imag[j] * (
-1 - wavenumber_imag * dist[j]
)
return output_real + 1j * output_imag | 6b640e2b7b02e124d893452b8437bfdf6f4af1ec | 1,964 |
import sys
import numpy as np
from scipy import signal
def compute_vad(wav_rspecifier, feats_wspecifier, opts):
"""This function computes the vad based on ltsv features.
The output is written in the file denoted by feats_wspecifier,
and if the test_plot flaf is set, it produces a plot.
Args:
wav_rspecifier: An ark or scp file as in Kaldi, that contains the input audio
feats_wspecifier: An ark or scp file as in Kaldi, that contains the input audio
opts: Options. See main function for list of options
Returns:
The number of successful trials.
"""
num_utts, num_success = 0, 0
with SequentialWaveReader(wav_rspecifier) as reader, \
VectorWriter(feats_wspecifier) as writer:
for num_utts, (key, wave) in enumerate(reader, 1):
if wave.duration < opts.min_duration:
print("File: {} is too short ({} sec): producing no output.".format(key, wave.duration), file=sys.stderr)
continue
num_chan = wave.data().num_rows
if opts.channel >= num_chan:
print("File with id {} has {} channels but you specified "
"channel {}, producing no output.", file=sys.stderr)
continue
channel = 0 if opts.channel == -1 else opts.channel
fr_length_samples = int(opts.frame_window*wave.samp_freq*(10**(-3)))
fr_shift_samples = int(opts.frame_shift*wave.samp_freq*(10**(-3)))
try:
wav_data = np.squeeze(wave.data()[channel].numpy())
sample_freqs, segment_times, spec = signal.spectrogram(wav_data, fs=wave.samp_freq,
nperseg=fr_length_samples, nfft=opts.nfft,
noverlap=fr_length_samples-fr_shift_samples,
scaling='spectrum',mode = 'psd')
specT = np.transpose(spec)
spect_n = ARMA.ApplyARMA(specT, opts.arma_order)
ltsv_f = LTSV.ApplyLTSV(spect_n, opts.ltsv_ctx_window, opts.threshold,
opts.slope, opts.sigmoid_scale)
vad_feat = DCTF.ApplyDCT(opts.dct_num_cep, opts.dct_ctx_window, ltsv_f)
feats = Vector(vad_feat)
if opts.test_plot:
show_plot(segment_times, sample_freqs, spec, wave, wav_data, vad_feat)
except:
print("Failed to compute features for utterance", key,
file=sys.stderr)
continue
writer[key] = feats
num_success += 1
if num_utts % 10 == 0:
print("Processed {} utterances".format(num_utts),
file=sys.stderr)
print("Done {} out of {} utterances".format(num_success, num_utts),
file=sys.stderr)
return num_success != 0 | 7f652befce6b02c89f0e44d20dd08ee12b1e2783 | 1,965 |
def crt(s):
"""
Solve the system given by x == v (mod k),
where (k, v) goes over all key-value pairs of the dictionary s.
"""
x, n = 0, 1
for q, r in s.items():
x += n * ((r-x) * inverse(n, q) % q)
n *= q
return x | 6bcd489f9096cb780c935dd30ea90663d91f854f | 1,966 |
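# Usage sketch: with a modular-inverse helper such as `inverse = lambda a, m: pow(a, -1, m)`
# (Python 3.8+), crt({3: 2, 5: 3, 7: 2}) returns 23, which satisfies
# 23 % 3 == 2, 23 % 5 == 3 and 23 % 7 == 2.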
import tensorflow as tf  # TF1-style session API (tf.get_default_session)


def create_new_tf_session(**kwargs):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(**kwargs)
sess.__enter__()
assert tf.get_default_session()
return sess | 1520f330fe7939c997588cf3d8c63265610baa23 | 1,967 |
import typing
import re

import git


def MaybeGetHexShaOfLastExportedCommit(
    repo: git.Repo, head_ref: str = "HEAD") -> typing.Optional[str]:
"""The the SHA1 of the most recently exported commit.
Args:
repo: The repo to iterate over.
head_ref: The starting point for iteration, e.g. the commit closest to
head.
Returns:
The hex SHA1 of the last exported commited, else None.
"""
export_re = re.compile(r'\n\[Exported from ([a-fA-F0-9]{40})\]')
try:
for commit in repo.iter_commits(head_ref):
if '\n[Exported from ' in commit.message:
match = export_re.search(commit.message)
assert match
return match.group(1)
except git.GitCommandError:
# Raise if no HEAD, i.e. no commits.
pass
return None | 1d6afe688567ffe245e9aabe753c90e6baf22bfe | 1,968 |
def get_inchi(ID):
"""This function accept UNIQUE-ID and return InChI string of a certain compound"""
inchi = df_cpd['INCHI'][ID]
return inchi | 2420a73c2a5e21348c6efde7cd6bcde0cc0c0c00 | 1,969 |
import os
def run_samtools_faidx(job, ref_id):
"""
Use Samtools to create reference index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreID for reference index
:rtype: str
"""
    job.fileStore.logToMaster('Creating reference index')
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
command = ['faidx', '/data/ref.fasta']
dockerCall(job=job, workDir=work_dir, parameters=command,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai')) | 6736d314c6fb72fcf019a41477e0b0bc77dd94bc | 1,970 |
from typing import Optional

import jax.numpy as jnp

# Type alias assumed by this snippet; any array-like accepted by jnp.asarray works.
Array = jnp.ndarray


def pad_to_multiple(array: Array,
factor: int,
axis: int,
mode: Optional[str] = 'constant',
constant_values=0) -> Array:
"""Pads `array` on a given `axis` to be a multiple of `factor`.
Padding will be concatenated to the end of the axis only, not the beginning.
If the length along `axis` is already a multiple of `factor`, this is
effectively a no-op.
Args:
array: Array with rank >= 1 to pad.
factor: Positive integer factor to pad for.
axis: A valid axis in `array` to pad.
mode: The padding mode to use according to `jnp.pad`. Defaults to
'constant'. See `jax.numpy.pad` documentation for more.
constant_values: For 'constant' mode, the pad value to use within `jnp.pad`.
Defaults to 0.
Returns:
The padded Array result.
"""
array = jnp.asarray(array)
if factor < 1:
raise ValueError(f'`factor` must be positive but got {factor}.')
rank = array.ndim
if axis < -rank or axis >= rank:
raise ValueError(
f'`axis` ({axis}) out of bounds for `array` rank ({rank}).')
axis_len = array.shape[axis]
pad_len = -axis_len % factor
pad_width = [(0, 0)] * rank
pad_width[axis] = (0, pad_len)
kwargs = {}
if mode == 'constant':
kwargs['constant_values'] = constant_values
return jnp.pad(array=array, pad_width=pad_width, mode=mode, **kwargs) | 5164e124dc270a47ef8f8b1512cdefe796904791 | 1,971 |
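# Minimal usage sketch (not part of the original module): pad the last axis of a
# (2, 5) array up to a multiple of 4, so three zero columns are appended.
_example = jnp.ones((2, 5))
_padded = pad_to_multiple(_example, factor=4, axis=-1)
assert _padded.shape == (2, 8)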
import json

import easydict


def easy2dict(config: easydict.EasyDict):
"""
:param config: EasyDict参数
"""
# fix a Bug: cfg = dict(config) 仅仅转换第一层easydict
cfg = json.loads(json.dumps(config))
return cfg | 08a69816f44dfa03d86124792c1da1355710426f | 1,972 |
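# Usage sketch: nested EasyDicts come back as plain nested dicts.
_cfg = easydict.EasyDict({"model": {"depth": 50}, "lr": 0.1})
assert easy2dict(_cfg) == {"model": {"depth": 50}, "lr": 0.1}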
import math

import bcdata
# WFS client from OWSLib (assumed to be the WFS library used here)
from owslib.wfs import WebFeatureService


def define_request(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
bounds_crs="EPSG:3005",
sortby=None,
pagesize=10000,
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
geom_column = wfs.get_schema("pub:" + table)["geometry_column"]
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
# build the CQL based on query and bounds
# (the bbox param shortcut is mutually exclusive with CQL_FILTER)
if query and not bounds:
request["CQL_FILTER"] = query
if bounds:
b0, b1, b2, b3 = [str(b) for b in bounds]
bnd_query = f"bbox({geom_column}, {b0}, {b1}, {b2}, {b3}, '{bounds_crs}')"
if not query:
request["CQL_FILTER"] = bnd_query
else:
request["CQL_FILTER"] = query + " AND " + bnd_query
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts | 215b39a606bfa7fc6736e8b2f61bf9c298412b36 | 1,973 |
from typing import List
from typing import Tuple
import torch
def get_bert_input(
examples: List[tuple],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Convert input list to torch tensor.
Args:
        examples: a tuple of (input_id_list, token_type_ids_list)
Returns:
attention_mask, input_ids_tensor, token_type_ids_tensor
"""
input_ids = examples[0]
token_type_ids = examples[1]
max_seq_len = min(max(len(input_id) for input_id in input_ids), MAX_SEQ_LEN)
input_ids_tensor = torch.zeros((len(input_ids), max_seq_len), dtype=torch.long)
token_type_ids_tensor = torch.zeros_like(input_ids_tensor)
attention_mask = torch.ones_like(input_ids_tensor)
for i, input_id in enumerate(input_ids):
cur_seq_len = len(input_id)
if cur_seq_len <= max_seq_len:
input_ids_tensor[i, :cur_seq_len] = torch.tensor(input_id, dtype=torch.long)
token_type_ids_tensor[i, :cur_seq_len] = torch.tensor(
token_type_ids[i], dtype=torch.long
)
attention_mask[i, cur_seq_len:] = 0
else:
input_ids_tensor[i] = torch.tensor(
input_id[: max_seq_len - 1] + [102], dtype=torch.long
)
token_type_ids_tensor[i] = torch.tensor(
token_type_ids[i][:max_seq_len], dtype=torch.long
)
return attention_mask, input_ids_tensor, token_type_ids_tensor | 954d0990d5cd5f28d588c472f7d7d48ecc4b3eb2 | 1,974 |
import io
import traceback
def _format_exception(e: BaseException):
"""
Shamelessly stolen from stdlib's logging module.
"""
with io.StringIO() as sio:
traceback.print_exception(e.__class__, e, e.__traceback__, None, sio)
return sio.getvalue().strip() | d80f60634a9862ca282b1c7ccf63ae8e945ffdc9 | 1,975 |
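# Usage sketch: format a caught exception as a single traceback string.
try:
    1 / 0
except ZeroDivisionError as exc:
    print(_format_exception(exc))  # "Traceback (most recent call last): ... ZeroDivisionError ..."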
import json

# NonRecoverableError comes from the Cloudify plugin SDK (assumed dependency of this module)
from cloudify.exceptions import NonRecoverableError


def batch_deploy(blueprint_id,
parent_deployments,
group_id=None,
new_deployment_ids=None,
inputs=None,
labels=None,
**_):
"""
Create deployments for a batch from a single blueprint.
:param blueprint_id: The blueprint, which has already been uploaded.
:type blueprint_id: str
:param parent_deployments: A list of parent deployments.
:type parent_deployments: list
:param group_id: the new group ID.
:type group_id: str
:param new_deployment_ids: a list of new deployment names.
:type new_deployment_ids: list
:param inputs: A list of inputs to the new deployments.
:type inputs: list
:param labels: A list of labels to the new deployments.
:type labels: list
:return: group_id
:rtype: str
"""
if not isinstance(parent_deployments, list):
        # The CLI may pass parent_deployments as a JSON string
        # rather than a Python list, so try to decode it.
try:
parent_deployments = json.loads(parent_deployments)
except json.JSONDecodeError:
raise NonRecoverableError(
'The parent_deployments parameter is not properly formatted. '
'Proper format is a list, a {t} was provided: {v}.'.format(
t=type(parent_deployments), v=parent_deployments))
group_id = group_id or generate_group_id_from_blueprint(
blueprint_id)
new_deployment_ids = new_deployment_ids or \
generate_deployment_ids_from_group_id(group_id, parent_deployments)
inputs = generate_inputs_from_deployments(inputs, parent_deployments)
labels = labels or generate_labels_from_inputs(inputs)
create_deployments(
group_id,
blueprint_id,
new_deployment_ids,
inputs,
labels)
return group_id | 8128e39c94bfc15a5b75d3a88274720b52d8d900 | 1,976 |
import json
def compute_task_def(build, settings, fake_build):
"""Returns a swarming task definition for the |build|.
Args:
build (model.Build): the build to generate the task definition for.
build.proto.infra and build.proto.input.properties must be initialized.
settings (service_config_pb2.SettingsCfg): global settings.
fake_build (bool): False if the build is not going to be actually
created in buildbucket. This is used by led that only needs the definition
of the task that *would be* used for a new build like this.
Returns a task_def dict.
Corresponds to JSON representation of
https://cs.chromium.org/chromium/infra/luci/appengine/swarming/swarming_rpcs.py?q=NewTaskRequest&sq=package:chromium&g=0&l=438
"""
assert isinstance(build, model.Build), type(build)
assert isinstance(fake_build, bool), type(fake_build)
assert build.proto.HasField('infra')
assert build.proto.input.HasField('properties')
assert isinstance(settings, service_config_pb2.SettingsCfg)
sw = build.proto.infra.swarming
task = {
'name': 'bb-%d-%s' % (build.proto.id, build.builder_id),
'tags': _compute_tags(build, settings),
'priority': str(sw.priority),
'task_slices': _compute_task_slices(build, settings),
}
if build.proto.number: # pragma: no branch
task['name'] += '-%d' % build.proto.number
if sw.task_service_account: # pragma: no branch
# Don't pass it if not defined, for backward compatibility.
task['service_account'] = sw.task_service_account
if not fake_build: # pragma: no branch | covered by swarmbucketapi_test.py
task['pubsub_topic'] = 'projects/%s/topics/swarming' % (
app_identity.get_application_id()
)
task['pubsub_userdata'] = json.dumps(
{
'build_id': build.proto.id,
'created_ts': utils.datetime_to_timestamp(utils.utcnow()),
'swarming_hostname': sw.hostname,
},
sort_keys=True,
)
return task | 7071960148ed391b42a4b7ad1e4ed4e6d0c10713 | 1,977 |
import traceback

from bs4 import BeautifulSoup
# Python-Markdown renderer (assumed; provides the `markdown()` call used below)
from markdown import markdown


def parse_markdown(page, target=None, pages=None, categories=[], mode="html",
current_time="", bypass_errors=False):
"""Takes a page object (must contain "md" attribute) and returns parsed
and filtered HTML."""
target = get_target(target)
logger.info("Preparing page %s" % page["name"])
# We'll apply these filters to the page
page_filters = get_filters_for_page(page, target)
logger.debug("Filters for page {pg}: {fl}".format(
pg=page["name"], fl=page_filters))
# Get the markdown, preprocess, and apply md filters
try:
md = preprocess_markdown(page,
target=target,
categories=categories,
mode=mode,
current_time=current_time,
page_filters=page_filters,
bypass_errors=bypass_errors,
)
except Exception as e:
traceback.print_tb(e.__traceback__)
recoverable_error("Couldn't preprocess markdown for page %s: %s(%s)" %
(page["name"], repr(e), str(e)), bypass_errors)
# Just fetch the md without running the preprocessor
md = preprocess_markdown(page,
target=target,
categories=categories,
mode=mode,
current_time=current_time,
page_filters=page_filters,
bypass_errors=bypass_errors,
skip_preprocessor=True
)
# Actually parse the markdown
logger.info("... parsing markdown...")
html = markdown(md, extensions=["markdown.extensions.extra",
"markdown.extensions.toc"],
lazy_ol=False)
# Apply raw-HTML-string-based filters here
for filter_name in page_filters:
if "filter_html" in dir(config.filters[filter_name]):
logger.info("... applying HTML filter %s" % filter_name)
html = config.filters[filter_name].filter_html(
html,
currentpage=page,
categories=categories,
pages=pages,
target=target,
current_time=current_time,
mode=mode,
config=config,
logger=logger,
)
# Some filters would rather operate on a soup than a string.
# May as well parse once and re-serialize once.
soup = BeautifulSoup(html, "html.parser")
# Apply soup-based filters here
for filter_name in page_filters:
if "filter_soup" in dir(config.filters[filter_name]):
logger.info("... applying soup filter %s" % filter_name)
config.filters[filter_name].filter_soup(
soup,
currentpage=page,
categories=categories,
pages=pages,
target=target,
current_time=current_time,
mode=mode,
config=config,
logger=logger,
)
# ^ the soup filters apply to the same object, passed by reference
logger.info("... re-rendering HTML from soup...")
html2 = str(soup)
return html2 | 4e079c4c9d5f9ac9891f515ebc806877f3568cc8 | 1,978 |
import numpy as np


def draw_bs_pairs(x, y, func, size=1):
    """Perform pairs bootstrap: return `size` bootstrap replicates of func(x, y)."""
# Set up array of indices to sample from: inds
inds = np.arange(len(x))
# Initialize replicates
bs_replicates = np.empty(size)
# Generate replicates
for i in range(size):
bs_inds = np.random.choice(inds, len(inds))
bs_x, bs_y = x[bs_inds], y[bs_inds]
bs_replicates[i] = func(bs_x, bs_y)
return bs_replicates | f0b05241f567570dd96ed97340d5075b8ccb5a7b | 1,979 |
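# Usage sketch: bootstrap the Pearson correlation of two paired samples.
_x = np.random.normal(size=100)
_y = 2 * _x + np.random.normal(size=100)
_reps = draw_bs_pairs(_x, _y, lambda a, b: np.corrcoef(a, b)[0, 1], size=1000)
print(np.percentile(_reps, [2.5, 97.5]))  # approximate 95% confidence interval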
import numpy as np


def has_hole(feature):
"""
Detects the number of holes in a shapely polygon or multipolygon.
Parameters
----------
feature : shapely Polygon or Multipolygon
polygon to be analyzed for holes
Returns
-------
int
number of holes
"""
if feature.geom_type == 'Polygon':
num_holes = len(feature.interiors)
    elif feature.geom_type == 'MultiPolygon':
        # `.geoms` is required to iterate over a MultiPolygon in Shapely 2.x
        num_holes = np.sum([len(x.interiors) for x in feature.geoms])
    else:
        raise ValueError("Expected a Polygon or MultiPolygon, got {}".format(feature.geom_type))
return num_holes | e854d7a4902e66ec95479816662a145e184ee8af | 1,980 |
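# Usage sketch: a 4x4 square with one 1x1 hole cut out of it.
from shapely.geometry import Polygon
_square_with_hole = Polygon(
    [(0, 0), (4, 0), (4, 4), (0, 4)],
    holes=[[(1, 1), (2, 1), (2, 2), (1, 2)]],
)
assert has_hole(_square_with_hole) == 1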
import numpy as np
from astropy.table import Table


def linder_table(file=None, **kwargs):
    """Load Linder Model Table
    Function to read in isochrone models from Linder et al. 2019.
    Returns an astropy Table.
    Parameters
    ----------
    file : string
        Location and name of the Linder isochrone file.
        Default is 'BEX_evol_mags_-3_MH_0.00.dat' in the pynrc
        linder/isochrones directory.
    """
# Default file to read and load
if file is None:
base_dir = conf.PYNRC_PATH + 'linder/isochrones/'
file = base_dir + 'BEX_evol_mags_-3_MH_0.00.dat'
with open(file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
cnames = content[2].split(',')
cnames = [name.split(':')[1] for name in cnames]
ncol = len(cnames)
content_arr = []
for line in content[4:]:
        arr = np.array(line.split()).astype(float)
if len(arr)>0:
content_arr.append(arr)
content_arr = np.array(content_arr)
# Convert to Astropy Table
tbl = Table(rows=content_arr, names=cnames)
return tbl | ff6b187009c8bbcef8ae604095c289429863907e | 1,981 |
from django.core.exceptions import PermissionDenied
from django.http import JsonResponse


def json_redirect(request, url, **kwargs):
"""
Returns a JSON response for redirecting to a new URL. This is very specific
to this project and depends on the JavaScript supporting the result that
is returned from this method.
"""
if not request.is_ajax():
raise PermissionDenied("Must be an AJAX request.")
return JsonResponse({'url': url}, **kwargs) | 7fbafcfc400c733badc26fcb97bc3a61f4c49f74 | 1,982 |
from rest_framework.test import APIClient


def unauthenticatedClient():
    """Return an API client with no authenticated user."""
return APIClient() | b821a7c1e11a398eee691ca43be54d5aca00d213 | 1,983 |
import os

import numpy as np


def filters_to_kcorrect(curve_file, verbose=False):
"""
Convert a filter response curve to the Kcorrect format.
This is used by Kcorrect and iSEDFit.
"""
if not os.path.isfile(curve_file):
raise IOError("# Cannot find the response curve file {}".format(curve_file))
# Read in the .txt response curve
wave, response = np.genfromtxt(curve_file, usecols=(0, 1), unpack=True)
# Output file name
prefix, _ = os.path.splitext(curve_file)
output_par = prefix + '.par'
if os.path.isfile(output_par):
if verbose:
print("# Curve {0} is already available".format(output_par))
else:
assert len(wave) == len(response), '''
Wavelength and response curve should have the same size'''
par = open(output_par, 'w')
par.write(
"# %s\n typedef struct {\n double lambda;\n double pass;\n } KFILTER;\n\n")
for w, r in zip(wave, response):
par.write("KFILTER %10.4f %11.7f\n" % (w, r))
par.close()
return wave, response | 144e1af636778e503c394a615af741dc51f5f7d9 | 1,984 |
import re
def get_known_disk_attributes(model):
"""Get known NVMe/SMART attributes (model specific), returns str."""
known_attributes = KNOWN_DISK_ATTRIBUTES.copy()
# Apply model-specific data
for regex, data in KNOWN_DISK_MODELS.items():
if re.search(regex, model):
for attr, thresholds in data.items():
if attr in known_attributes:
known_attributes[attr].update(thresholds)
else:
known_attributes[attr] = thresholds
# Done
return known_attributes | 39ece3213996b201d1109d7787bcd8fed859235b | 1,985 |
def get_one_exemplar_per_class_proximity(proximity):
"""
unpack proximity object into X, y and random_state for picking exemplars.
----
Parameters
----
proximity : Proximity object
Proximity like object containing the X, y and random_state variables
required for picking exemplars.
----
Returns
----
result : function
function choosing one exemplar per class
"""
return get_one_exemplar_per_class(proximity.X, proximity.y, proximity.random_state) | eeb46d07a757d6b06432369f26f5f2391d9b14cd | 1,986 |
def annotation_layers(state):
"""Get all annotation layer names in the state
Parameters
----------
state : dict
Neuroglancer state as a JSON dict
Returns
-------
names : list
List of layer names
"""
return [l["name"] for l in state["layers"] if l["type"] == "annotation"] | 98dee6b821fbfe2dd449859400c2166ba694025f | 1,987 |
def describe_bvals(bval_file) -> str:
"""Generate description of dMRI b-values."""
# Parse bval file
with open(bval_file, "r") as file_object:
raw_bvals = file_object.read().splitlines()
# Flatten list of space-separated values
bvals = [
item for sublist in [line.split(" ") for line in raw_bvals] for item in sublist
]
bvals = sorted([int(v) for v in set(bvals)])
bvals = [num_to_str(v) for v in bvals]
bval_str = list_to_str(bvals)
bval_str = "b-values of {} acquired".format(bval_str)
return bval_str | 1d19c71d9422a37f425c833df52d9b1936195660 | 1,988 |
import numpy as np
from numpy.linalg import matrix_rank
from numpy.random import permutation


def weight_update4(weights, x_white, bias1, lrate1, b_exp):
""" Update rule for infomax
    This function receives parameters to update W1
* Input
weights : unmixing matrix (must be a square matrix)
x_white: whitened data
bias1: current estimated bias
lrate1: current learning rate
b_exp : experiment
* Output
weights : updated mixing matrix
bias: updated bias
lrate1: updated learning rate
"""
NCOMP, NVOX = (x_white.shape)
block1 = (int(np.floor(np.sqrt(NVOX / 3))))
last1 = (int(np.fix((NVOX/block1-1)*block1+1)))
if not b_exp :
permute1 = permutation(NVOX)
else :
permute1 = range(NVOX)
for start in range(0, last1, block1):
if start + block1 < NVOX:
tt2 = (start + block1 )
else:
tt2 = (NVOX)
block1 = (NVOX - start)
unmixed = (np.dot(weights, x_white[:, permute1[start:tt2]]) + bias1)
logit = 1 / (1 + np.exp(-unmixed))
weights = (weights + lrate1 * np.dot(
block1 * np.eye(NCOMP) + np.dot( (1-2*logit), unmixed.T), weights))
bias1 = (bias1 + lrate1 * (1-2*logit).sum(axis=1).reshape(bias1.shape))
# Checking if W blows up
if (np.isnan(weights)).any() or np.max(np.abs(weights)) > MAX_WEIGHT:
# ("Weight is outside the range. Restarting.")
weights = (np.eye(NCOMP))
bias1 = (np.zeros((NCOMP, 1)))
error = 1
if lrate1 > 1e-6 and \
matrix_rank(x_white) < NCOMP:
a = 1
# ("Data 1 is rank defficient"
# ". I cannot compute " +
# str(NCOMP) + " components.")
return (None, None, None, 1)
if lrate1 < 1e-6:
a = 1
# ("Weight matrix may"
# " not be invertible...")
return (None, None, None, 1)
break
else:
error = 0
return (weights, bias1, lrate1, error) | 6c2d5c6610724787b4e8c8fb42569265e4b13d76 | 1,989 |
def Dijkstra(graph, source):
"""
    Dijkstra's algorithm for shortest path distances (unit edge weights) from a source vertex to all other vertices of a graph.
Arguments
---------
graph -- directed graph; object of Graph class
source -- start vertex
>>> graph = Graph()
>>> graph.addVertex("A")
>>> conns = [ ("A", "B"), ("A", "C"), ("B", "C"), ("C", "D") ]
>>> for va, vb in conns:
... graph.addConn(va, vb)
>>> dists = Dijkstra(graph, 'A')
>>> dists['D']
2
"""
dist = {}
pq = pQ.BinaryHeap()
for node in graph:
if node != source:
dist[node] = float('inf')
else:
dist[node] = 0
pq.insert((dist[node], node))
while not pq.isEmpty():
current = pq.delMin()
for next_node in graph.getConns(current[1]):
            new_dist = current[0] + 1  # unit edge weights
if new_dist < dist[next_node]:
dist[next_node] = new_dist
pq.editHeap(next_node, (dist[next_node], next_node))
return dist | 9585c13c5504cdbff62494c2d5d97655c2281c34 | 1,990 |
def annealing_epsilon(episode: int, min_e: float, max_e: float, target_episode: int) -> float:
"""Return an linearly annealed epsilon
Epsilon will decrease over time until it reaches `target_episode`
(epsilon)
|
max_e ---|\
| \
| \
| \
min_e ---|____\_______________(episode)
|
target_episode
slope = (min_e - max_e) / (target_episode)
intercept = max_e
e = slope * episode + intercept
Args:
episode (int): Current episode
min_e (float): Minimum epsilon
max_e (float): Maximum epsilon
target_episode (int): epsilon becomes the `min_e` at `target_episode`
Returns:
float: epsilon between `min_e` and `max_e`
"""
slope = (min_e - max_e) / (target_episode)
intercept = max_e
return max(min_e, slope * episode + intercept) | fab650085f271f1271025e23f260eb18e645a9ba | 1,991 |
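# Usage sketch: anneal epsilon from 1.0 down to 0.1 over the first 100 episodes.
print([round(annealing_epsilon(ep, 0.1, 1.0, 100), 2) for ep in (0, 50, 100, 200)])
# -> [1.0, 0.55, 0.1, 0.1]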
import jsonschema
def ExtendWithDefault(validator_class):
"""Takes a validator and makes it set default values on properties.
Args:
validator_class: A class to add our overridden validators to
Returns:
A validator_class that will set default values
and ignore required fields
"""
validate_properties = validator_class.VALIDATORS['properties']
def SetDefaultsInProperties(validator, user_schema, user_properties,
parent_schema):
SetDefaults(validator, user_schema or {}, user_properties,
parent_schema, validate_properties)
return jsonschema.validators.extend(
validator_class, {PROPERTIES: SetDefaultsInProperties,
REQUIRED: IgnoreKeyword}) | 42ab80b2c52e474a354589eb4c6041450cf23fd2 | 1,992 |
def coach_input_line(call, school, f):
"""
Returns a properly formatted line about a coach.
:param call: (String) The beginning of the line, includes the gender, sport, and school abbreviation.
:param school:(String) The longform name of the school.
:param f: (String) The input line from the user.
:return: (String) A properly formatted line with all necessary information about a coach.
"""
f = f.split("\t")
newCall = f[2].split(" ")
for item in newCall:
call += item[0].lower()
print(call)
print(f[2])
return f"{call}\t{school}'s {coachformat(f[2])}, {f[0]} {f[1]},\t{f[0]} {f[1]},\t{f[1]}\n" | 762127ac058949af890c2ef7f19b924642cc4c39 | 1,993 |
def pad_seq(seq, max_length, PAD=0):
"""
:param seq: list of int,
:param max_length: int,
:return seq: list of int,
"""
seq += [PAD for i in range(max_length - len(seq))]
return seq | bb61677bc658e22b317e3d5fb10f7c85a84200d0 | 1,994 |
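# Usage sketch:
assert pad_seq([5, 6, 7], 6) == [5, 6, 7, 0, 0, 0]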
import numpy as np


def complex_domain(spectrogram):
"""
Complex Domain.
Parameters
----------
spectrogram : :class:`Spectrogram` instance
:class:`Spectrogram` instance.
Returns
-------
complex_domain : numpy array
Complex domain onset detection function.
References
----------
.. [1] Juan Pablo Bello, Chris Duxbury, Matthew Davies and Mark Sandler,
"On the use of phase and energy for musical onset detection in the
complex domain",
IEEE Signal Processing Letters, Volume 11, Number 6, 2004.
"""
# take the sum of the absolute changes
return np.asarray(np.sum(np.abs(_complex_domain(spectrogram)), axis=1)) | 10248ca5bb291326018934d654b2fee6a8a972d0 | 1,995 |
import torch
from torch.autograd import Variable


def toOneHot(action_space, actions):
"""
If action_space is "Discrete", return a one hot vector, otherwise just return the same `actions` vector.
actions: [batch_size, 1] or [batch_size, n, 1]
If action space is continuous, just return the same action vector.
"""
# One hot encoding buffer that you create out of the loop and just keep reusing
if action_space.__class__.__name__ == "Discrete":
nr_actions = action_space.n
actions_onehot_dim = list(actions.size())
actions_onehot_dim[-1] = nr_actions
actions = actions.view(-1, 1).long()
action_onehot = torch.FloatTensor(actions.size(0), nr_actions)
return_variable = False
if isinstance(actions, Variable):
actions = actions.data
return_variable = True
# In your for loop
action_onehot.zero_()
if actions.is_cuda:
action_onehot = action_onehot.cuda()
action_onehot.scatter_(1, actions, 1)
if return_variable:
action_onehot = Variable(action_onehot)
        action_onehot = action_onehot.view(*actions_onehot_dim)
return action_onehot
else:
return actions.detach() | bad47c1f55795d16bdcd67aac67b4ae40a40363c | 1,996 |
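# Usage sketch (assumes gym is available for the Discrete action space):
import gym.spaces
_space = gym.spaces.Discrete(4)
_actions = torch.tensor([[1], [3]])
print(toOneHot(_space, _actions))  # 2 x 4 one-hot float tensor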
def find_triangle(n):
"""Find the first triangle number with N divisors."""
t, i = 1, 1
while True:
i += 1
t += i
if len(divisors(t)) > n:
return t | b74e0e8fd869b4d9a9ae1fe83299f32eaa848e9a | 1,997 |
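# Usage sketch (assumes `divisors` is provided, e.g. by sympy.divisors):
# find_triangle(5) returns 28, the first triangle number with more than
# five divisors (1, 2, 4, 7, 14, 28).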
import requests
import bs4


def get_main_page_soup(home_url):
    """Fetch the main page and return it as a BeautifulSoup object."""
user_agent= 'Mozilla / 5.0 (Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, ' \
'like Gecko) Chrome / 64.0.3282.140 Safari / 537.36 Edge / 18.17763 '
headers = {'User-agent':user_agent}
# request to javbus
res = requests.get(home_url, headers=headers, timeout=20)
res.raise_for_status()
# init beautiful soup
soup = bs4.BeautifulSoup(res.text, 'lxml')
return soup | 6100fa9b669ee498dea354418b3816bbc46b3b26 | 1,998 |
import numpy as np


def gen_task4() -> tuple:
"""Task 4: main corner of a triangle."""
canv = blank_canvas()
r, c = np.random.randint(GRID-2, size=2, dtype=np.int8)
syms = rand_syms(6) # 6 symbols for triangle
# Which orientation? We'll create 4
rand = np.random.rand()
if rand < 0.25:
# top left
rows, cols = [r, r, r, r+1, r+1, r+2], [c, c+1, c+2, c, c+1, c]
elif rand < 0.50:
# top right
rows, cols = [r, r, r, r+1, r+1, r+2], [c+2, c, c+1, c+1, c+2, c+2]
elif rand < 0.75:
# bottom left
rows, cols = [r+2, r, r+1, r+1, r+2, r+2], [c, c, c, c+1, c+1, c+2]
else:
# bottom right
rows, cols = [r+2, r, r+1, r+1, r+2, r+2], [c+2, c+2, c+1, c+2, c, c+1]
canv[rows, cols] = syms
return [4, syms[0]], canv | d367af38a74fd57eb86d001103a1f8656b395209 | 1,999 |