content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
import time
def test(ipu_estimator, args, x_test, y_test):
"""
Test the model on IPU by loading weights from the final checkpoint in the
given `args.model_dir`.
"""
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.prefetch(len(x_test)).cache()
dataset = dataset.batch(args.batch_size, drop_remainder=True)
return dataset
num_test_examples = len(x_test)
steps = num_test_examples // args.batch_size
# IPUEstimator requires no remainder; batches_per_step must divide steps
steps -= steps % args.batches_per_step
print(f"Evaluating on {steps * args.batch_size} examples")
# Set up profiling hook
hooks = []
if args.profile:
hooks.append(ProfilerHook(ipu_estimator.model_dir, name='eval'))
t0 = time.time()
metrics = ipu_estimator.evaluate(input_fn=input_fn,
steps=steps,
hooks=hooks)
t1 = time.time()
test_loss = metrics["loss"]
test_accuracy = metrics["accuracy"]
duration_seconds = t1 - t0
print("Test loss: {:g}".format(test_loss))
print("Test accuracy: {:.2f}%".format(100 * test_accuracy))
print(f"Took {duration_seconds:.2f} seconds to compile and run") | 083c2c830315ccf2602109a4a3e718cecd1b6760 | 3,658,028 |
def _get_service():
"""Gets service instance to start API searches.
Returns:
A Google API Service used to send requests.
"""
# Create the AI Platform service object.
# To authenticate set the environment variable
# GOOGLE_APPLICATION_CREDENTIALS=<path_to_service_account_file>
return googleapiclient.discovery.build('ml', 'v1') | 5d79698216626eff9618dc55b6b651a5da3f5187 | 3,658,029 |
def giq(scores, targets, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
"""
Generalized inverse quantile conformity score function.
E from equation (7) in Romano, Sesia, Candes. Find the minimum tau in [0, 1] such that the correct label enters.
"""
E = -np.ones((scores.shape[0],))
for i in range(scores.shape[0]):
E[i] = get_tau(
scores[i : i + 1, :],
targets[i].item(),
I[i : i + 1, :],
ordered[i : i + 1, :],
cumsum[i : i + 1, :],
penalties[0, :],
randomized=randomized,
allow_zero_sets=allow_zero_sets,
)
return E | 99a877053cf095622184cbbd9043b742c6ae076f | 3,658,030 |
def findUser(userId):
"""
:param userId:
:return: The user obj
Finds a particular user from a dataset.
"""
return user_collection.find_one({"user_id": userId}) | ffca934689c554993ca5d33005a32e4f9afe48cd | 3,658,031 |
def sub_sample_map(data, aug_map, n_input, n_output, n_teach, buffer):
"""
Expands an augmentation map to produce indexes that will allow
target values of previous outputs to be used as inputs
"""
n_io = n_input + n_output
n_req = n_io
teach_range = range(n_teach)
tf_map = []
for map_ in aug_map:
sample = data[map_["orig_sample_idx"]]
n = len(sample)
i = np.random.randint(n - n_io - n_teach - buffer)
j = i + n_req + n_teach + buffer
new_map_ = {"strt_idx": i, "end_idx": j, **map_}
tf_map.append(new_map_)
return tf_map | 05f88939ad2e293e3370f5585ad30f1d9d6256d1 | 3,658,032 |
def rcGetBBModelEnum():
""" Get the BeagleBone model as member of the BBModel Enum. """
return BBModel(rcGetBBModel()) | 90cf6857f2754a1947d017a1a57b11790d534c05 | 3,658,035 |
def ordToString(ordList):
"""Use this function to convert ord values to strings."""
newStrList = []
cstr = ""
for cint in ordList:
cstr += chr(cint)
if cint == 44:
newStrList.append(cstr[:-1])
cstr = ""
return newStrList | 5a836f7fe34803744de90aa2608e3d99a081c7ff | 3,658,036 |
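A quick usage sketch for ordToString (the input is hypothetical): 44 is the ord value of ',', so each comma-terminated run of characters becomes one entry in the result, and any trailing characters without a closing comma are discarded.

```python
values = [ord(c) for c in "cat,dog,"]
print(ordToString(values))  # ['cat', 'dog']
```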
def get_test_data_for_successful_build():
"""Returns a test data set of test suites and cases that passed.
"""
return _get_test_data(["PASSED", "PASSED", "PASSED"]) | 8969d33f887dcc7c7f7fb8148cbcfc7a4eb4d7c1 | 3,658,037 |
import re
import logging
def fromOldAdjacencyList(adjlist, group=False, saturateH=False):
"""
Convert a pre-June-2014 string adjacency list `adjlist` into a set of :class:`Atom` and
:class:`Bond` objects.
It can read both the "old style" that existed for years and the "intermediate style" that
existed for a few months in 2014, with the extra column of integers for lone pairs.
"""
atoms = []
atomdict = {}
bonds = {}
try:
adjlist = adjlist.strip()
lines = adjlist.splitlines()
if adjlist == '' or len(lines) == 0:
raise InvalidAdjacencyListError('Empty adjacency list.')
# Skip the first line if it contains a label
if len(lines[0].split()) == 1:
label = lines.pop(0)
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}')
atomicMultiplicities = {} # these are no longer stored on atoms, so we make a separate dictionary
# Iterate over the remaining lines, generating Atom or GroupAtom objects
for line in lines:
# Sometimes people put spaces after commas, which messes up the
# parse-by-whitespace. Examples include '{Cd, Ct}'.
if mistake1.search(line):
raise InvalidAdjacencyListError(
"Shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group())
)
# Sometimes commas are used to delimit bonds in the bond list,
# so replace them just in case
line = line.replace('},{', '} {')
data = line.split()
# Skip if blank line
if len(data) == 0: continue
# First item is index for atom
# Sometimes these have a trailing period (as if in a numbered list),
# so remove it just in case
aid = int(data[0].strip('.'))
# If second item starts with '*', then atom is labeled
label = ''; index = 1
if data[1][0] == '*':
label = data[1]
index += 1
# Next is the element or functional group element
# A list can be specified with the {,} syntax
atomType = data[index]
if atomType[0] == '{':
atomType = atomType[1:-1].split(',')
else:
atomType = [atomType]
index += 1
# Next is the electron state
radicalElectrons = []; atomSpinMultiplicity = []
elecState = data[index].upper()
if elecState[0] == '{':
elecState = elecState[1:-1].split(',')
else:
elecState = [elecState]
for e in elecState:
if e == '0':
radicalElectrons.append(0); atomSpinMultiplicity.append(1)
elif e == '1':
radicalElectrons.append(1); atomSpinMultiplicity.append(2)
elif e == '2':
radicalElectrons.append(2); atomSpinMultiplicity.append(1)
radicalElectrons.append(2); atomSpinMultiplicity.append(3)
elif e == '2S':
radicalElectrons.append(2); atomSpinMultiplicity.append(1)
elif e == '2T':
radicalElectrons.append(2); atomSpinMultiplicity.append(3)
elif e == '3':
radicalElectrons.append(3); atomSpinMultiplicity.append(4)
elif e == '3D':
radicalElectrons.append(3); atomSpinMultiplicity.append(2)
elif e == '3Q':
radicalElectrons.append(3); atomSpinMultiplicity.append(4)
elif e == '4':
radicalElectrons.append(4); atomSpinMultiplicity.append(5)
elif e == '4S':
radicalElectrons.append(4); atomSpinMultiplicity.append(1)
elif e == '4T':
radicalElectrons.append(4); atomSpinMultiplicity.append(3)
elif e == '4V':
radicalElectrons.append(4); atomSpinMultiplicity.append(5)
elif e == 'X':
radicalElectrons.extend([0,1,2,2])
atomSpinMultiplicity.extend([1,2,1,3])
index += 1
# Next number defines the number of lone electron pairs (if provided)
lonePairsOfElectrons = -1
if len(data) > index:
lpState = data[index]
if lpState[0] == '{':
# this is the start of the chemical bonds - no lone pair info was provided
lonePairsOfElectrons = -1
else:
if lpState == '0':
lonePairsOfElectrons = 0
if lpState == '1':
lonePairsOfElectrons = 1
if lpState == '2':
lonePairsOfElectrons = 2
if lpState == '3':
lonePairsOfElectrons = 3
if lpState == '4':
lonePairsOfElectrons = 4
index += 1
else: # no bonds or lone pair info provided.
lonePairsOfElectrons = -1
# Create a new atom based on the above information
if group:
# charge currently not allowed
atom = GroupAtom(atomType=atomType,
radicalElectrons=sorted(set(radicalElectrons)),
charge=[0],
label=label,
lonePairs=(None if lonePairsOfElectrons==-1 else [lonePairsOfElectrons])
)
else:
atom = Atom(element=atomType[0],
radicalElectrons=radicalElectrons[0],
charge=0,
label=label,
lonePairs=lonePairsOfElectrons
)
atomicMultiplicities[atom] = atomSpinMultiplicity
# Add the atom to the list
atoms.append(atom)
atomdict[aid] = atom
# Process list of bonds
bonds[aid] = {}
for datum in data[index:]:
# Sometimes commas are used to delimit bonds in the bond list,
# so strip them just in case
datum = datum.strip(',')
aid2, comma, order = datum[1:-1].partition(',')
aid2 = int(aid2)
if aid == aid2:
raise InvalidAdjacencyListError('Attempted to create a bond between atom {0:d} and itself.'.format(aid))
if order[0] == '{':
order = order[1:-1].split(',')
else:
order = [order]
bonds[aid][aid2] = order
if group:
multiplicity = None
else:
multiplicity = 1
for atom in atoms:
multiplicity += max(atomicMultiplicities[atom]) - 1
# Check consistency using bonddict
for atom1 in bonds:
for atom2 in bonds[atom1]:
if atom2 not in bonds:
raise InvalidAdjacencyListError('Atom {0:d} not in bond dictionary.'.format(atom2))
elif atom1 not in bonds[atom2]:
raise InvalidAdjacencyListError('Found bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2))
elif bonds[atom1][atom2] != bonds[atom2][atom1]:
raise InvalidAdjacencyListError('Found bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1]))
# Convert bonddict to use Atom[group] and Bond[group] objects
atomkeys = sorted(atomdict.keys())
for aid1 in atomkeys:
atomkeys2 = sorted(bonds[aid1].keys())
for aid2 in atomkeys2:
if aid1 < aid2:
atom1 = atomdict[aid1]
atom2 = atomdict[aid2]
order = bonds[aid1][aid2]
if group:
bond = GroupBond(atom1, atom2, order)
elif len(order) == 1:
bond = Bond(atom1, atom2, order[0])
else:
raise InvalidAdjacencyListError('Multiple bond orders specified for an atom in a Molecule.')
atom1.edges[atom2] = bond
atom2.edges[atom1] = bond
if saturateH and not group:
# Add explicit hydrogen atoms to complete structure if desired
valences = {'H': 1, 'C': 4, 'O': 2, 'N': 3, 'S': 2, 'Si': 4, 'Cl': 1, 'He': 0, 'Ne': 0, 'Ar': 0}
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
newAtoms = []
for atom in atoms:
try:
valence = valences[atom.symbol]
except KeyError:
raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown valence for atom "{0}".'.format(atom.symbol))
radical = atom.radicalElectrons
order = 0
for atom2, bond in atom.bonds.items():
order += orders[bond.order]
count = valence - radical - int(order)
for i in range(count):
a = Atom(element='H', radicalElectrons=0, charge=0, label='')
b = Bond(atom, a, 'S')
newAtoms.append(a)
atom.bonds[a] = b
a.bonds[atom] = b
atoms.extend(newAtoms)
# Calculate the number of lone pair electrons requiring molecule with all hydrogen atoms present
if not group and lonePairsOfElectrons == -1:
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
for atom in atoms:
radical = atom.radicalElectrons
order = 0
for atom2, bond in atom.bonds.items():
order += orders[bond.order]
lonePairs = (1 if atom.symbol == 'H' or atom.symbol == 'He' else 4) - order - radical
atom.setLonePairs(lonePairs)
atom.updateCharge()
elif not group:
for atom in atoms:
atom.updateCharge()
except InvalidAdjacencyListError:
logging.error("Troublesome adjacency list:\n" + adjlist)
raise
return atoms, multiplicity | 0c54ee172948437f9cb075c5880eb7eb25d2893f | 3,658,038 |
def read_arg_optional(
src, args, n_optional=-1, tolerance=0, mode=MODE_NON_MATH, skip_math=False):
"""Read next optional argument from buffer.
If the command has remaining optional arguments, look for:
a. A spacer. Skip the spacer if it exists.
b. A bracket delimiter. If the optional argument is bracket-delimited,
the contents of the bracket group are used as the argument.
:param Buffer src: a buffer of tokens
:param TexArgs args: existing arguments to extend
:param int n_optional: Number of optional arguments. If < 0, all valid
bracket groups will be captured.
:param int tolerance: error tolerance level (only supports 0 or 1)
:param str mode: math or not math mode
:return: number of remaining optional arguments
:rtype: int
"""
while n_optional != 0:
spacer = read_spacer(src)
if not (src.hasNext() and src.peek().category == TC.BracketBegin):
if spacer:
src.backward(1)
break
args.append(read_arg(src, next(src), tolerance=tolerance, mode=mode, skip_math=skip_math))
n_optional -= 1
return n_optional | 641fe9ab9a96b6e59e15b115abe843fa09a07659 | 3,658,039 |
def searcheduxapian_ajax_get_schlagwort(request, item_container):
""" moegliche Schlagworte """
schlagworte = get_schlagworte(request.GET['query'])
res = '<items>\n'
for schlagwort in schlagworte:
res += '<schlagwort>\n<name><![CDATA[%s]]></name>\n</schlagwort>\n' % schlagwort.name
res += '</items>\n'
return HttpResponse(res, mimetype="text/xml; charset=utf-8") | 5a248ced5006d49f2dc303c68957d07ba187c3d5 | 3,658,040 |
def expanded_X_y_sample_weights(X, y_proba, expand_factor=10,
sample_weight=None, shuffle=True,
random_state=None):
"""
scikit-learn can't optimize cross-entropy directly if target
probability values are not indicator vectors.
As a workaround this function expands the dataset according to
target probabilities. ``expand_factor=None`` means no dataset
expansion.
"""
rng = check_random_state(random_state)
if expand_factor:
if sample_weight is not None:
X, y, sample_weight = zip(*expand_dataset(X, y_proba,
factor=expand_factor,
random_state=rng,
extra_arrays=[
sample_weight
]))
else:
X, y = zip(*expand_dataset(X, y_proba,
factor=expand_factor,
random_state=rng))
else:
y = y_proba.argmax(axis=1)
if isinstance(X, (list, tuple)) and len(X) and issparse(X[0]):
X = vstack(X)
if shuffle:
if sample_weight is not None:
X, y, sample_weight = _shuffle(X, y, sample_weight,
random_state=rng)
else:
X, y = _shuffle(X, y, random_state=rng)
return X, y, sample_weight | 7398062d3eb75fa68c39e20415b944e58a20387e | 3,658,041 |
def refine_uniformly(dom, seg):
"""
Refine all edges of the given domain and segmentation.
:param dom: Domain to refine
:type dom: :class:`viennagrid.Domain`
:param seg: Segmentation of the domain to refine
:type seg: :class:`viennagrid.Segmentation`
:returns: A two-element tuple containing the output domain and segmentation after the refinement.
:raises: TypeError
"""
try:
config = dom.config
dom = dom._domain
except AttributeError:
raise TypeError('parameter at position 1 is not a valid domain')
try:
seg = seg._segmentation
except AttributeError:
raise TypeError('parameter at position 2 is not a valid segmentation')
refined_result = viennagrid.wrapper.refine_uniformly(dom, seg)
refined_domain = viennagrid.Domain(config)
refined_domain._domain = refined_result[0]
refined_segmentation = viennagrid.Segmentation(refined_domain)
refined_segmentation._segmentation = refined_result[1]
return (refined_domain, refined_segmentation) | 623b9fc2fa6c83133ca1e01714fecba7e70ab95e | 3,658,042 |
def rename_tuning(name, new_name):
"""rename tuning"""
session = tables.get_session()
if session is None:
return False, 'connect'
try:
tuning_table = TuningTable()
if not tuning_table.check_exist_by_name(TuningTable, name, session):
return False, 'tuning not exist'
if tuning_table.check_exist_by_name(TuningTable, new_name, session):
return False, 'duplicate'
tuning_table.update_tuning_name(name, new_name, session)
session.commit()
except SQLAlchemyError as err:
LOGGER.error('Rename tuning failed: %s', err)
return False, 'error'
finally:
session.close()
return True, '' | 1ea1498483fc9abe0bb5be7a7c892c6a171b5df9 | 3,658,043 |
import re
def _xfsdump_output(data):
"""
Parse CLI output of the xfsdump utility.
"""
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out["Session ID"] = line.split(" ")[-1]
elif line.startswith("session label:"):
out["Session label"] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out["Media size"] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out["Dump complete"] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out["Status"] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out["Summary"] = " ".join(summary)
return out | dbc7fbf9dced99b83a7dc5917c473a1dee16d749 | 3,658,045 |
def get_current():
"""Return the currently running interpreter."""
id = _interpreters.get_current()
return Interpreter(id) | 0949280d364cc6f2935b9109c19c508ed06352b8 | 3,658,046 |
def csstext(text: str, cls: str, span: bool=False, header: bool=False) -> str:
"""
Custom build HTML text element.
"""
if span:
tag = 'span'
elif header:
tag = 'h1'
else:
tag = 'p'
return f'<{tag} class="{cls}">{str(text)}</{tag}>' | 0833fd9d83143e09b5c234e193a8e53ef653112b | 3,658,047 |
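A brief usage sketch of csstext; the class names are made up for illustration.

```python
print(csstext("Results", cls="title", header=True))  # <h1 class="title">Results</h1>
print(csstext("42", cls="metric", span=True))        # <span class="metric">42</span>
```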
def trans_exam_list_to_colum(example_list, headers=None):
"""
Convert a list of examples into a column-oriented representation, used to adapt the output of additional information.
:param example_list: list of examples
:param headers: attributes to extract; defaults to ("question", "answer", "yes_or_no")
:return: {header1:[...], header2:[...], ...}
"""
if headers is None:
headers = ("question", "answer", "yes_or_no")
result = {}
for header in headers:
result[header] = []
for example in example_list:
for header in headers:
result[header].append(getattr(example, header, ""))
return result | ff5a2e5f6e27ce0a32717e55ba35dbd864a11dbb | 3,658,048 |
def member():
""" RESTful CRUD Controller """
return s3_rest_controller() | 2f14df1f9b97ee4777c2ce0740207c691aedb1c2 | 3,658,049 |
from datetime import datetime
def _now():
"""Get EST localized now datetime."""
return EST_TIMEZONE.localize(datetime.now()) | a7a62b5f5febdbacab0c0ac1e6ef0de843f09a11 | 3,658,050 |
def pydantic_model_to_pandas(pydantic_model_input) -> pd.DataFrame:
"""
Function that transforms <pydantic.BaseModel> child objects to
<pandas.DataFrame> objects
:param pydantic_model_input: Input validator for API
"""
return dict_to_pandas(pydantic_model_input.dict()) | 8397c39d7c760ad44565a7b89013d95c241413ed | 3,658,051 |
def calculate_pair_energy(coordinates, i_particle, box_length, cutoff):
"""
Calculate the interaction energy of a particle with its environment (all other particles in the system) - rewrite
Parameters
----------
coordinates : list
The coordinates for all particles in the system
i_particle : int
The particle number for which to calculate the energy
cutoff : float
The simulation cutoff. Beyond this distance, interactions are not calculated.
Returns
-------
e_total : float
The pairwise interaction energy of the i-th particle with all other particles in the system.
"""
e_total = 0.0
i_position = coordinates[i_particle]
distance_array = calculate_distance(coordinates, i_position, box_length)
# Just so we don't use it for calculation
distance_array[i_particle] = cutoff*2
less_than_cutoff = distance_array[distance_array < cutoff]
interaction_energies = calculate_LJ(less_than_cutoff)
e_total = np.sum(interaction_energies)
return e_total | 42150ff5282731b13e4ac512c08fd71566f0bdb4 | 3,658,052 |
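The helpers calculate_distance and calculate_LJ are referenced above but not shown. A minimal sketch of what they might look like, assuming reduced Lennard-Jones units (sigma = epsilon = 1) and the minimum-image convention for periodic boundaries; this is illustrative only, not the original implementation.

```python
import numpy as np

def calculate_LJ(r_ij):
    # Pairwise Lennard-Jones energy in reduced units (sigma = epsilon = 1).
    r6 = (1.0 / np.asarray(r_ij)) ** 6
    return 4.0 * (r6 ** 2 - r6)

def calculate_distance(coordinates, i_position, box_length):
    # Distances from one position to every particle, wrapped with the minimum-image convention.
    delta = np.asarray(coordinates) - np.asarray(i_position)
    delta -= box_length * np.round(delta / box_length)
    return np.sqrt((delta ** 2).sum(axis=1))
```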
def simulation_activation(model, parcel_df, aerosols_panel):
""" Given the DataFrame output from a parcel model simulation, compute
activation kinetic limitation diagnostics.
Parameters
----------
model : ParcelModel
The ParcelModel
parcel_df : DataFrame used to generate the results to be analyzed
The DataFrame containing the parcel's thermodynamic trajectory
aerosols_panel : Panel
A Panel collection of DataFrames containing the aerosol size evolution
Returns
-------
act_stats : DataFrame
A DataFrame containing the activation statistics
"""
initial_row = parcel_df.iloc[0]
Smax_i, T_i = initial_row['S'], initial_row['T']
acts = {'eq': [], 'kn': [], 'alpha': [], 'phi': []}
initial_aerosols = model.aerosols
N_all_modes = np.sum([aer.total_N for aer in initial_aerosols])
N_fracs = {aer.species: aer.total_N/N_all_modes for aer in initial_aerosols}
for i in range(len(parcel_df)):
row_par = parcel_df.iloc[i]
rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel}
# Update thermo
T_i = row_par['T']
if row_par['S'] > Smax_i:
Smax_i = row_par['S']
eq_tot, kn_tot, alpha_tot, phi_tot = 0., 0., 0., 0.
for aerosol in initial_aerosols:
N_frac = N_fracs[aerosol.species]
rs = rows_aer[aerosol.species]
eq, kn, alpha, phi = binned_activation(Smax_i, T_i, rs, aerosol)
eq_tot += eq*N_frac
kn_tot += kn*N_frac
alpha_tot += alpha*N_frac
phi_tot += phi*N_frac
acts['kn'].append(kn_tot)
acts['eq'].append(eq_tot)
acts['alpha'].append(alpha_tot)
acts['phi'].append(phi_tot)
acts_total = pd.DataFrame(acts, index=parcel_df.index)
return acts_total | 41461da13062177124ca4ebedc801ff5d574fbb8 | 3,658,053 |
def create_config(
case=None, Exp='Dummy', Type='Tor',
Lim=None, Bump_posextent=[np.pi/4., np.pi/4],
R=None, r=None, elong=None, Dshape=None,
divlow=None, divup=None, nP=None,
returnas=None, strict=None,
SavePath='./', path=_path_testcases,
):
""" Create easily a tofu.geom.Config object
In tofu, a Config (short for geometrical configuration) refers to the 3D
geometry of a fusion device.
It includes, at least, a simple 2D polygon describing the first wall of the
fusion chamber, and can also include other structural elements (tiles,
limiters...) that can be non-axisymmetric.
To create a simple Config, provide either the name of a reference test
case, of a set of geometrical parameters (major radius, elongation...).
This is just a tool for fast testing, if you want to create a custom
config, use directly tofu.geom.Config and provide the parameters you want.
Parameters
----------
case : str
The name of a reference test case, if provided, this arguments is
sufficient, the others are ignored
Exp : str
The name of the experiment
Type : str
The type of configuration (toroidal 'Tor' or linear 'Lin')
Lim_Bump: list
The angular (poloidal) limits, in the cross-section of the extension of
the outer bumper
R : float
The major radius of the center of the cross-section
r : float
The minor radius of the cross-section
elong: float
An elongation parameter (in [-1;1])
Dshape: float
A parameter specifying the D-shape of the cross-section (in [-1;1])
divlow: bool
A flag specifying whether to include a lower divertor-like shape
divup: bool
A flag specifying whether to include an upper divertor-like shape
nP: int
Number of points used to describe the cross-section polygon
out: str
Flag indicating whether to return:
- 'dict' : the polygons as a dictionary of np.ndarrays
- 'object': the configuration as a tofu.geom.Config instance
returnas: object / dict
Flag indicating whether to return the config as:
- object: a Config instance
- dict: a dict of Struct instances
strict: bool
Flag indicating whether to raise an error if a Struct cannot be loaded
Otherwise only raises a warning
path: str
Absolute path where to find the test case data
SavePath: str
The default path used for saving Struct and Config objects returned by
the routine.
Return
------
conf: tofu.geom.Config / dict
Depending on the value of parameter out, either:
- the tofu.geom.Config object created
- a dictionary of the polygons and their pos/extent (if any)
"""
lp = [R, r, elong, Dshape, divlow, divup, nP]
lpstr = '[R, r, elong, Dshape, divlow, divup, nP]'
lc = [case is not None,
any([pp is not None for pp in lp])]
if np.sum(lc) > 1:
msg = ("Please provide either:\n"
+ "\t- case: the name of a pre-defined config\n"
+ "\t- geometrical parameters {}\n\n".format(lpstr))
raise Exception(msg)
elif not any(lc):
msg = get_available_config(verb=False, returnas=str)
raise Exception(msg)
# Get config, either from known case or geometrical parameterization
if case is not None:
conf = _create_config_testcase(
config=case,
path=path,
returnas=returnas,
strict=strict,
)
else:
poly, pbump, pbaffle = _compute_VesPoly(R=R, r=r,
elong=elong, Dshape=Dshape,
divlow=divlow, divup=divup,
nP=nP)
if returnas == 'dict':
conf = {'Ves':{'Poly':poly},
'Baffle':{'Poly':pbaffle},
'Bumper':{'Poly':pbump,
'pos':Bump_posextent[0],
'extent':Bump_posextent[1]}}
else:
ves = _core.Ves(Poly=poly, Type=Type, Lim=Lim, Exp=Exp, Name='Ves',
SavePath=SavePath)
baf = _core.PFC(Poly=pbaffle, Type=Type, Lim=Lim,
Exp=Exp, Name='Baffle', color='b', SavePath=SavePath)
bump = _core.PFC(Poly=pbump, Type=Type,
pos=Bump_posextent[0], extent=Bump_posextent[1],
Exp=Exp, Name='Bumper', color='g', SavePath=SavePath)
conf = _core.Config(Name='Dummy', Exp=Exp, lStruct=[ves,baf,bump],
SavePath=SavePath)
return conf | e9f855ff614cd511f730afd34c34e1d610b06a43 | 3,658,055 |
def is_project_description(description):
"""Validates the specified project description.
A valid description is simply a non-empty string.
Args:
description (str): A project description to validate.
Returns:
<bool, str|None>: A pair containing the value True if the specified description
is valid, False otherwise; and an error message in case the description is invalid.
"""
try:
return (False, "A project description must be a non-empty string.") if is_empty_string(description) else (True, None)
except TypeError:
return (False, "The 'description' argument must be a string.") | ef831f2ddeede75bb1dbd0730dccacba3e379c2b | 3,658,056 |
import json
def remove_friend():
"""
Removes an existing friendship.
"""
data = json.loads(request.data)
friend_id = data['id']
user = interface.get_user_by_id(get_jwt_identity())
friend = interface.get_user_by_id(friend_id)
interface.remove_friendship(user, friend)
return '', 200 | 0d5e2c390d5da7ff1869d907bbe85bcda80a9513 | 3,658,057 |
import functools
import logging
def ensure_configured(func):
"""Modify a function to call ``basicConfig`` first if no handlers exist."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(logging.root.handlers) == 0:
basicConfig()
return func(*args, **kwargs)
return wrapper | 2c04afd53ab9c7341fc4913485a8a1f7f7e7e1b3 | 3,658,058 |
from typing import Dict
from typing import Any
from typing import Optional
from typing import Type
def get_loss(dataset_properties: Dict[str, Any], name: Optional[str] = None) -> Type[Loss]:
"""
Utility function to get losses for the given dataset properties.
If name is mentioned, checks if the loss is compatible with
the dataset properties and returns the specific loss
Args:
dataset_properties (Dict[str, Any]): Dictionary containing
properties of the dataset. Must contain task_type and
output_type as strings.
name (Optional[str]): name of the specific loss
Returns:
Type[torch.nn.modules.loss._Loss]
"""
assert 'task_type' in dataset_properties, \
"Expected dataset_properties to have task_type got {}".format(dataset_properties.keys())
assert 'output_type' in dataset_properties, \
"Expected dataset_properties to have output_type got {}".format(dataset_properties.keys())
task = STRING_TO_TASK_TYPES[dataset_properties['task_type']]
output_type = STRING_TO_OUTPUT_TYPES[dataset_properties['output_type']]
supported_losses = get_supported_losses(task, output_type)
if name is not None:
if name not in supported_losses.keys():
raise ValueError("Invalid name entered for task {}, and output type {} currently supported losses"
" for task include {}".format(dataset_properties['task_type'],
dataset_properties['output_type'],
list(supported_losses.keys())))
else:
loss = supported_losses[name]
else:
loss = get_default(task)
return loss | a9f75d6e2c35a0b9472e3fdc046f72eb1188e48d | 3,658,059 |
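A usage sketch for get_loss; the task_type/output_type strings and the loss name below are hypothetical and must match whatever the module's STRING_TO_TASK_TYPES, STRING_TO_OUTPUT_TYPES, and supported-loss mappings actually contain.

```python
dataset_properties = {"task_type": "tabular_classification", "output_type": "multiclass"}
loss_cls = get_loss(dataset_properties, name="CrossEntropyLoss")  # assumed-valid loss name
criterion = loss_cls()
```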
def ten_to_base(value : int, base):
"""Converts a given decimal value into the specified base.
:param value: The number to convert
:param base: The base to convert the specified number to
:return: The converted value in the specified base
"""
# Check if the base is 10, return the value
if base == 10:
return value
# Keep track of the remainders, which will be the new digits in the specified base
remainders = []
# Divide the value by the base until the number is 0
while value != 0:
remainders.append(value % base)
value //= base
# Reverse the order of the remainders and turn each digit
# into the proper value from the BASES string
remainders.reverse()
for i in range(len(remainders)):
remainders[i] = BASES[remainders[i]]
return "".join(remainders) | 2f5ba92af48fe2ce19dbcb6001afadfde2514373 | 3,658,060 |
def get_style(selector, name):
"""
Returns the resolved CSS style for the given property name.
:param selector:
:param name:
"""
if not get_instance():
raise Exception("You need to start a browser first with open_browser()")
return get_style_g(get_instance(), selector, name) | 903e4abc09dc196d0d1dbbbb3c58869e3c0beb78 | 3,658,061 |
import inspect
from functools import wraps
def argmod(*args):
"""
Decorator that intercepts and modifies function arguments.
Args:
from_param (str|list): A parameter or list of possible parameters that
should be modified using `modifier_func`. Passing a list of
possible parameters is useful when a function's parameter names
have changed, but you still want to support the old parameter
names.
to_param (str): Optional. If given, to_param will be used as the
parameter name for the modified argument. If not given, to_param
will default to the last parameter given in `from_param`.
modifier_func (callable): The function used to modify the `from_param`.
Returns:
function: A function that modifies the given `from_param` before the
function is called.
"""
from_param = listify(args[0])
to_param = from_param[-1] if len(args) < 3 else args[1]
modifier_func = args[-1]
def _decorator(func):
try:
argspec = inspect.getfullargspec(unwrap(func))
except AttributeError:
argspec = inspect.getargspec(unwrap(func))
if to_param not in argspec.args:
return func
arg_index = argspec.args.index(to_param)
@wraps(func)
def _modifier(*args, **kwargs):
kwarg = False
for arg in from_param:
if arg in kwargs:
kwarg = arg
break
if kwarg:
kwargs[to_param] = modifier_func(kwargs.pop(kwarg))
elif arg_index < len(args):
args = list(args)
args[arg_index] = modifier_func(args[arg_index])
return func(*args, **kwargs)
return _modifier
return _decorator | 5824d20568a3913be59941df0e4f657f05f08cc0 | 3,658,062 |
def group_update(group_id, group_min, group_max, desired):
"""
Test with invalid input
>>> group_update('foo', 2, 1, 4)
{}
"""
if group_min > group_max or desired < group_min or desired > group_max:
return {}
try:
client = boto3.client('autoscaling')
response = client.update_auto_scaling_group(
AutoScalingGroupName=group_id,
MinSize=group_min,
MaxSize=group_max,
DesiredCapacity=desired)
except botocore.exceptions.ClientError:
print "Autoscaling client error: update_auto_scaling_group"
sys.exit(127)
return response | 77eef10b7db604a3aa2e32bdd0d226fb44cf07ab | 3,658,063 |
def remove_bookmark(request, id):
"""
This view deletes a bookmark.
If requested via ajax it also returns the add bookmark form to replace the
drop bookmark form.
"""
bookmark = get_object_or_404(Bookmark, id=id, user=request.user)
if request.method == "POST":
bookmark.delete()
if not is_xhr(request):
messages.success(request, "Bookmark removed")
if request.POST.get("next"):
return HttpResponseRedirect(request.POST.get("next"))
return HttpResponse("Deleted")
return render(
request,
"admin_tools/menu/add_bookmark_form.html",
context={
"url": request.POST.get("next"),
"title": "**title**", # replaced on the javascript side
},
)
return render(
request,
"admin_tools/menu/delete_confirm.html",
context={"bookmark": bookmark, "title": "Delete Bookmark"},
) | 9c8442d5a313e7babf71b9f9a4c41452c65c5aab | 3,658,064 |
def parse_propa(blob):
"""Creates new blob entries for the given blob keys"""
if "track_in" in blob.keys():
muon = blob["track_in"]
blob["Muon"] = Table(
{
"id": np.array(muon)[:, 0].astype(int),
"pos_x": np.array(muon)[:, 1],
"pos_y": np.array(muon)[:, 2],
"pos_z": np.array(muon)[:, 3],
"dir_x": np.array(muon)[:, 4],
"dir_y": np.array(muon)[:, 5],
"dir_z": np.array(muon)[:, 6],
"energy": np.array(muon)[:, 7],
"time": np.array(muon)[:, 8],
"particle_id": np.array(muon)[:, 9].astype(int),
"is_charm": np.array(muon)[:, 10].astype(int),
"mother_pid": np.array(muon)[:, 11].astype(int),
"grandmother_pid": np.array(muon)[:, 11].astype(int),
},
h5loc="muon",
)
blob["MuonMultiplicity"] = Table(
{"muon_multiplicity": len(np.array(muon)[:, 6])}, h5loc="muon_multiplicity"
)
if "neutrino" in blob.keys():
nu = blob["neutrino"]
blob["Neutrino"] = Table(
{
"id": np.array(nu)[:, 0].astype(int),
"pos_x": np.array(nu)[:, 1],
"pos_y": np.array(nu)[:, 2],
"pos_z": np.array(nu)[:, 3],
"dir_x": np.array(nu)[:, 4],
"dir_y": np.array(nu)[:, 5],
"dir_z": np.array(nu)[:, 6],
"energy": np.array(nu)[:, 7],
"time": np.array(nu)[:, 8],
"particle_id": np.array(nu)[:, 9].astype(int),
"is_charm": np.array(nu)[:, 10].astype(int),
"mother_pid": np.array(nu)[:, 11].astype(int),
"grandmother_pid": np.array(nu)[:, 11].astype(int),
},
h5loc="nu",
)
blob["NeutrinoMultiplicity"] = Table(
{
"total": len(np.array(nu)[:, 6]),
"nue": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == 12]),
"anue": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == -12]),
"numu": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == 14]),
"anumu": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == -14]),
},
h5loc="nu_multiplicity",
)
if ("track_in" or "neutrino") in blob.keys():
blob["Weights"] = Table(
{
"w1": blob["weights"][0][0],
"w2": blob["weights"][0][1],
"w3": blob["weights"][0][2],
},
h5loc="weights",
)
if "track_primary" in blob.keys():
primary = blob["track_primary"]
blob["Primary"] = Table(
{
"id": np.array(primary)[:, 0].astype(int),
"pos_x": np.array(primary)[:, 1],
"pos_y": np.array(primary)[:, 2],
"pos_z": np.array(primary)[:, 3],
"dir_x": np.array(primary)[:, 4],
"dir_y": np.array(primary)[:, 5],
"dir_z": np.array(primary)[:, 6],
"energy": np.array(primary)[:, 7],
"time": np.array(primary)[:, 8],
"particle_id": np.array(primary)[:, 9].astype(int),
},
h5loc="primary",
)
return blob | ae7993d6e51287b6a88d125f63ef7d6edd001cf1 | 3,658,065 |
def parseParams(opt):
"""Parse a set of name=value parameters in the input value.
Return list of (name,value) pairs.
Raise ValueError if a parameter is badly formatted.
"""
params = []
for nameval in opt:
try:
name, val = nameval.split("=")
except ValueError:
raise ValueError("Bad name=value format for '%s'" % nameval)
params.append((name, val))
return params | b932f74c8e5502ebdd7a8749c2de4b30921d518b | 3,658,066 |
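A usage sketch for parseParams.

```python
print(parseParams(["alpha=0.5", "mode=fast"]))  # [('alpha', '0.5'), ('mode', 'fast')]
parseParams(["badformat"])  # raises ValueError: Bad name=value format for 'badformat'
```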
from ._sparse_array import SparseArray
def asnumpy(a, dtype=None, order=None):
"""Returns a dense numpy array from an arbitrary source array.
Args:
a: Arbitrary object that can be converted to :class:`numpy.ndarray`.
order ({'C', 'F', 'A'}): The desired memory layout of the output
array. When ``order`` is 'A', it uses 'F' if ``a`` is
fortran-contiguous and 'C' otherwise.
Returns:
numpy.ndarray: Converted array on the host memory.
"""
if isinstance(a, SparseArray):
a = a.todense()
return np.array(a, dtype=dtype, copy=False, order=order) | 54bea22ab6fe8327b3a4df93ac9e4447b4d65fec | 3,658,067 |
def get_next_cpi_date():
"""
Get next CPI release date
"""
df = pd.read_html(r"https://www.bls.gov/schedule/news_release/cpi.htm")[0][:-1]
df["Release Date"] = pd.to_datetime(df["Release Date"], errors='coerce')
df = df[df["Release Date"] >= current_date].iloc[0]
df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')
return df | e23b9bea0996ac442115163729ffeed1407e52b5 | 3,658,068 |
from typing import Tuple
from datetime import datetime, timedelta
def date_arithmetic() -> Tuple[datetime, datetime, int]:
""" This function is used to calculate
what is the date after 3 days is given
and the differences between two given dates """
date1: str = "Feb 27, 2020"
date_2020: datetime = datetime.strptime(
date1, "%b %d, %Y") + timedelta(3)
date2: str = "Feb 27, 2019"
date_2019: datetime = datetime.strptime(
date2, "%b %d, %Y") + timedelta(3)
date3: str = "Feb 1, 2019"
date4: str = "Sep 30, 2019"
days_passed: int = datetime.strptime(
date3, "%b %d, %Y") - datetime.strptime(date4, "%b %d, %Y")
three_days_after_02272020: datetime = date_2020.strftime("%b %d, %Y")
three_days_after_02272019: datetime = date_2019.strftime("%b %d, %Y")
days_passed_02012019_09302019: int = abs(days_passed.days)
return three_days_after_02272020, three_days_after_02272019, days_passed_02012019_09302019 | 1e2d4681578ccab11612771589a46f22246071eb | 3,658,069 |
def get_words_from_line_list(text):
"""
Applies Translations and returns the list of words from the text document
"""
text = text.translate(translation_table)
word_list = [x for x in text.split() if x not in set(stopwords.words('english'))]
return word_list | aaa2a1476e887aa6a7d477d67528f838d6f229b9 | 3,658,070 |
def _get_name(dist):
"""Attempts to get a distribution's short name, excluding the name scope."""
return getattr(dist, 'parameters', {}).get('name', dist.name) | fd57e523c1a84a36f9ed56236e4b8db1e887575c | 3,658,071 |
def compute_mean_std(all_X):
"""Return an approximate mean and std for every feature"""
concatenated = np.concatenate(all_X, axis=0).astype(np.float64)
mean = np.mean(concatenated, axis=0)
std = np.std(concatenated, axis=0)
std[std == 0] = 1
return mean, std | b102a045705efdab8d9783e04da192ec30e167f7 | 3,658,072 |
def GenerateConfig(context):
"""Generates configuration."""
key_ring = {
'name': 'keyRing',
'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings',
'properties': {
'parent': 'projects/' + context.env['project'] + '/locations/' + context.properties['region'],
'keyRingId': context.env['deployment'] + '-key-ring'
}
}
crypto_key = {
'name': 'cryptoKey',
'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings.cryptoKeys',
'properties': {
'parent': '$(ref.keyRing.name)',
'cryptoKeyId': context.env['deployment'] + '-crypto-key',
'purpose': 'ENCRYPT_DECRYPT'
}
}
resources = [key_ring, crypto_key]
outputs = [{
'name': 'primaryVersion',
'value': '$(ref.cryptoKey.primary)'
}]
return { 'resources': resources, 'outputs': outputs } | 257b7217c1a08bba46866aff0b7faa1a03fe7fdc | 3,658,073 |
def get_valid_collapsed_products(products, limit):
"""wraps around collapse_products and respecting a limit
to ensure that uncomplete products are not collapsed
"""
next_min_scanid = get_next_min_scanid(products, limit)
collapsed_products = []
for scanid, scan in groupby(products, itemgetter('ScanID')):
if scanid == next_min_scanid:
continue
collapsed_products.extend(collapse_products(list(scan)))
return collapsed_products, next_min_scanid | df3ffa503855a020c7c011aa58cba20243e19be4 | 3,658,074 |
def get_imd():
"""Fetches data about LA IMD status.
The "local concentration" measure is used -
this gives higher weight to particularly deprived areas
Source: http://www.gov.uk/government/statistics/english-indices-of-deprivation-2019
"""
imd = pd.read_csv(
PROJECT_DIR / "inputs/data/societal-wellbeing_imd2019_indicesbyla.csv",
usecols=[1, 2],
skiprows=7,
)
return imd | 4e2495dda505bde8dd8ccf62234730a7227ffa97 | 3,658,075 |
def read_bgr(file):
"""指定ファイルからBGRイメージとして読み込む.
# Args:
file: イメージファイル名.
# Returns:
成功したらイメージ、失敗したら None.
"""
return cv2.imread(file, cv2.IMREAD_COLOR) | ee96842899ffefa0508218d0a4b721f2ae5a7efb | 3,658,076 |
def _remove_none_from_post_data_additional_rules_list(json):
"""
removes hidden field value from json field "additional_rules" list,
which is there to ensure field exists for editing purposes
:param json: this is data that is going to be posted
"""
data = json
additional_rules = json.get("additional_rules", None)
if additional_rules and "None" in additional_rules:
new_additional_rules = []
for rule in additional_rules:
if rule != "None":
new_additional_rules.append(rule)
data["additional_rules"] = new_additional_rules
return data | c82aa568f82ba4abcb8f4e6f9c770969277d078f | 3,658,077 |
import traceback
def add_email(request, pk):
"""
This Endpoint will add the email id into
the person contact details.
It expects personId in URL param.
"""
try:
request_data = request.data
email = request_data.get("email")
person = Person.objects.filter(id=pk).last()
if email:
Email.objects.create(
email=email,
person_id=person.id
)
serializer = PersonDetailSerializer(person)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
except:
print(traceback.format_exc())
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR) | 5669b442e7ef4fb3e5a22053c368e6a4b68cfeef | 3,658,078 |
from typing import Any
def coords_extracter():
"""Exctract coords to send command to robot.
To be executed inside of xarm_hand_control module."""
SKIPPED_COMMANDS = 5
COEFF = 22
current = [0]
def coords_to_command(data: Any):
current[0] += 1
if current[0] < SKIPPED_COMMANDS:
return
current[0] = 0
if np.linalg.norm(data[0:2], 2) < 0.05:
return
x = data[0] * COEFF / 1000
z = data[1] * COEFF / 1000
# speed = np.linalg.norm(data, ord=2) * COEFF * 50
# speed = int(speed)
# # speed = np.log(speed) * COEFF
# mvacc = speed * 10
speed = 500
mvacc = speed * 10
command = Command(
x=x,
y=0.0,
z=z,
speed=speed,
acc=mvacc,
is_radian=True,
is_cartesian=True,
is_relative=True,
)
# print(command)
send_command(command)
return coords_to_command | 930cef91e517751da3c3fb441543ab736be6aa23 | 3,658,079 |
def NO_MERGE(writer, segments):
"""This policy does not merge any existing segments.
"""
return segments | 0742365f30d59cb219ac60483b867180bd910ba8 | 3,658,080 |
def build_ntwk(p, s_params):
"""
Construct a network object from the model and
simulation params.
"""
np.random.seed(s_params['RNG_SEED'])
# set membrane properties
n = p['N_PC'] + p['N_INH']
t_m = cc(
[np.repeat(p['T_M_PC'], p['N_PC']), np.repeat(p['T_M_INH'], p['N_INH'])])
e_l = cc(
[np.repeat(p['E_L_PC'], p['N_PC']), np.repeat(p['E_L_INH'], p['N_INH'])])
v_th = cc(
[np.repeat(p['V_TH_PC'], p['N_PC']), np.repeat(p['V_TH_INH'], p['N_INH'])])
v_r = cc(
[np.repeat(p['V_R_PC'], p['N_PC']), np.repeat(p['V_R_INH'], p['N_INH'])])
t_rp = cc(
[np.repeat(p['T_R_PC'], p['N_PC']), np.repeat(p['T_R_INH'], p['N_INH'])])
# set latent nrn positions
lb = [-s_params['BOX_W']/2, -s_params['BOX_H']/2]
ub = [s_params['BOX_W']/2, s_params['BOX_H']/2]
# sample evenly spaced place fields
## E cells
pfxs_e, pfys_e = cxn.apx_lattice(lb, ub, p['N_PC'], randomize=True)
## I cells
pfxs_i, pfys_i = cxn.apx_lattice(lb, ub, p['N_INH'], randomize=True)
## join E & I place fields
pfxs = cc([pfxs_e, pfxs_i])
pfys = cc([pfys_e, pfys_i])
# make upstream ws
if p['W_PC_PL'] > 0:
w_pc_pl_flat = np.random.lognormal(
*lognormal_mu_sig(p['W_PC_PL'], p['S_PC_PL']), p['N_PC'])
else:
w_pc_pl_flat = np.zeros(p['N_PC'])
if p['W_PC_G'] > 0:
w_pc_g_flat = np.random.lognormal(
*lognormal_mu_sig(p['W_PC_G'], p['S_PC_G']), p['N_PC'])
else:
w_pc_g_flat = np.zeros(p['N_PC'])
ws_up_temp = {
'E': {
('PC', 'PL'): np.diag(w_pc_pl_flat),
('PC', 'G'): np.diag(w_pc_g_flat),
},
}
targs_up = cc([np.repeat('PC', p['N_PC']), np.repeat('INH', p['N_INH'])])
srcs_up = cc([np.repeat('PL', p['N_PC']), np.repeat('G', p['N_PC'])])
ws_up = join_w(targs_up, srcs_up, ws_up_temp)
# make rcr ws
w_pc_pc = cxn.make_w_pc_pc(pfxs[:p['N_PC']], pfys[:p['N_PC']], p)
w_inh_pc = cxn.make_w_inh_pc(
pfxs_inh=pfxs[-p['N_INH']:],
pfys_inh=pfys[-p['N_INH']:],
pfxs_pc=pfxs[:p['N_PC']],
pfys_pc=pfys[:p['N_PC']],
p=p)
w_pc_inh = cxn.make_w_pc_inh(
pfxs_pc=pfxs[:p['N_PC']],
pfys_pc=pfys[:p['N_PC']],
pfxs_inh=pfxs[-p['N_INH']:],
pfys_inh=pfys[-p['N_INH']:],
p=p)
ws_rcr_temp = {
'E': {
('PC', 'PC'): w_pc_pc,
('INH', 'PC'): w_inh_pc,
},
'I': {
('PC', 'INH'): w_pc_inh,
},
}
targs_rcr = cc([np.repeat('PC', p['N_PC']), np.repeat('INH', p['N_INH'])])
ws_rcr = join_w(targs_rcr, targs_rcr, ws_rcr_temp)
# make ntwk
ntwk = LIFNtwk(
t_m=t_m,
e_l=e_l,
v_th=v_th,
v_r=v_r,
t_r=t_rp,
es_syn={'E': p['E_E'], 'I': p['E_I']},
ts_syn={'E': p['T_E'], 'I': p['T_I']},
ws_up=ws_up,
ws_rcr=ws_rcr)
ntwk.pfxs = pfxs
ntwk.pfys = pfys
ntwk.types_up = srcs_up
ntwk.types_rcr = targs_rcr
ntwk.n_pc = p['N_PC']
ntwk.n_inh = p['N_INH']
ntwk.n_g = p['N_PC']
ntwk.n_inp = p['N_PC']
ntwk.n_rcr = p['N_PC'] + p['N_INH']
ntwk.n_up = 2 * p['N_PC']
ntwk.types_up_slc = {
'PL': slice(0, p['N_PC']),
'G': slice(p['N_PC'], 2*p['N_PC'])
}
ntwk.types_rcr_slc = {
'PC': slice(0, p['N_PC']),
'INH': slice(p['N_PC'], p['N_PC'] + p['N_INH'])
}
return ntwk | 88a5b5c73edf015d9b2b4137db874252f63a3571 | 3,658,082 |
def createAaronWorld():
"""
Create an empty world as an example to build future projects from.
"""
# Set up a barebones project
project = makeBasicProject()
# Create sprite sheet for the player sprite
player_sprite_sheet = addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
# add a sprite we can use for the rocks
a_rock_sprite = addSpriteSheet(project, "rock.png", "rock", "static")
a_dog_sprite = addSpriteSheet(project, "dog.png", "dog", "static")
# Add a background image
default_bkg = makeBackground("placeholder.png", "placeholder")
project.backgrounds.append(default_bkg)
a_scene = makeScene(f"Scene", default_bkg)
project.scenes.append(a_scene)
actor = makeActor(a_rock_sprite, 9, 8)
a_scene['actors'].append(actor)
dog_actor = makeActor(a_dog_sprite, 5, 5)
dog_script = []
element = makeElement()
element["command"] = "EVENT_ACTOR_EMOTE"
element["args"] = {
"actorId": "player",
"emoteId": "1"
}
dog_script.append(element)
element = makeElement()
element["command"] = "EVENT_END"
dog_script.append(element)
dog_actor["script"] = dog_script
a_scene['actors'].append(dog_actor)
# Add some music
project.music.append(makeMusic("template", "template.mod"))
# Set the starting scene
project.settings["startSceneId"] = project.scenes[0]["id"]
return project | 7326aee61ee4977ccc422955fbd33c6a51b13e37 | 3,658,083 |
def builtin_ljustify(s, w, p):
"""Left-justify a string to a given width with a given padding character."""
sv = s.convert(BStr()).value
pv = p.convert(BStr()).value
return BStr(sv.ljust(w.value, pv)) | dda28d65d1916a7e01aa36e7b90ee5ba98329c58 | 3,658,084 |
def get_effective_router(appname):
"""Returns a private copy of the effective router for the specified application"""
if not routers or appname not in routers:
return None
return Storage(routers[appname]) | dd0e3ccc8d05864b5a324541129845e5f82c2669 | 3,658,086 |
def is_activated(user_id):
"""Checks if a user has activated their account. Returns True or false"""
cur = getDb().cursor()
cur.execute('SELECT inactive FROM users where user_id=%s', (user_id,))
inactive = cur.fetchone()[0]
cur.close()
return False if inactive == 1 else True | 704a5c3462be3612e5cd44057ee082d612ae8aa9 | 3,658,087 |
import base64
import json
def _encode(dictionary):
"""Encodes any arbitrary dictionary into a pagination token.
Args:
dictionary: (dict) Dictionary to basee64-encode
Returns:
(string) encoded page token representing a page of items
"""
# Strip ugly base64 padding.
return base64.urlsafe_b64encode(json.dumps(dictionary)).rstrip('=') | e9a490e659a3a0e6d546fd2ab4dd89a5f6a748af | 3,658,088 |
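A hypothetical decode counterpart (not part of the original snippet) has to restore the '=' padding that _encode strips before base64-decoding; a minimal sketch following the same str handling as _encode.

```python
def _decode(token):
    # Re-add the padding stripped by _encode, then reverse the base64/JSON encoding.
    padding = '=' * (-len(token) % 4)
    return json.loads(base64.urlsafe_b64decode(token + padding))
```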
def selectPlate(plates, jdRange, normalise=False, scope='all'):
"""From a list of simulated plates, returns the optimal one."""
# Gets the JD range for the following night
nextNightJDrange = _getNextNightRange(jdRange)
# First we exclude plates without new exposures
plates = [plate for plate in plates if plate._after['nNewExposures'] > 0]
# Sorts plates by inverse plate completion.
plates = sorted(plates, reverse=True, key=lambda plate: plate.getPlateCompletion()
if plate.getPlateCompletion() <= 1 else 1. / plate.getPlateCompletion())
if len(plates) == 0:
return None
# If we are scheduling only plugged plates, we rather plug a new plate
# unless we can observe a plugged plate at least for a whole set.
availableTime = (jdRange[1] - jdRange[0]) * 24.
completionIncrease = np.array(
[plate._after['completion'] - plate._before['completion'] for plate in plates])
# minSchedulingTime ensures that if the remaining time < length of a set,
# we still use the plugged plates, if any.
if scope == 'plugged':
if (availableTime > minSchedulingTime and np.all(completionIncrease == 0)):
return None
else:
# If no plate has been observed for a whole set, we try to use first
# plates that are already plugged.
if np.all(completionIncrease == 0):
pluggedPlates = [plate for plate in plates if plate.isPlugged]
if len(pluggedPlates) > 0:
plates = pluggedPlates
# If plugger, tries to select only plates at APO
if scope == 'plugged':
platesAtAPO = [plate for plate in plates if plate.getLocation() == 'APO']
if len(platesAtAPO) > 0:
plates = platesAtAPO
# Now tries to select only plates that have been marked.
markedPlates = [
plate for plate in plates if 'Accepted' in [status.label for status in plate.statuses]
]
if len(markedPlates) > 0:
plates = markedPlates
# We check if any of the plate is complete after the simulation.
# If so, we return the one with fewer new exposures.
completePlates = [plate for plate in plates
if plate._after['completion'] > plate.completion_factor]
nNewExposures = [plate._after['nNewExposures'] for plate in completePlates]
if len(completePlates) > 0:
return completePlates[np.argmin(nNewExposures)]
# We record the real completion before and after. We will normalise the
# other completions based on our scheduling logic.
for plate in plates:
plate._before['realCompletion'] = plate._before['completion']
plate._before['realCompletion+'] = plate._before['completion+']
plate._after['realCompletion'] = plate._after['completion']
plate._after['realCompletion+'] = plate._after['completion+']
# If normalise=True, we divide the several completion values by the
# length of the observing window for the plate, normalised by the length
# of the minimum plate window. The effect of this is that plates with short
# observing windows get comparatively larger completions and, thus, have
# higher chance of being selected. This is good for plugged plates, as it
# tries to schedule first plates with short windows even if other plates
# could be completed at the time.
# We also increase the completion of plates for which we have patched sets,
# while we penalise those with incomplete sets. With this logic, we hope
# that plates are observed when their incomplete sets can be patched.
if normalise:
_normaliseWindowLength(plates, jdRange, factor=1.0, apply=True)
# We also normalise using the following night, if possible.
if nextNightJDrange is not None:
_normaliseWindowLength(plates, nextNightJDrange, factor=nextNightFactor, apply=True)
# Now we normalise plate completion using a metric that gives higher
# priority to plates for which we have patched incomplete sets.
patchedSetFactor = []
for plate in plates:
nSetsFactor = 0
for ss in plate.sets:
if not ss.isMock:
nNewExps = 0
for exp in ss.totoroExposures:
if hasattr(exp, '_tmp') and exp._tmp:
nNewExps += 1
setComplete = ss.getStatus()[0] in ['Good', 'Excellent']
if setComplete and nNewExps == 0:
pass
else:
if nNewExps > 0:
nSetsFactor += 2 * nNewExps
if setComplete:
nSetsFactor *= 2
else:
nSetsFactor -= 1
patchedSetFactor.append(1. + patchSetFactor * nSetsFactor)
_completionFactor(plates, patchedSetFactor)
# We add the priority into the mix
platePriorities = np.array([plate.priority for plate in plates]) - 5.
_completionFactor(plates, 1 + platePriorityFactor * platePriorities)
ancillaryPriorities = []
for plate in plates:
if hasattr(plate, 'ancillary_weight'):
ancillaryPriorities.append(plate.ancillary_weight)
else:
ancillaryPriorities.append(1)
_completionFactor(plates, np.array(ancillaryPriorities))
# Selects the plates that have the largest increase in completion
completionIncrease = [plate._after['completion'] - plate._before['completion']
for plate in plates if plate.completion_factor <= 1.]
if len(completionIncrease) == 0:
for plate in plates:
if plate.completion_factor > 1:
completionIncrease.append(
plate._after['completion'] - plate._before['completion'])
completionIncrease = np.array(completionIncrease)
plates = np.array(plates)
maxCompletionIncrease = np.max(completionIncrease)
plates = plates[np.where(completionIncrease == maxCompletionIncrease)]
if len(plates) == 1:
return plates[0]
# If maxCompletionIncrease is 0, it means that no plate has been
# observed for at least a set. In this case, if possible, we want to use
# a plate that already has signal.
if maxCompletionIncrease == 0:
platesWithSignal = [plate for plate in plates if plate._before['completion+'] > 0]
if len(platesWithSignal) > 0:
plates = platesWithSignal
# If several plates have maximum completion increase, use the incomplete
# sets to break the tie.
completionIncreasePlus = np.array(
[plate._after['completion+'] - plate._before['completion+'] for plate in plates])
return plates[np.argmax(completionIncreasePlus)] | f48207a9be002e6b2295d4100e02ffaddde779e8 | 3,658,090 |
from typing import Union
def get_events(
raw: mne.io.BaseRaw,
event_picks: Union[str, list[str], list[tuple[str, str]]],
) -> tuple[np.ndarray, dict]:
"""Get events from given Raw instance and event id."""
if isinstance(event_picks, str):
event_picks = [event_picks]
events = None
for event_pick in event_picks:
if isinstance(event_pick, str):
event_id = {event_pick: 1}
else:
event_id = {event_pick[0]: 1, event_pick[1]: -1}
try:
events, _ = mne.events_from_annotations(
raw=raw,
event_id=event_id,
verbose=True,
)
return events, event_id
except ValueError as error:
print(error)
_, event_id_found = mne.events_from_annotations(
raw=raw,
verbose=False,
)
raise ValueError(
f"None of the given `event_picks´ found: {event_picks}."
f"Possible events: {*event_id_found.keys(),}"
) | d1b5c961160848607a40cfcb5b2ba8a47625ab21 | 3,658,091 |
def transplant(root, u, v):
"""
Note: root must be returned here; otherwise the caller cannot see the updated tree root.
"""
if u.parent == None:
root = v
elif u.parent.left == u:
u.parent.left = v
else:
u.parent.right = v
if v:
v.parent = u.parent
return root | cadf0433399e428596d1d0d4ab200e4d79285d21 | 3,658,092 |
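As the note in the docstring says, the caller must capture the returned root. A minimal, self-contained usage sketch with a hypothetical Node class.

```python
class Node:
    def __init__(self, key):
        self.key = key
        self.parent = self.left = self.right = None

# Tiny tree:  2
#            / \
#           1   3
root = Node(2)
root.left = Node(1); root.left.parent = root
root.right = Node(3); root.right.parent = root

# Splice out the root node, replacing it with its right child;
# the reassignment is what actually updates the tree root.
root = transplant(root, root, root.right)
print(root.key)  # 3
```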
def is_head_moderator():
"""
Returns true if invoking author is a Head Moderator (role).
"""
async def predicate(ctx: Context):
if not any(config.HEAD_MOD_ROLE in role.id for role in ctx.author.roles):
raise NotStaff("The command `{}` can only be used by a Head Moderator.".format(ctx.invoked_with))
return True
return commands.check(predicate) | 22f80251190d914d38052e67ca8fe47279eab833 | 3,658,093 |
def compute_adj_matrices(type, normalize=True):
"""
Computes adjacency matrices 'n', 'd' or 's' used in GCRAM.
"""
# Get channel names
raw = mne.io.read_raw_edf('dataset/physionet.org/files/eegmmidb/1.0.0/S001/S001R01.edf', preload=True, verbose=False).to_data_frame()
ch_names = raw.columns[2:]
n_channels = 64
# Compute channel position distances using electrode positions. Required for computing 'd' and 's' adjacency matrices
ch_pos_1010 = get_sensor_pos(ch_names)
ch_pos_1010_names = []
ch_pos_1010_dist = []
for name, value in ch_pos_1010.items():
ch_pos_1010_names.append(name)
ch_pos_1010_dist.append(value)
ch_pos_1010_dist = np.array(ch_pos_1010_dist)
# Compute adjacency matrices
if type=='n':
A = n_graph()
elif type=='d':
A = d_graph(n_channels, ch_pos_1010_dist)
elif type=='s':
A = s_graph(n_channels, ch_pos_1010_dist)
# Normalize adjacency matrices
if normalize:
A = normalize_adj(A)
A = np.array(A, dtype=np.float32)
return A | cecf0c94c5d1efe6c6210949736377d5d7d454c4 | 3,658,094 |
def hasNonAsciiCharacters(sText):
"""
Returns True is specified string has non-ASCII characters, False if ASCII only.
"""
sTmp = unicode(sText, errors='ignore') if isinstance(sText, str) else sText;
return not all(ord(ch) < 128 for ch in sTmp); | c1627d1a0a26e7c4d4e04c84085197ba44b5e640 | 3,658,096 |
def draw_matches(image_1, image_1_keypoints, image_2, image_2_keypoints, matches):
""" Draws the matches between the image_1 and image_2.
(Credit: GT CP2017 course provided source)
Params:
image_1: The first image (can be color or grayscale).
image_1_keypoints: The image_1 keypoints.
image_2: The image to search in (can be color or grayscale)
image_2_keypoints: The image_2 keypoints.
Returns:
output: Image with a line drawn between matched keypoints.
"""
# Compute number of channels.
num_channels = 1
if len(image_1.shape) == 3:
num_channels = image_1.shape[2]
# Separation between images.
margin = 10
# Create an array that will fit both images (with a margin of 10 to
# separate the two images)
joined_image = np.zeros((max(image_1.shape[0], image_2.shape[0]),
image_1.shape[1] + image_2.shape[1] + margin,
3))
if num_channels == 1:
for channel_idx in range(3):
joined_image[:image_1.shape[0],
:image_1.shape[1],
channel_idx] = image_1
joined_image[:image_2.shape[0],
image_1.shape[1] + margin:,
channel_idx] = image_2
else:
joined_image[:image_1.shape[0], :image_1.shape[1]] = image_1
joined_image[:image_2.shape[0], image_1.shape[1] + margin:] = image_2
for match in matches:
image_1_point = (int(image_1_keypoints[match.queryIdx].pt[0]),
int(image_1_keypoints[match.queryIdx].pt[1]))
image_2_point = (int(image_2_keypoints[match.trainIdx].pt[0] +
image_1.shape[1] + margin),
int(image_2_keypoints[match.trainIdx].pt[1]))
rgb = (np.random.rand(3) * 255).astype(int)
cv2.circle(joined_image, image_1_point, 5, rgb, thickness=-1)
cv2.circle(joined_image, image_2_point, 5, rgb, thickness=-1)
cv2.line(joined_image, image_1_point, image_2_point, rgb, thickness=3)
return joined_image | d2984de4c542fca7dda3863ad934e9eddc14a375 | 3,658,097 |
def copy_ttl_in():
"""
COPY_TTL_IN Action
"""
return _action("COPY_TTL_IN") | a01acb2645e033ad658435e9ca0323aec10b720c | 3,658,098 |
import torch
from typing import Optional
def neuron_weight(
layer: str,
weight: torch.Tensor,
x: Optional[int] = None,
y: Optional[int] = None,
batch: Optional[int] = None,
) -> Objective:
"""Linearly weighted channel activation at one location as objective
:param layer: Name of the layer
:type layer: str
:param weight: A torch.Tensor of same length as the number of channels
:type weight: torch.Tensor
:param x: x-position, defaults to None
:type x: Optional[int], optional
:param y: y-position, defaults to None
:type y: Optional[int], optional
:param batch: which position at the batch dimension of the image tensor this objective is applied to, defaults to None
:type batch: Optional[int], optional
:return: Objective to optimize input for a linearly weighted channel activation at one location
:rtype: Objective
"""
@handle_batch(batch)
def inner(model):
layer_t = model(layer)
layer_t = _extract_act_pos(layer_t, x, y)
if weight is None:
return -layer_t.mean()
else:
return -(layer_t.squeeze() * weight).mean()
return inner | 646966613249a02468e00b91157fd43459d246bb | 3,658,099 |
from argparse import ArgumentParser
def genargs() -> ArgumentParser:
"""
Generate an input string parser
:return: parser
"""
parser = ArgumentParser()
parser.add_argument("indir", help="Location of input shexj files")
parser.add_argument("outdir", help="Location of output shexc files")
parser.add_argument("-s", "--save", help="Save edited shexj image before conversion", action="store_true")
return parser | 1958d772e316212f90d6e3b84c5452e4fc02f2da | 3,658,100 |
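# Hypothetical usage sketch for genargs: parse an input directory, an output
# directory and the optional --save flag (paths here are illustrative only).
def _demo_genargs():
    args = genargs().parse_args(["shexj_dir", "shexc_dir", "--save"])
    assert args.indir == "shexj_dir"
    assert args.outdir == "shexc_dir"
    assert args.save is True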
def get_model_name(factory_class):
"""Get model fixture name by factory."""
return (
inflection.underscore(factory_class._meta.model.__name__)
if not isinstance(factory_class._meta.model, str) else factory_class._meta.model) | 1021f287803b5e6dd9231503d8ddab15c355a800 | 3,658,101 |
import cv2
def GetSpatialFeatures(img, size=(32, 32), isFeatureVector=True):
""" Extracts spatial features of the image.
param: img: Source image
param: size: Target image size
param: isFeatureVector: Indication if the result needs to be unrolled into a feature vector
returns: Spatial features
"""
resizedImg = cv2.resize(img, size)
if isFeatureVector:
return resizedImg.ravel()
else:
return resizedImg | 36d864bddb125f7cb13c4bd800076733ab939d58 | 3,658,102 |
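# Hypothetical usage sketch for GetSpatialFeatures: a 64x64 RGB image resized to
# 32x32 and unrolled yields a 32*32*3 = 3072 element feature vector.
def _DemoGetSpatialFeatures():
    import numpy as np
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    features = GetSpatialFeatures(img, size=(32, 32), isFeatureVector=True)
    assert features.shape == (3072,)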
import logging
import re
from lxml import etree, html
def html_to_text(content):
"""Filter out HTML from the text."""
text = content['text']
try:
text = html.document_fromstring(text).text_content()
except etree.Error as e:
logging.error(
'Syntax error while processing {}: {}\n\n'
'Falling back to regexes'.format(text, e))
text = re.sub(r'<[^>]*>', '', text)
text = _to_unicode(text)
content['text'] = text
return content | 43fc18400ef121bf12f683da03763dff229d45ae | 3,658,103 |
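# Hypothetical usage sketch for html_to_text, assuming lxml is installed and the
# module's _to_unicode helper leaves plain ASCII text unchanged; the dict is
# mutated in place and also returned.
def _demo_html_to_text():
    content = {'text': '<p>Hello <b>world</b></p>'}
    result = html_to_text(content)
    assert result['text'] == 'Hello world'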
async def get_robot_positions() -> control.RobotPositionsResponse:
"""
    Positions determined experimentally by issuing move commands. The
    change-pipette position offsets the mount to the left or right so that a
    user can easily access the pipette mount screws with a screwdriver. The
    attach-tip position places either pipette roughly in the front-center of
    the deck area.
"""
robot_positions = control.RobotPositions(
change_pipette=control.ChangePipette(
target=control.MotionTarget.mount, left=[300, 40, 30], right=[95, 40, 30]
),
attach_tip=control.AttachTip(
target=control.MotionTarget.pipette, point=[200, 90, 150]
),
)
return control.RobotPositionsResponse(positions=robot_positions) | 816f1794231aa2690665caa8eae26c301d55b198 | 3,658,104 |
def compute_basis(normal):
""" Compute an orthonormal basis for a vector. """
u = [0.0, 0.0, 0.0]
v = [0.0, 0.0, 0.0]
u[0] = -normal[1]
u[1] = normal[0]
u[2] = 0.0
if ((u[0] == 0.0) and (u[1] == 0.0)):
u[0] = 1.0
mag = vector_mag(u)
if (mag == 0.0):
return
for i in range(0, 3):
u[i] = u[i] / mag
v = cross_product(normal, u)
mag = vector_mag(v)
if (mag != 0.0):
for i in range(0, 3):
v[i] = v[i] / mag
return u, v | 3623a80fcb86d506e5f9e2f94d98a69a2831b2a5 | 3,658,105 |
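# Worked example for compute_basis, assuming vector_mag and cross_product are the
# usual Euclidean helpers returning 3-element sequences: the z-axis normal should
# yield the x and y unit vectors.
def _demo_compute_basis():
    u, v = compute_basis([0.0, 0.0, 1.0])
    assert list(u) == [1.0, 0.0, 0.0]
    assert list(v) == [0.0, 1.0, 0.0]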
from scipy.stats import norm
def do_LEE_correction(max_local_sig, u1, u2, exp_phi_1, exp_phi_2):
"""
Return the global p-value for an observed local significance
after correcting for the look-elsewhere effect
given expected Euler characteristic exp_phi_1 above level u1
and exp_phi_2 above level u2
"""
n1, n2 = get_coefficients(u1,u2,exp_phi_1, exp_phi_2)
this_global_p = global_pvalue(max_local_sig**2, n1, n2)
print ' n1, n2 =', n1, n2
print ' local p_value = %f, local significance = %f' %(norm.cdf(-max_local_sig), max_local_sig)
print 'global p_value = %f, global significance = %f' %(this_global_p, -norm.ppf(this_global_p))
return this_global_p | 53ee295261d58c59aa1a0a667ec7ded2e986c256 | 3,658,106 |
def _check_password(request, mail_pass, uid):
"""
    [Method summary]
      Password check
"""
error_msg = {}
if len(mail_pass) <= 0:
error_msg['mailPw'] = get_message('MOSJA10004', request.user.get_lang_mode())
logger.user_log('LOSI10012', request=request)
logger.logic_log('LOSM17015', request=request)
else:
password_hash = OaseCommon.oase_hash(mail_pass)
        user = User.objects.filter(user_id=uid).first()
if not user:
error_msg['mailPw'] = get_message('MOSJA32010', request.user.get_lang_mode())
logger.user_log('LOSI10013', request=request)
logger.logic_log('LOSM17001', request=request)
if user and user.password != password_hash:
error_msg['mailPw'] = get_message('MOSJA32038', request.user.get_lang_mode())
logger.user_log('LOSI10013', request=request)
logger.logic_log('LOSM17016', request=request)
return error_msg | 8753d0ed32db0c501f2f18af9ea88253b7a1add7 | 3,658,107 |
import pandas as pd
def _read_wb_indicator(indicator: str, start: int, end: int) -> pd.DataFrame:
"""Read an indicator from WB"""
return pd.read_feather(config.paths.data + rf"/{indicator}_{start}_{end}.feather") | 87a52d5f683fc9795a7baf9ff81f2961567c3a13 | 3,658,108 |
import numpy as np
from matplotlib import cm
def scatter_raster_plot(spike_amps, spike_depths, spike_times, n_amp_bins=10, cmap='BuPu',
                        subsample_factor=100, display=False):
    """
    Prepare data for 2D raster plot of spikes with colour and size indicative of spike amplitude
    :param spike_amps: spike amplitudes (V)
    :param spike_depths: spike depths, i.e. distance from the probe tip (um)
    :param spike_times: spike times (s)
    :param n_amp_bins: no. of colour and size bins into which to split amplitude data
    :param cmap: name of the matplotlib colormap used to colour spikes by amplitude
    :param subsample_factor: factor by which to subsample data when too many points for efficient
    display
    :param display: generate figure
    :return: ScatterPlot object, if display=True also returns matplotlib fig and ax objects
    """
amp_range = np.quantile(spike_amps, [0, 0.9])
amp_bins = np.linspace(amp_range[0], amp_range[1], n_amp_bins)
color_bin = np.linspace(0.0, 1.0, n_amp_bins + 1)
colors = (cm.get_cmap(cmap)(color_bin)[np.newaxis, :, :3][0])
spike_amps = spike_amps[0:-1:subsample_factor]
spike_colors = np.zeros((spike_amps.size, 3))
spike_size = np.zeros(spike_amps.size)
for iA in range(amp_bins.size):
if iA == (amp_bins.size - 1):
idx = np.where(spike_amps > amp_bins[iA])[0]
# Make saturated spikes the darkest colour
spike_colors[idx] = colors[-1]
else:
idx = np.where((spike_amps > amp_bins[iA]) & (spike_amps <= amp_bins[iA + 1]))[0]
spike_colors[idx] = [*colors[iA]]
spike_size[idx] = iA / (n_amp_bins / 8)
data = ScatterPlot(x=spike_times[0:-1:subsample_factor], y=spike_depths[0:-1:subsample_factor],
c=spike_amps * 1e6, cmap='BuPu')
data.set_ylim((0, 3840))
data.set_color(color=spike_colors)
data.set_clim(clim=amp_range * 1e6)
data.set_marker_size(marker_size=spike_size)
data.set_labels(title='Spike times vs Spike depths', xlabel='Time (s)',
ylabel='Distance from probe tip (um)', clabel='Spike amplitude (uV)')
if display:
fig, ax = plot_scatter(data.convert2dict())
return data.convert2dict(), fig, ax
return data | a72da0b1faacb5e13da51a2dc192778d956eb7e5 | 3,658,110 |
def is_pack_real(*args):
"""
is_pack_real(F) -> bool
'FF_PACKREAL'
@param F (C++: flags_t)
"""
return _ida_bytes.is_pack_real(*args) | 64e3ecf58607cf7c363e84a7e5a69ce0c76e8acc | 3,658,111 |
import ast
from typing import List
from typing import Tuple
def _get_sim205(node: ast.UnaryOp) -> List[Tuple[int, int, str]]:
"""Get a list of all calls of the type "not (a <= b)"."""
errors: List[Tuple[int, int, str]] = []
if (
not isinstance(node.op, ast.Not)
or not isinstance(node.operand, ast.Compare)
or len(node.operand.ops) != 1
or not isinstance(node.operand.ops[0], ast.LtE)
):
return errors
comparison = node.operand
left = to_source(comparison.left)
right = to_source(comparison.comparators[0])
errors.append(
(node.lineno, node.col_offset, SIM205.format(a=left, b=right))
)
return errors | f0efdf0b10a0d4ec8a4a75772277169aa708e005 | 3,658,112 |
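# Hypothetical usage sketch for _get_sim205, assuming the module's to_source
# helper round-trips AST nodes back to source text: "not (a <= b)" is flagged,
# while "not (a < b)" is not.
def _demo_get_sim205():
    import ast
    flagged = ast.parse("not (a <= b)", mode="eval").body
    assert len(_get_sim205(flagged)) == 1
    clean = ast.parse("not (a < b)", mode="eval").body
    assert _get_sim205(clean) == []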
from typing import Union
def parse_boolean(val: str) -> Union[str, bool]:
"""Try to parse a string into boolean.
The string is returned as-is if it does not look like a boolean value.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
if val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
return val | e2cbda5a849e1166e0f2a3953220c93d1f3ba119 | 3,658,113 |
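# Hypothetical usage sketch for parse_boolean: recognised truthy/falsy strings
# become bools, anything else is returned lower-cased.
def _demo_parse_boolean():
    assert parse_boolean("Yes") is True
    assert parse_boolean("OFF") is False
    assert parse_boolean("Maybe") == "maybe"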
import csv
import datetime
def load_users(usertable):
"""
`usertable` is the path to a CSV with the following fields:
user.*
account.organisation
SELECT user.*, account.organisation FROM user LEFT JOIN account ON user.account_id = account.id;
"""
users = []
with open(usertable) as f:
reader = csv.reader(f)
next(reader) # skip headers
for row in reader:
fields = iter(row)
_ = next(fields) # id
email = next(fields)
hash = next(fields)
password_invalid = bool(int(next(fields)))
salt = next(fields)
first_name = next(fields)
last_name = next(fields)
is_verified = bool(int(next(fields)))
is_admin = bool(int(next(fields)))
_ = next(fields) # verification_uuid
_ = next(fields) # account_id
created = datetime.datetime.strptime(next(fields), DATE_FORMAT)
updated = datetime.datetime.strptime(next(fields), DATE_FORMAT)
# from user.account:
organisation = next(fields)
# not importing these:
if password_invalid or not is_verified or (is_admin and email == 'admin@scionlab.org'):
print('Warning: not importing user %s' % email)
continue
encoded = reencode_password(salt, hash)
users.append(UserDef(email, encoded, first_name, last_name, organisation,
created, updated))
return users | 44c1dad255e2d8152fadbb53523a53002af95001 | 3,658,114 |
def get_progress_status_view(request):
"""Get progress status of a given task
Each submitted task is identified by an ID defined when the task is created
"""
if 'progress_id' not in request.params:
raise HTTPBadRequest("Missing argument")
return get_progress_status(request.params['progress_id']) | 4e68fc45443443187032ce06552e97895316be41 | 3,658,115 |
def pretty_param_string(param_ids: "collection") -> str:
"""Creates a nice string showing the parameters in the given collection"""
return ' '.join(sorted(param_ids, key=utilize_params_util.order_param_id)) | 10f955480fcf760317f78d478c837c93df598e08 | 3,658,116 |
import tensorflow as tf
def _center_crop(image, size):
"""Crops to center of image with specified `size`."""
# Reference: https://github.com/mlperf/inference/blob/master/v0.5/classification_and_detection/python/dataset.py#L144 # pylint: disable=line-too-long
height = tf.shape(image)[0]
width = tf.shape(image)[1]
out_height = size
out_width = size
# Reference code:
# left = (width - out_width) / 2
# right = (width + out_width) / 2
# top = (height - out_height) / 2
# bottom = (height + out_height) / 2
# img = img.crop((left, top, right, bottom))
offset_height = tf.to_int32((height - out_height) / 2)
offset_width = tf.to_int32((width - out_width) / 2)
image = tf.image.crop_to_bounding_box(
image,
offset_height,
offset_width,
target_height=out_height,
target_width=out_width,
)
return image | 2409d06945e77633f70de3e76f7152f61a9eaacf | 3,658,117 |
import numpy as np
def resample(ts, values, num_samples):
"""Convert a list of times and a list of values to evenly spaced samples with linear interpolation"""
assert np.all(np.diff(ts) > 0)
ts = normalize(ts)
return np.interp(np.linspace(0.0, 1.0, num_samples), ts, values) | 9453bba67add0307276ff71e85605812af337379 | 3,658,118 |
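# Hypothetical usage sketch for resample, assuming the module's normalize helper
# linearly rescales the timestamps to span [0, 1]: three unevenly spaced samples
# are interpolated onto five evenly spaced ones.
def _demo_resample():
    import numpy as np
    ts = np.array([0.0, 1.0, 4.0])
    values = np.array([0.0, 1.0, 4.0])
    assert np.allclose(resample(ts, values, 5), [0.0, 1.0, 2.0, 3.0, 4.0])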
def supports_color(stream) -> bool: # type: ignore
"""Determine whether an output stream (e.g. stdout/stderr) supports displaying colored text.
A stream that is redirected to a file does not support color.
"""
    return hasattr(stream, "isatty") and stream.isatty() | 4a427d6725206ef33b3f4da0ace6f2d6c3db78a9 | 3,658,120
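# Hypothetical usage sketch for supports_color: a StringIO stand-in (as when
# output is captured or redirected) reports False, while a real TTY reports True.
def _demo_supports_color():
    import io
    import sys
    assert supports_color(io.StringIO()) is False
    print(supports_color(sys.stdout))  # True only when stdout is a terminal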
from typing import Union
from typing import Optional
from typing import Tuple
from typing import Dict
from typing import List
from bs4 import BeautifulSoup
def parse_repo_links(
html: Union[str, bytes],
base_url: Optional[str] = None,
from_encoding: Optional[str] = None,
) -> Tuple[Dict[str, str], List[Link]]:
"""
.. versionadded:: 0.7.0
Parse an HTML page from a simple repository and return a ``(metadata,
links)`` pair.
The ``metadata`` element is a ``Dict[str, str]``. Currently, the only key
that may appear in it is ``"repository_version"``, which maps to the
repository version reported by the HTML page in accordance with :pep:`629`.
If the HTML page does not contain a repository version, this key is absent
from the `dict`.
The ``links`` element is a list of `Link` objects giving the hyperlinks
found in the HTML page.
:param html: the HTML to parse
:type html: str or bytes
:param Optional[str] base_url: an optional URL to join to the front of the
links' URLs (usually the URL of the page being parsed)
:param Optional[str] from_encoding: an optional hint to Beautiful Soup as
to the encoding of ``html`` when it is `bytes` (usually the ``charset``
parameter of the response's :mailheader:`Content-Type` header)
:rtype: Tuple[Dict[str, str], List[Link]]
:raises UnsupportedRepoVersionError: if the repository version has a
greater major component than the supported repository version
"""
soup = BeautifulSoup(html, "html.parser", from_encoding=from_encoding)
base_tag = soup.find("base", href=True)
if base_tag is not None:
if base_url is None:
base_url = base_tag["href"]
else:
base_url = urljoin(base_url, base_tag["href"])
if base_url is None:
def basejoin(url: str) -> str:
return url
else:
def basejoin(url: str) -> str:
assert isinstance(base_url, str)
return urljoin(base_url, url)
metadata = {}
pep629_meta = soup.find(
"meta",
attrs={"name": "pypi:repository-version", "content": True},
)
if pep629_meta is not None:
metadata["repository_version"] = pep629_meta["content"]
check_repo_version(metadata["repository_version"])
links = []
for link in soup.find_all("a", href=True):
links.append(
Link(
text="".join(link.strings).strip(),
url=basejoin(link["href"]),
attrs=link.attrs,
)
)
return (metadata, links) | 556ba2bb728c26668548d4f714dc12b1cf2b48bd | 3,658,121 |
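# Hypothetical usage sketch for parse_repo_links on a minimal simple-repository
# page, assuming the module's Link objects expose .text and .url and that
# check_repo_version accepts repository version "1.0".
def _demo_parse_repo_links():
    page = (
        '<html><head>'
        '<meta name="pypi:repository-version" content="1.0">'
        '</head><body>'
        '<a href="foo-1.0.tar.gz#sha256=abc123">foo-1.0.tar.gz</a>'
        '</body></html>'
    )
    metadata, links = parse_repo_links(
        page, base_url="https://example.invalid/simple/foo/"
    )
    assert metadata == {"repository_version": "1.0"}
    assert links[0].text == "foo-1.0.tar.gz"
    assert links[0].url == (
        "https://example.invalid/simple/foo/foo-1.0.tar.gz#sha256=abc123"
    )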
def calc_kappa4Franci(T_K, a_H, a_H2CO3s):
"""
Calculates kappa4 in the PWP equation using approach from Franci's code.
Parameters
----------
T_K : float
temperature Kelvin
a_H : float
activity of hydrogen (mol/L)
a_H2CO3s : float
activity of carbonic acid (mol/L)
Returns
-------
kappa4 : float
constant kappa4 in the PWP equation (cm^4/mmol/s)
Notes
-----
See more info under documentation for pwpRateFranci().
"""
K_2 = calc_K_2(T_K)
K_c = calc_K_c(T_K)
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
kappa4 = (K_2/K_c)*(kappa1 + 1/a_H*(kappa2*a_H2CO3s + kappa3) )
return kappa4 | a5dcab9d871c7e78031ec74fef5f172e2a37f51b | 3,658,122 |
def get_candidates_from_single_line(single_line_address, out_spatial_reference, max_locations):
""" parses the single line address and passes it to the AGRC geocoding service
and then returns the results as an array of candidates
"""
try:
parsed_address = Address(single_line_address)
except Exception:
return []
return make_request(
parsed_address.normalized, parsed_address.zip_code or parsed_address.city, out_spatial_reference, max_locations
) | a2e4c68dc5a27dea98951bfe61c5e10ff887091a | 3,658,123 |
def create_store():
"""Gathers all the necessary info to create a new store"""
print("What is the name of the store?")
store_name = raw_input('> ')
return receipt.Store(store_name) | a9b78c73712b9ec3ed39f5851b970aa97e5d3575 | 3,658,124 |
def vgg11_bn_vib(cutting_layer, logger, num_client = 1, num_class = 10, initialize_different = False, adds_bottleneck = False, bottleneck_option = "C8S1"):
"""VGG 11-layer model (configuration "A") with batch normalization"""
return VGG_vib(make_layers(cutting_layer,cfg['A'], batch_norm=True, adds_bottleneck = adds_bottleneck, bottleneck_option = bottleneck_option), logger, num_client = num_client, num_class = num_class, initialize_different = initialize_different) | bf893f1e720aae92275abc961675a83c507425ee | 3,658,125 |
from pathlib import Path
def get_conf_paths(project_metadata):
"""
Get conf paths using the default kedro patterns, and the CONF_ROOT
    directory set in the project's settings.py
"""
configure_project(project_metadata.package_name)
session = KedroSession.create(project_metadata.package_name)
_activate_session(session, force=True)
context = session.load_context()
pats = ("catalog*", "catalog*/**", "**/catalog*")
conf_paths = context.config_loader._lookup_config_filepaths(Path(context.config_loader.conf_paths[0]), pats, set())
return conf_paths | f001dc7d6991c57f32afb3d8d6e607d24bfd61cd | 3,658,126 |
import numpy
import ctypes
def _mat_ptrs(a):
"""Creates an array of pointers to matrices
Args:
a: A batch of matrices on GPU
Returns:
GPU array of pointers to matrices
"""
return cuda.to_gpu(numpy.arange(
a.ptr, a.ptr + a.shape[0] * a.strides[0], a.strides[0],
dtype=ctypes.c_void_p)) | 1b56c7b9cbc368612fb0f0f7ecd647b5045773a2 | 3,658,127 |
def file_upload_quota_broken(request):
"""
You can't change handlers after reading FILES; this view shouldn't work.
"""
response = file_upload_echo(request)
request.upload_handlers.insert(0, QuotaUploadHandler())
return response | ed9dab36b4f67a58e90542411474da733887f4b4 | 3,658,128 |
def create_LED_indicator_rect(**kwargs) -> QPushButton:
"""
Useful kwargs:
text: str, icon: QIcon, checked: bool, parent
checked=False -> LED red
checked=True -> LED green
"""
button = QPushButton(checkable=True, enabled=False, **kwargs)
button.setStyleSheet(SS_LED_INDICATOR_RECT)
return button | 3323a225b3f9ac6e687bb3a3d1c5f9d6a4459384 | 3,658,129 |
def getAlignments(infile):
""" read a PSL file and return a list of PslRow objects
"""
psls = []
with open(infile, 'r') as f:
for psl in readPsls(f):
psls.append(psl)
return psls | 00d6c0c4e44dd3de46c3bc7f38d40fd169311164 | 3,658,131 |
import numpy
def get_ring_kernel(zs,Rs):
"""Represents the potential influence due to a line charge
density a distance *delta_z* away, at which the azimuthally
symmetric charge distribution has a radius *R*."""
Logger.write('Computing ring kernels over %i x %i points...'%((len(zs),)*2))
#Form index enumerations
diag_inds=numpy.diag_indices(len(zs))
triud_inds=numpy.triu_indices(len(zs),k=0)
triu_inds=numpy.triu_indices(len(zs),k=1) #upper triangle
tril_inds=[triu_inds[1],triu_inds[0]] #lower triangle
global den1,den2
    K=numpy.zeros((len(zs),)*2,dtype=float)
#position "2" corresponds to test charge (rows)
#position "1" corresponds to origin of field (columns)
zs2=zs.reshape((len(zs),1)); zs1=zs.reshape((1,len(zs)))
Rs2=Rs.reshape((len(zs),1)); Rs1=Rs.reshape((1,len(zs)))
dr2=(Rs1-Rs2)**2
dz2=(zs1-zs2)**2
rmod2=(Rs1+Rs2)**2
den1=numpy.sqrt(dz2+dr2)
dzs=list(numpy.diff(zs)); dzs=numpy.array(dzs+[dzs[-1]])
dRs=list(numpy.diff(Rs)); dRs=numpy.array(dRs+[dRs[-1]])
#fill in diagonal with non-vanishing separation,
#proportional to geometric mean of z-bins and local radial difference
den1[diag_inds]=numpy.sqrt(dRs**2+dzs**2)
arg1=-(4*Rs1*Rs2)/den1**2
den2=numpy.sqrt(dz2+rmod2)
arg2=+(4*Rs1*Rs2)/den2**2
#Get elliptic function values
ellipk_triud=interp_ellipk(arg1[triud_inds])
ellipk2_triud=interp_ellipk(arg2[triud_inds])
K[triud_inds]=(ellipk_triud/den1[triud_inds]+\
ellipk2_triud/den2[triud_inds])/numpy.pi
K[tril_inds]=K[triu_inds]
return K | 5cdbeb80c8658334245e4fa93f3acf9ac0f9dbc9 | 3,658,132 |
def dsdh_h(P, h, region = 0):
""" Derivative of specific entropy [kJ kg / kg K kJ]
w.r.t specific enthalpy at constant pressure"""
    if region == 0:
        region = idRegion_h(P, h)
    if region == 1:
        return region1.dsdh_h(P, h)
    elif region == 2:
        return region2.dsdh_h(P, h)
    elif region == 4:
        return region4.dsdh_h(P, h)
else:
return 0.000 | 0ecc9d783524873c2d8537e105c7d5e8814ec80c | 3,658,133 |
from datetime import datetime
def floor_datetime(dt, unit, n_units=1):
"""Floor a datetime to nearest n units. For example, if we want to
floor to nearest three months, starting with 2016-05-06-yadda, it
will go to 2016-04-01. Or, if starting with 2016-05-06-11:45:06
and rounding to nearest fifteen minutes, it will result in
2016-05-06-11:45:00.
"""
if unit == "years":
new_year = dt.year - (dt.year - 1) % n_units
return datetime.datetime(new_year, 1, 1, 0, 0, 0)
elif unit == "months":
new_month = dt.month - (dt.month - 1) % n_units
return datetime.datetime(dt.year, new_month, 1, 0, 0, 0)
elif unit == "weeks":
_, isoweek, _ = dt.isocalendar()
new_week = isoweek - (isoweek - 1) % n_units
return datetime.datetime.strptime(
"%d %02d 1" % (dt.year, new_week), "%Y %W %w"
)
elif unit == "days":
        # days are 1-based, so floor relative to day 1 (mirrors the year/month handling)
        new_day = dt.day - (dt.day - 1) % n_units
return datetime.datetime(dt.year, dt.month, new_day, 0, 0, 0)
elif unit == "hours":
new_hour = dt.hour - dt.hour % n_units
return datetime.datetime(dt.year, dt.month, dt.day, new_hour, 0, 0)
elif unit == "minutes":
new_minute = dt.minute - dt.minute % n_units
return datetime.datetime(
dt.year, dt.month, dt.day, dt.hour, new_minute, 0
)
elif unit == "seconds":
new_second = dt.second - dt.second % n_units
return datetime.datetime(
dt.year, dt.month, dt.day, dt.hour, dt.minute, new_second
)
else:
msg = "Unknown unit type {}".format(unit)
raise ValueError(msg) | 8c4b61b29bf9f254e2da46097e498834b54e960f | 3,658,134 |
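# Hypothetical usage sketch for floor_datetime: floor a timestamp to the nearest
# fifteen minutes and to the nearest three months.
def _demo_floor_datetime():
    import datetime
    dt = datetime.datetime(2016, 5, 6, 11, 52, 30)
    assert floor_datetime(dt, "minutes", 15) == datetime.datetime(2016, 5, 6, 11, 45, 0)
    assert floor_datetime(dt, "months", 3) == datetime.datetime(2016, 4, 1, 0, 0, 0)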
def get_dataset_descriptor(project_id, dataset_id):
"""Get the descriptor for the dataset with given identifier."""
try:
dataset = api.datasets.get_dataset_descriptor(
project_id=project_id,
dataset_id=dataset_id
)
        if dataset is not None:
return jsonify(dataset)
except ValueError as ex:
raise srv.InvalidRequest(str(ex))
raise srv.ResourceNotFound('unknown project \'' + project_id + '\' or dataset \'' + dataset_id + '\'') | 776c7f72730f52e07cf33a6f6b4c7a949810323d | 3,658,135 |
def pe41():
"""
>>> pe41()
7652413
"""
primes = Primes(1000000)
for perm in permutations(range(7, 0, -1)):
n = list_num(perm)
if primes.is_prime(n):
return n
return -1 | bec7969b96f617848f8771dc6d85faf4b01ea648 | 3,658,136 |
def transit_flag(body, time, nsigma=2.0):
"""Return a flag that indicates if times occured near transit of a celestial body.
Parameters
----------
body : skyfield.starlib.Star
Skyfield representation of a celestial body.
time : np.ndarray[ntime,]
Unix timestamps.
nsigma : float
Number of sigma to flag on either side of transit.
Returns
-------
flag : np.ndarray[ntime,]
Boolean flag that is True if the times occur within nsigma of transit
and False otherwise.
"""
time = np.atleast_1d(time)
obs = ephemeris.chime
# Create boolean flag
    flag = np.zeros(time.size, dtype=bool)
# Find transit times
transit_times = obs.transit_times(
body, time[0] - 24.0 * 3600.0, time[-1] + 24.0 * 3600.0
)
# Loop over transit times
for ttrans in transit_times:
# Compute source coordinates
sf_time = ephemeris.unix_to_skyfield_time(ttrans)
pos = obs.skyfield_obs().at(sf_time).observe(body)
alt = pos.apparent().altaz()[0]
dec = pos.cirs_radec(sf_time)[1]
# Make sure body is above horizon
if alt.radians > 0.0:
# Estimate the amount of time the body is in the primary beam
# as +/- nsigma sigma, where sigma denotes the width of the
# primary beam. We use the lowest frequency and E-W (or X) polarisation,
# since this is the most conservative (largest sigma).
window_deg = nsigma * cal_utils.guess_fwhm(
400.0, pol="X", dec=dec.radians, sigma=True
)
window_sec = window_deg * 240.0 * ephemeris.SIDEREAL_S
# Flag +/- window_sec around transit time
begin = ttrans - window_sec
end = ttrans + window_sec
flag |= (time >= begin) & (time <= end)
# Return boolean flag indicating times near transit
return flag | 271378e0a6558491f73968200fcb24ec694f8cbe | 3,658,137 |
def _parse_port_ranges(pool_str):
"""Given a 'N-P,X-Y' description of port ranges, return a set of ints."""
ports = set()
for range_str in pool_str.split(','):
try:
a, b = range_str.split('-', 1)
start, end = int(a), int(b)
except ValueError:
log.error('Ignoring unparsable port range %r.', range_str)
continue
if start < 1 or end > 65535:
log.error('Ignoring out of bounds port range %r.', range_str)
continue
ports.update(set(range(start, end + 1)))
return ports | 6926b326ea301f21e2282edda3bc16169ebe90b4 | 3,658,138 |
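# Hypothetical usage sketch for _parse_port_ranges: overlapping ranges are merged
# into one set of ints; malformed or out-of-bounds ranges would be logged via the
# module-level log and skipped.
def _demo_parse_port_ranges():
    ports = _parse_port_ranges("8000-8002,8002-8003")
    assert ports == {8000, 8001, 8002, 8003}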