content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def linearly_spaced_combinations(bounds, num_samples):
"""
    Return a 2-D array with all linearly spaced combinations within the bounds.
Parameters
----------
bounds : sequence of tuples
The bounds for the variables, [(x1_min, x1_max), (x2_min, x2_max), ...]
    num_samples : integer or array_like
        Number of samples to use for every dimension. Can be a constant if
        the same number should be used for all, or an array to fine-tune
        precision. The total number of data points is prod(num_samples).
Returns
-------
combinations : 2-d array
        A 2-d array. If d = len(bounds) and l = prod(num_samples) then it
is of size l x d, that is, every row contains one combination of
inputs.
"""
bounds = np.atleast_2d(bounds)
num_vars = len(bounds)
num_samples = np.broadcast_to(num_samples, num_vars)
# Create linearly spaced test inputs
inputs = [np.linspace(b[0], b[1], n) for b, n in zip(bounds,
num_samples)]
# Convert to 2-D array
return combinations(inputs) | 4d493290ae5c8af91f2f0dce5041c12c22bb6aaa | 569 |
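# A minimal usage sketch for linearly_spaced_combinations. The `combinations`
# helper it calls is not shown above, so the stand-in below (built on
# np.meshgrid) only illustrates the assumed behaviour: the cartesian product of
# the per-dimension grids, one combination per row.
import numpy as np

def _cartesian_combinations(inputs):
    # Stack the cartesian product of the 1-D grids into an (l, d) array.
    mesh = np.meshgrid(*inputs, indexing="ij")
    return np.stack([m.ravel() for m in mesh], axis=-1)

grids = [np.linspace(0.0, 1.0, 3), np.linspace(-1.0, 1.0, 2)]
print(_cartesian_combinations(grids).shape)  # (6, 2): prod(num_samples) x len(bounds)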
from typing import Optional
from typing import Tuple
def flatten_expressions_tree(
expression: Optional[Expression]) -> Tuple[Expression, ...]:
"""
    Flatten an expression tree into a tuple of expressions.
"""
if not expression:
return tuple()
expressions = [expression]
for arg in expression.arguments:
if is_expression(arg):
expressions.extend(flatten_expressions_tree(arg))
return tuple(expressions) | dea87f37663e995a74b7709b3dbb62d5cc370f50 | 570 |
def policy_head(x, mode, params):
"""
The policy head attached after the residual blocks as described by DeepMind:
1. A convolution of 8 filters of kernel size 3 × 3 with stride 1
2. Batch normalisation
3. A rectifier non-linearity
4. A fully connected linear layer that outputs a vector of size 19²+1 = 362
corresponding to logit probabilities for all intersections and the pass
move
"""
num_channels = params['num_channels']
num_samples = params['num_samples']
def _forward(x, is_recomputing=False):
""" Returns the result of the forward inference pass on `x` """
y = batch_norm_conv2d(x, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)
y = tf.nn.relu(y)
y = tf.reshape(y, (-1, 361 * num_samples))
y = dense(y, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)
return tf.cast(y, tf.float32)
return recompute_grad(_forward)(x) | 8e8418ffd46857cf03921ea516729d15a0ef5ca9 | 571 |
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = sum(sentences, [])
return _get_ngrams(n, words) | 73640beb269895d7c54d21825135908eba3f3bd4 | 573 |
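# A small illustration of the flattening step above: summing a list of token
# lists onto an empty list concatenates all sentences into one word list
# before the n-grams are computed.
sentences = [["the", "cat"], ["sat", "down"]]
print(sum(sentences, []))  # ['the', 'cat', 'sat', 'down']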
from typing import List
def override_list(base_list: List, dynamic_key: str, val):
"""
Customize the base list by updating with the
dynamic_key and val.
Parameters
----------
    base_list: list
        List to be customized with dynamic args
dynamic_key: str
Key to identify the location the value should be updated.
Nested with DOT like "custom.key_0.key_1.key_2.0.0.key_4"
val: str or float or int or dict or list
Value to be set
Returns
-------
    list
        Updated base_list based on the given key-value pair
Notes
-----
This will be called recursively with override_dict.
If dynamic_key is not a number, then we try to match on `name` field
in the list of dictionaries.
"""
def find_root_key_index(base_list, root_key):
if root_key.isdigit():
# If array index
root_key = int(root_key)
else:
# If string, then match on `name`
for root_key_i in range(len(base_list)):
if root_key == base_list[root_key_i][NAME]:
root_key = root_key_i
break
if not isinstance(root_key, int):
raise KeyError("{} not found in List".format(root_key))
return root_key
if DOT in dynamic_key:
# Compute root and subtree keys
root_key = find_root_key_index(base_list, dynamic_key.split(DOT)[0])
subtree_key = DOT.join(dynamic_key.split(DOT)[1:])
# Extract subtree
subtree = base_list[root_key]
if isinstance(subtree, dict):
root_val = override_dict(base_dict=subtree,
dynamic_key=subtree_key,
val=val)
elif isinstance(subtree, list):
root_val = override_list(base_list=subtree,
dynamic_key=subtree_key,
val=val)
else:
raise ValueError(
"Unsupported subtree type. Must be one of list or dict")
else:
# End of nested dynamic key
root_key = find_root_key_index(base_list, dynamic_key)
root_val = val
base_list[root_key] = root_val
return base_list | eb7e4a0462d2db82cb1900784b5d99e2865dcc00 | 574 |
def c_components(DAG):
"""Return a list of the maximal c-component node sets in DAG."""
    G = nx.Graph()
G.add_nodes_from(observable_nodes(DAG))
G.add_edges_from([(u,v) for u,v in observable_pairs(DAG) if
has_confounded_path(DAG, u, v)])
return list(nx.connected_components(G)) | 85c4ae4ee7b9572e31727f672864e4b079e3bde7 | 575 |
def wss_over_number_of_clusters(data, algorithm='kmeans',
max_iter=100, num_repeats = 5, max_num_clusters = 12,
plot_file = None):
"""
Calculates the within-sum-of-squares (WSS) for different numbers of clusters,
averaged over several iterations.
Parameters
----------
data : float array
Trajectory data [frames,frame_data]
algorithm : string
The algorithm to use for the clustering.
Options: kmeans, rspace.
Default: kmeans
max_iter : int, optional
Maximum number of iterations.
Default: 100.
num_repeats : int, optional
Number of times to run the clustering for each number of clusters.
Default: 5.
max_num_clusters : int, optional
Maximum number of clusters for k-means clustering.
Default: 12.
plot_file : str, optional
Name of the file to save the plot.
Returns
-------
all_wss : float array
WSS values for each number of clusters (starting at 2).
std_wss : float array
Standard deviations of the WSS.
"""
# Initialize lists
all_wss = []
std_wss = []
    # Loop over the number of clusters, starting at 2 as documented above
    for nc in range(2, max_num_clusters + 1):
rep_wss = []
# Run each clustering several times.
for repeat in range(num_repeats):
# Get clusters and WSS for this repetition.
cc = obtain_clusters(data, algorithm=algorithm, max_iter=max_iter,
num_clusters=nc, plot=False)
cidx, wss, centroids = cc
rep_wss.append(wss)
# Calculate mean and standard deviation for this number of clusters.
all_wss.append(np.mean(rep_wss))
std_wss.append(np.std(rep_wss))
# Plot the WSS over the number of clusters
fig, ax = plt.subplots(1,1, figsize=[4,3], dpi=300)
ax.errorbar(np.arange(len(all_wss))+2,np.array(all_wss),yerr=np.array(std_wss)/np.sqrt(num_repeats))
ax.set_xlabel('number of clusters')
ax.set_ylabel('total WSS')
fig.tight_layout()
# Save the plot to file.
if plot_file: fig.savefig(plot_file)
return all_wss, std_wss | fa00e00a5b0ba53b4b578fae04c43c69500d2d97 | 576 |
from typing import List
from typing import Tuple
def separate_classes(x: np.ndarray, y: np.ndarray) -> List[Tuple[int, np.ndarray]]:
"""Separate samples by classes into a list.
Args:
x (np.ndarray): Samples.
y (np.ndarray): Target labels (classes).
Returns:
List[Tuple[int, np.ndarray]]: List in the format [(class, samples),...]
"""
classes = np.unique(y)
l = []
for clss in classes:
l.append((clss, x[y==clss]))
return l | ee0a93b44785f6f017769da6fd6f1cc65f9cf121 | 577 |
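# Minimal usage sketch for separate_classes on toy data (numpy is assumed to be
# imported as np in the surrounding module).
import numpy as np
x = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([0, 1, 0, 1])
for label, samples in separate_classes(x, y):
    print(label, samples.ravel())  # 0 [1. 3.]  then  1 [2. 4.]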
import time
from datetime import datetime, timedelta
def monitor_threads(threads, arguments):
"""
Monitor the threads.
Parameters
----------
threads: dict
The threads to monitor.
arguments: namespace
The parsed command line.
# --GT-- not used, kept to avoid to break the function call.
Returns
-------
int
0
"""
try:
# exit and let systemd restart the process to avoid issues with
# potential memory leaks
time.sleep(60 * 60 * 2)
except Exception:
# the sleep was interrupted
pass
for th in threads.keys():
threads[th].stop()
# give up to 30 seconds for threads to exit cleanly
timeout = datetime.now() + timedelta(seconds=30)
while timeout > datetime.now():
thread_running = False
for th in threads.keys():
if threads[th].is_alive():
thread_running = True
if not thread_running:
break
return 0 | fa0e1f329cb70eb1fd81c74c9ec555921cab9041 | 578 |
def fetch_cfr_parts(notice_xml):
""" Sometimes we need to read the CFR part numbers from the notice
XML itself. This would need to happen when we've broken up a
multiple-effective-date notice that has multiple CFR parts that
may not be included in each date. """
parts = []
for cfr_elm in notice_xml.xpath('//CFR'):
parts.extend(notice_cfr_p.parseString(cfr_elm.text).cfr_parts)
return list(sorted(set(parts))) | d4fc1be9004f5a670c58dc3badaef80d53f73fc4 | 579 |
def get_dayofweek(date):
"""
Returns day of week in string format from date parameter (in datetime format).
"""
return date.strftime("%A") | 4a0f728733870998331ea6f796b167b9dd3276ab | 580 |
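# Usage sketch: get_dayofweek simply formats the datetime with "%A".
from datetime import datetime
print(get_dayofweek(datetime(2020, 1, 1)))  # "Wednesday"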
def add_model_output(modelIn, mode=None, num_add=None, activation=None):
""" This function modifies the last dense layer in the passed keras model. The modification includes adding units and optionally changing the activation function.
Parameters
----------
modelIn : keras model
Keras model to be modified.
mode : string
Mode to modify the layer. It could be:
'abstain' for adding an arbitrary number of units for the abstention optimization strategy.
'qtl' for quantile regression which needs the outputs to be tripled.
'het' for heteroscedastic regression which needs the outputs to be doubled. (current implicit default: 'het')
num_add : integer
Number of units to add. This only applies to the 'abstain' mode.
activation : string
        String with keras specification of activation function (e.g. 'relu', 'sigmoid', 'softmax', etc.)
Return
----------
modelOut : keras model
Keras model after last dense layer has been modified as specified. If there is no mode specified it returns the same model.
"""
if mode is None:
return modelIn
numlayers = len(modelIn.layers)
# Find last dense layer
i = -1
while 'dense' not in (modelIn.layers[i].name) and ((i+numlayers) > 0):
i -= 1
# Minimal verification about the validity of the layer found
assert ((i + numlayers) >= 0)
assert ('dense' in modelIn.layers[i].name)
# Compute new output size
    if mode == 'abstain':
assert num_add is not None
new_output_size = modelIn.layers[i].output_shape[-1] + num_add
    elif mode == 'qtl': # for quantile UQ
new_output_size = 3 * modelIn.layers[i].output_shape[-1]
else: # for heteroscedastic UQ
new_output_size = 2 * modelIn.layers[i].output_shape[-1]
# Recover current layer options
config = modelIn.layers[i].get_config()
# Update number of units
config['units'] = new_output_size
# Update activation function if requested
if activation is not None:
config['activation'] = activation
# Create new Dense layer
reconstructed_layer = Dense.from_config(config)
# Connect new Dense last layer to previous one-before-last layer
additional = reconstructed_layer(modelIn.layers[i-1].output)
# If the layer to replace is not the last layer, add the remainder layers
if i < -1:
for j in range(i+1, 0):
config_j = modelIn.layers[j].get_config()
aux_j = layers.deserialize({'class_name': modelIn.layers[j].__class__.__name__,
'config': config_j})
reconstructed_layer = aux_j.from_config(config_j)
additional = reconstructed_layer(additional)
modelOut = Model(modelIn.input, additional)
return modelOut | 2b869477bb67f7349569d15e0ada229cc1400e39 | 581 |
def parse(f):
"""Parse ASDL from the given file and return a Module node describing it."""
parser = ASDLParser()
return parser.parse(f) | ca6b97c2181444dd7325877d67d74bb894c1bfef | 582 |
def distance(xyz, lattice, PBC=[1,2,3]):
"""
Returns the Euclidean distance from the origin for a fractional
displacement vector. Takes into account the lattice metric and periodic
boundary conditions, including up to one non-periodic axis.
Args:
xyz: a fractional 3d displacement vector. Can be obtained by
subtracting one fractional vector from another
lattice: a 3x3 matrix describing a unit cell's lattice vectors
PBC: the axes, if any, which are periodic. 1, 2, and 3 correspond
to x, y, and z respectively.
Returns:
a scalar for the distance of the point from the origin
"""
xyz = filtered_coords(xyz, PBC=PBC)
matrix = create_matrix(PBC=PBC)
matrix += xyz
matrix = np.dot(matrix, lattice)
return np.min(cdist(matrix,[[0,0,0]])) | e6a6a925773b35996cc5f71982b72aeaef25cc4f | 583 |
def dobro(n=0, formato=False):
"""
    Double a number.
    :param n: the number to double
    :param formato: (optional) format the result as currency
    :return: the result
"""
n = float(n)
n += n
return moeda(n) if formato else n | f01267cf432d48ab263d90756fd920c7d6d987a1 | 584 |
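# Usage sketch for dobro. `moeda` is an external currency formatter that is not
# shown above, so a hypothetical stand-in is defined here for illustration only.
def moeda(valor, simbolo="R$"):
    return f"{simbolo}{valor:.2f}"

print(dobro(5))                # 10.0
print(dobro(5, formato=True))  # "R$10.00"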
def f_q2d(n, m):
"""Lowercase f term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.18b).
Parameters
----------
n : int
radial order
m : int
azimuthal order
Returns
-------
float
f
"""
if n == 0:
return np.sqrt(F_q2d(n=0, m=m))
else:
return np.sqrt(F_q2d(n, m) - g_q2d(n-1, m) ** 2) | 52919fab40e51dfa8ba89eb1d6f97ec733ce9de5 | 585 |
def binary_search(data, target, low, high):
"""Return position if target is found in indicated portion of a python list and -1 if target is not found.
"""
if low > high:
return -1
mid = (low + high) // 2
if target == data[mid]:
return mid
elif target < data[mid]:
# recur on the portion left of the middle
return binary_search(data, target, low, mid - 1)
else:
# recur on the portion right of the middle
return binary_search(data, target, mid + 1, high) | 8f85596e4ff8971f4002b2f108c9c276304924be | 586 |
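# Usage sketch for the recursive binary_search above; the list must be sorted
# and low/high are inclusive index bounds.
data = [1, 3, 5, 7, 9, 11]
print(binary_search(data, 7, 0, len(data) - 1))  # 3
print(binary_search(data, 4, 0, len(data) - 1))  # -1 (not found)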
def delete_position(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
db = get_db()
db.execute('DELETE FROM gatekeeping WHERE id = ?', (id,))
db.commit()
return jsonify(status='ok') | cf5d6580eea191a0d17d1a98e2f86a8610a5f58a | 587 |
import struct
def read_string(stream, length):
"""read data from the file and return as a text string
"""
text = struct.unpack('{}s'.format(length), stream.read(length))
try:
result = str(text[0], encoding='utf-8')
except UnicodeDecodeError:
result = str(text[0], encoding='latin-1')
return result.rstrip('\x00') | da8ddf04ea6c0232e59c4612105a243a5c1807d4 | 588 |
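# Usage sketch for read_string with an in-memory stream; io is imported here
# only for the example.
import io
stream = io.BytesIO(b"hello\x00\x00\x00")
print(read_string(stream, 8))  # "hello" (trailing NUL bytes stripped)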
import math
def generate_gate_y_hamiltonian_vec() -> np.ndarray:
"""Return the vector representation for the Hamiltonian of a Y gate with respect to the orthonormal Hermitian matrix basis with the normalized identity matrix as the 0th element.
The result is a real vector with size 4.
Parameters
----------
Returns
----------
np.ndarray
The real vector representation of the Hamiltonian of the gate.
"""
dim = 2
coeff = 0.5 * math.pi * np.sqrt(2)
vec = np.zeros(dim * dim, dtype=np.float64)
vec[0] = -coeff
vec[2] = coeff
return vec | 0a290c8f4a2c0acd81439caa4e5c4a7032a91d83 | 589 |
def _semi_implicit_midpoint(ode_fun, jac_fun, y_olds, t_old, f_old, dt, args,
solver_parameters, J00, I):
"""
Calculate solution at t_old+dt using the semi-implicit midpoint
formula. Based on equations IV.9.16a-b of Ref II.
"""
y_older, y_old = y_olds
je_tot=0
if(y_older is None): # Use Euler to get starting value
return _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old, dt, args, solver_parameters,
J00, I)
if(f_old is None):
f_yj = ode_fun(*(y_old,t_old)+args)
fe_tot = 1
else: # We already computed it and can just reuse it
f_yj = f_old
fe_tot=0
b = np.dot(-(I+dt*J00),(y_old-y_older)) + 2*dt*f_yj
A = I-dt*J00
if(solver_parameters['initialGuess']): # Use Euler for initial guess
x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun, y_olds,
t_old, f_yj, dt,
args, solver_parameters)
fe_tot += fe_tot_
else:
x0=None
dy = linear_solve(A, b, iterative=solver_parameters['iterative'],
tol=solver_parameters['min_tol'], x0=x0)
y_new = y_old + dy
return (y_new, f_yj, fe_tot, je_tot) | 7807427e64d4348eb4deb4624d1db5e4df0226ca | 590 |
from tensorflow.python.framework.graph_util import (
convert_variables_to_constants,
remove_training_nodes,
)
def freeze_session(session,
keep_var_names=None,
output_names=None,
clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(
set(v.op.name for v in tf.global_variables()).difference(
keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
# Graph -> GraphDef ProtoBuf
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
frozen_graph = remove_training_nodes(frozen_graph)
return frozen_graph | 3617fc93a1727fcabf128622fcaf0e8dd5008b24 | 591 |
def get_system_cpu_times():
"""Return system CPU times as a namedtuple."""
user, nice, system, idle = _psutil_osx.get_system_cpu_times()
return _cputimes_ntuple(user, nice, system, idle) | c2edb6c0b87449c31aba1fe023736e30cdf41154 | 592 |
def Mce1(m, q, ξ, *, p=0):
"""
v = Mce1(m, q, ξ, *, p=0)
Compute the value of the even Radial Mathieu function of the first kind
Mce⁽¹⁾ₘ(q, ξ).
Parameters
----------
m : array_like
        integer order of the Mathieu function
q : array_like
positive parameter in the Mathieu differential equation
ξ : array like
``radial'' coordinate in the Elliptic coordinates
p : 0 or 1 or 2 (default 0)
0 for the function,
1 for the first derivative
2 for the second derivative
Returns
-------
v : array like
        value of Mce⁽¹⁾ₘ(q, ξ), Mce⁽¹⁾ₘ′(q, ξ), or Mce⁽¹⁾ₘ″(q, ξ)
"""
if p == 0:
return mathieu_modcem1(m, q, ξ)[0]
if p == 1:
return mathieu_modcem1(m, q, ξ)[1]
if p == 2:
return (mathieu_a(m, q) - (2 * q) * cosh(2 * ξ)) * mathieu_modcem1(m, q, ξ)[0]
raise ValueError("The value p must be 0, 1, or 2.") | 7a5c7fc6b9a9380c5d763f1773a26516345e5a3b | 593 |
def profileown():
"""Display user's profile"""
return render_template("profile.html", user=session, person=session, books=None) | 83ac3e78fb2f3b43596874c8208099d041536ebb | 594 |
def GetChildConfigListMetadata(child_configs, config_status_map):
"""Creates a list for the child configs metadata.
This creates a list of child config dictionaries from the given child
configs, optionally adding the final status if the success map is
specified.
Args:
child_configs: The list of child configs for this build.
config_status_map: The map of config name to final build status.
Returns:
List of child config dictionaries, with optional final status
"""
child_config_list = []
for c in child_configs:
pass_fail_status = None
if config_status_map:
if config_status_map[c['name']]:
pass_fail_status = constants.FINAL_STATUS_PASSED
else:
pass_fail_status = constants.FINAL_STATUS_FAILED
child_config_list.append({'name': c['name'],
'boards': c['boards'],
'status': pass_fail_status})
return child_config_list | 621fa06eb0055a2dec1941bb4fd84ecb8fd6847c | 595 |
def get_samples(df, selected_rows, no_of_samples, records_in_db):
""" get samples without shuffling columns """
df_fixed = None
df_random = None
generic_data_dict = []
#drop rows with 'ignore' set to 'yes'
if 'ignore' in df.columns:
df = df[df["ignore"] != "yes"]
df = df.drop(['ignore'], axis = 1)
print_info("================================================================================")
print_info("Total no. of samples found in variable xls file : {}".format(len(df.index)))
print_info("Total no. of samples already tested : {}".format(len(records_in_db)))
print_info("Total no. of samples remaining to test : {}".format(len(df.index) - len(records_in_db)))
print_info("Total no. of random samples selected in this test : {}".format(no_of_samples))
if selected_rows:
print_info("Selected rows to test : {}".format(selected_rows))
print_info("================================================================================")
#select user selected rows
if selected_rows:
selected_rows = [row-1 for row in selected_rows]
df_fixed = df.iloc[selected_rows]
df = df.drop(selected_rows, axis=0)
#select records in df which are not in db_df
db_df = pd.DataFrame(records_in_db)
if db_df.columns.tolist():
df = df.merge(db_df, how = 'outer' ,indicator=True).\
loc[lambda x : x['_merge']=='left_only']
df = df.drop(['_merge'], axis = 1)
if no_of_samples and len(df.index) == 0:
print_error("All the samples are tested. use --reset_execution to restart test again")
exit(1)
if no_of_samples and no_of_samples <= len(df.index):
#select random samples
df_random = df.sample(n=no_of_samples)
elif no_of_samples and no_of_samples > len(df.index):
print_error("Given no. of samples {} is greater than remaining samples to" \
" test {}. please reduce no. of samples".format(no_of_samples, len(df.index)))
exit(1)
df = pd.concat([df_fixed, df_random])
generic_data_dict = df.to_dict('records')
print_info("selected samples : {}".format(generic_data_dict))
print_info("================================================================================")
return generic_data_dict | 16b13d59e50ebcb16e65feea41bc801b1e8b87b8 | 596 |
def update_member_names(oldasndict, pydr_input):
"""
Update names in a member dictionary.
Given an association dictionary with rootnames and a list of full
file names, it will update the names in the member dictionary to
contain '_*' extension. For example a rootname of 'u9600201m' will
    be replaced by 'u9600201m_c0h', making sure that a MEF file is passed
as an input and not the corresponding GEIS file.
"""
omembers = oldasndict['members'].copy()
nmembers = {}
translated_names = [f.split('.fits')[0] for f in pydr_input]
newkeys = [fileutil.buildNewRootname(file) for file in pydr_input]
keys_map = list(zip(newkeys, pydr_input))
for okey, oval in list(omembers.items()):
if okey in newkeys:
nkey = pydr_input[newkeys.index(okey)]
nmembers[nkey.split('.fits')[0]] = oval
oldasndict.pop('members')
# replace should be always True to cover the case when flt files were removed
# and the case when names were translated
oldasndict.update(members=nmembers, replace=True)
oldasndict['order'] = translated_names
return oldasndict | 364a4bdd742fe03545e9bb4fb72ef996beea1550 | 597 |
def part1(entries: str) -> int:
"""part1 solver take a str and return an int"""
houses = {(0, 0): 1}
pos_x, pos_y = 0, 0
for direction in entries:
delta_x, delta_y = moves[direction]
pos_x += delta_x
pos_y += delta_y
houses[(pos_x, pos_y)] = houses.get((pos_x, pos_y), 0) + 1
return len(houses) | 104cf7b32ec9f395603a32f57b146129d30a417b | 598 |
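# Self-contained sketch of part1's walk, assuming the module-level `moves`
# mapping sends '^', 'v', '<', '>' to (dx, dy) unit steps.
moves = {"^": (0, 1), "v": (0, -1), ">": (1, 0), "<": (-1, 0)}
print(part1("^>v<"))  # 4 distinct houses visited, including the start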
def quantile_compute(x, n_bins):
"""Quantile computation.
Parameters
----------
x: pd.DataFrame
the data variable we want to obtain its distribution.
n_bins: int
the number of bins we want to use to plot the distribution.
Returns
-------
quantiles: np.ndarray
the quantiles.
"""
# aux.quantile(np.linspace(0, 1, 11)) # version = 0.15
quantiles = [x.quantile(q) for q in np.linspace(0, 1, n_bins+1)]
quantiles = np.array(quantiles)
return quantiles | 4c23b417bb8c9e99709fca2371e476e4edadfeec | 600 |
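# Usage sketch for quantile_compute; x is assumed to be a pandas Series (or a
# single DataFrame column) so that x.quantile(q) returns a scalar.
import numpy as np
import pandas as pd
x = pd.Series(np.arange(101))
print(quantile_compute(x, n_bins=4))  # [  0.  25.  50.  75. 100.]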
def remove_separators(version):
"""Remove separator characters ('.', '_', and '-') from a version.
A version like 1.2.3 may be displayed as 1_2_3 in the URL.
Make sure 1.2.3, 1-2-3, 1_2_3, and 123 are considered equal.
Unfortunately, this also means that 1.23 and 12.3 are equal.
Args:
version (str or Version): A version
Returns:
str: The version with all separator characters removed
"""
version = str(version)
version = version.replace('.', '')
version = version.replace('_', '')
version = version.replace('-', '')
return version | c29b9e7ca84705a9f36123ff0d84e3beb24468bf | 601 |
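# Usage sketch for remove_separators: '.', '_' and '-' are all stripped, so the
# spellings below normalize to the same string.
print(remove_separators("1.2.3"))  # "123"
print(remove_separators("1_2-3"))  # "123"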
def parse_coverage_status(status):
"""Parse a coverage status"""
return Status.HIT if status.upper() == 'SATISFIED' else Status.MISSED | 8a295b3df2d10b7813ce0efc96331607da5ba1b9 | 602 |
from functools import reduce
def max_index(lst):
    """Returns the index of the max value of lst."""
    split_list = zip(lst, range(len(lst)))
    # Keep whichever (value, index) pair carries the larger value.
    (retval, retI) = reduce(lambda curr, new: curr if curr[0] > new[0] else new,
                            split_list)
    return retI | 43e15edbc227b13db3d468b4ee8d030770d5f1a2 | 603 |
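# Usage sketch for max_index on a small list.
print(max_index([3, 1, 4, 1, 5, 9, 2]))  # 5 (index of the maximum value 9)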
def mask_coverage(coverage: mx.sym.Symbol, source_length: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Masks all coverage scores that are outside the actual sequence.
:param coverage: Input coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
:param source_length: Source length. Shape: (batch_size,).
:return: Masked coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
"""
return mx.sym.SequenceMask(data=coverage, axis=1, use_sequence_length=True, sequence_length=source_length) | d2594a12a78d604c26588218f8b084bf5c18d2ac | 604 |
def DFS_complete(g):
"""Perform DFS for entire graph and return forest as a dictionary.
Result maps each vertex v to the edge that was used to discover it.
(Vertices that are roots of a DFS tree are mapped to None.)
"""
forest = {}
for u in g.vertices():
if u not in forest:
forest[u] = None # u will be the root of a tree
DFS(g, u, forest)
return forest | ec141354d41cf87381db841e6adf8e888a573494 | 605 |
def _transform_p_dict(p_value_dict):
"""
Utility function that transforms a dictionary of dicts into a dataframe representing the dicts as rows
    (like tuples). It is needed to keep track of the feature names and corresponding values.
    The underlying data structures are confusing.
:param p_value_dict: dictionary of dictionaries storing the p_values
:return: dataframe where the keys are added to the p_values as columns
"""
# Turn dictionary of dictionaries into a collection of the key-value pairs represented as nested tuples
item_dict = dict()
for feat in p_value_dict:
item_dict[feat] = list(p_value_dict[feat].items())
# building a matrix (nested lists) by extracting and sorting data from nested tuples
# (items[0], (nested_items[0], nested_items[1]))
df_matrix = []
for items in item_dict.items():
for nested_items in items[1]:
df_matrix.append([nested_items[1], nested_items[0], items[0]])
return pd.DataFrame(df_matrix) | 154ce78ae03267ce69254ff583ca0b736d62d435 | 606 |
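# Usage sketch for _transform_p_dict: each (outer key, inner key, p-value)
# triple becomes one row of the resulting DataFrame, ordered as
# [p_value, inner_key, outer_key] (pandas assumed imported as pd above).
p_values = {"feat_a": {"feat_b": 0.01, "feat_c": 0.20}}
print(_transform_p_dict(p_values))
#       0       1       2
# 0  0.01  feat_b  feat_a
# 1  0.20  feat_c  feat_a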
import torch
from typing import Optional
def iou(
predict: torch.Tensor,
target: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
    This is a useful loss because it emphasizes the active
    regions of the prediction and the target
"""
dims = tuple(range(predict.dim())[1:])
if mask is not None:
predict = predict * mask
target = target * mask
intersect = (predict * target).sum(dims)
union = (predict + target - predict * target).sum(dims) + 1e-4
return (intersect / union).sum() / intersect.numel() | 7608189bde3b640a8f148e3628e5668a3b310655 | 607 |
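# Usage sketch for the iou score above on toy binary masks: it returns the mean
# intersection-over-union across the batch.
import torch
predict = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
target = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
print(iou(predict, target))  # ~0.5 (intersection 1 / union 2)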
import re
def get_sequence(seq_id):
"""
TO DO:
1. redirection 303. (not tested in compliance_suite)
2. Note: compliance_suite ignores the range if it is out of bounds or if > SUBSEQUENCE_LIMIT
3. Ambiguous error code resolution in refget documentation:
range:
The server MUST respond with a Bad Request error if one or more ranges are out of bounds of the sequence.
If the server supports circular chromosomes and the chromosome is not circular
or the range is outside the bounds of the chromosome the server shall return Range Not Satisfiable.
start, end:
The server MUST respond with a Bad Request error if start is specified and is larger than the total sequence length.
If the server supports circular chromosomes and the chromosome is not circular
or the range is outside the bounds of the chromosome the server shall return Range Not Satisfiable.
4. Should we validate the response headers in the compliance suite?
5. check if start and end are 32 bit
"""
header_content = request.headers
accept_type = "text/vnd.ga4gh.refget.v1.0.0+plain"
# validate the accept header
if "accept" in header_content and header_content["accept"] not in [accept_type,"*/*"]:
# bad mock server: status = 200 when headers are incorrect
return Response(status=200)
# check if the sequence is present. If not, error = 404
sequence_obj = get_sequence_obj(seq_id, DATA, TRUNC512)
if not sequence_obj:
# bad mock server: status = 200 when sequence is not found
return Response(status=200)
# get start, end and range params
start = request.args.get('start', default = None)
end = request.args.get('end', default=None)
RANGE_HEADER_PATTERN = re.compile(r'bytes=[0-9]*-[0-9]*$')
if "range" in header_content:
seq_range = header_content["Range"]
# bad mock server: status = 404 when both start/end and range parameters are passed
if (start or end) and seq_range:
return Response(status=404)
# bad mock server: status = 404 when range pattern is not as expected
if not RANGE_HEADER_PATTERN.match(seq_range):
return Response(status=404)
fbs = int(header_content['Range'].split('=')[1].split('-')[0])
lbs = int(header_content['Range'].split('=')[1].split('-')[1])
if (fbs > sequence_obj.size-1) or (fbs > lbs):
"""
Note:
we are ignoring case where (lbs > sequence_obj.size-1), (lbs-fbs+1 > SUBSEQUENCE_LIMIT) to match the compliance_suite tests
filtering the case where (fbs > lbs) : Sub-sequences of circular chromosomes across the origin may not be requested via the Range header
"""
# since fbs and lbs <= SUBSEQUENCE_LIMIT-1, it is implied that the subsequence limit if any is satisfied
# bad mock server: incorrect status = 400
return Response(status=400)
elif (lbs > sequence_obj.size-1) :
lbs = sequence_obj.size
# bad mock server: status = 200 while returning subsequence
return Response(response=(sequence_obj.sequence[fbs:lbs+1]).encode("ascii"),status=200, mimetype=accept_type)
if start or end:
# if start or end params are present, return the filtered sequence
# if start or end params are not 32 bit unsigned int, error = 400
# if start > sequence length, error =400
# if start > end and circular not implemented, error = 501
if start:
# TO DO: check if start and end are 32 bit
if not start.isdigit(): #checks if start is unsigned int
# bad mock server: incorrect status 200
return Response(status=200)
start = int(start)
else: start = 0
if end:
if not end.isdigit(): #checks if end is unsigned int
# bad mock server: incorrect status 200
return Response(status=200)
end = int(end)
else: end = sequence_obj.size
if start >= sequence_obj.size or end > sequence_obj.size :
# bad mock server: incorrect status 400
return Response(status=400)
elif start > end:
if CIRCULAR_CHROMOSOME_SUPPORT == False:
# bad mock server: incorrect status 416
return Response(status=416)
else:
if sequence_obj.is_circular == False:
# bad mock server: incorrect status 500
return Response(status=500)
else:
if len(sequence_obj.sequence[start:sequence_obj.size]+sequence_obj.sequence[0:end])>SUBSEQUENCE_LIMIT:
# bad mock server: incorrect status 400
return Response(status=400)
else:
# bad mock server: incorrect status 404
return Response(response=(sequence_obj.sequence[start:sequence_obj.size]+sequence_obj.sequence[0:end]).encode("ascii"),status=404,mimetype=accept_type)
elif end-start >SUBSEQUENCE_LIMIT:
# bad mock server: incorrect status 200
return Response(status=200)
# bad mock server: incorrect status 404
return Response(response=(sequence_obj.sequence[start:end]).encode("ascii"),status=404,mimetype=accept_type)
# bad mock server: incorrect status 500
return Response(response=(sequence_obj.sequence).encode("ascii"), status=500,mimetype=accept_type) | d0d10c1c491d32ffc70c5579330163bee11d5a15 | 608 |
def getCondVisibility(condition):
"""
Returns ``True`` (``1``) or ``False`` (``0``) as a ``bool``.
:param condition: string - condition to check.
List of Conditions: http://wiki.xbmc.org/?title=List_of_Boolean_Conditions
.. note:: You can combine two (or more) of the above settings by using "+" as an ``AND`` operator,
"|" as an ``OR`` operator, "!" as a ``NOT`` operator, and "[" and "]" to bracket expressions.
example::
visible = xbmc.getCondVisibility('[Control.IsVisible(41) + !Control.IsVisible(12)]')
"""
return bool(1) | 761914696ac2050c6bf130e5b49221be043903bd | 609 |
def history_cumulative(request):
"""
This endpoints returns the number of cumulative infections for each area given a date in history.
"""
days = int(request.query_params.get("days"))
observed = Covid19DataPoint.objects.all()
historyDate = max([d.date for d in observed]) - timedelta(days=-days)
shownData = observed.filter(date=historyDate)
deathData = Covid19DeathDataPoint.objects.filter(date=historyDate)
#total_confirmed = sum([d.val for d in shownData])
#total_death = sum([d.val for d in deathData])
greatest_model = Covid19Model.objects.get(name="SI-kJalpha - 40x")
greatest_predictions = Covid19PredictionDataPoint.objects.filter(model=greatest_model)
greatest_predictions = greatest_predictions.filter(date=greatest_predictions.last().date, social_distancing=1)
greatest_vals = [d.val for d in greatest_predictions]
max_val = max(greatest_vals)
greatest_death_model = Covid19Model.objects.get(name="SI-kJalpha - 40x (death prediction)")
greatest_death_predictions = Covid19PredictionDataPoint.objects.filter(model=greatest_death_model)
greatest_death_predictions = greatest_death_predictions.filter(date=greatest_death_predictions.last().date,
social_distancing=1)
greatest_death_vals = [d.val for d in greatest_death_predictions]
max_death_val = max(greatest_death_vals)
response = [{
'area': {
'country': d.area.country,
'state': d.area.state,
'iso_2': d.area.iso_2,
},
'value': d.val,
#'value_percentage': 1e3*d.val/total_confirmed,
'max_val_percentage': 1e4*d.val/max_val,
'date': d.date,
'deathValue': deathData.filter(area=d.area, date=d.date).first().val,
'max_death_percentage':1e4*deathData.filter(area=d.area, date=d.date).first().val/max_death_val,
#'death_percentage': 1e3*deathData.filter(area=d.area, date=d.date).first().val/total_death,
} for d in shownData]
return Response(response) | 7ecdf3e41304d99b11e7695e2962dbe3b7f6c96a | 611 |
from functools import partial
def check_partial(func, *args, **kwargs):
"""Create a partial to be used by goodtables."""
new_func = partial(func, *args, **kwargs)
new_func.check = func.check
return new_func | 55a723bc81e5666db9fdd97a4ea88d36635e3dc3 | 612 |
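# Usage sketch for check_partial. goodtables identifies custom checks through a
# `check` attribute, which a plain functools.partial would drop; the check
# function and its name below are illustrative only.
def my_check(cells, threshold):
    return [c for c in cells if c > threshold]
my_check.check = "custom-threshold-check"

bound = check_partial(my_check, threshold=10)
print(bound.check)         # "custom-threshold-check"
print(bound([5, 15, 25]))  # [15, 25]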
def mcas(mc, entries):
"""Multi-entry compare-and-set.
Synopsis:
>>> from memcache_collections import mcas
>>> mc = memcache.Client(['127.0.0.1:11211'], cache_cas=True)
>>> # initialize a doubly-linked list with two elements
>>> mc.set_multi({
... 'foo': {'next': 'bar'},
... 'bar': {'prev': 'foo'}})
[]
>>> # Always use mcas_get to access entries potentially in MCAS
>>> # operations. It returns an object representing a memcache entry
>>> # snapshot.
>>> foo_entry, bar_entry = mcas_get(mc, 'foo'), mcas_get(mc, 'bar')
>>> foo_entry.key, foo_entry.value
('foo', {'next': 'bar'})
>>> # atomically insert new node in our doubly linked list via MCAS
>>> mc.add('baz', {'prev': 'foo', 'next': 'bar'})
1
>>> mcas(mc, [
... (foo_entry, {'next': 'baz'}),
... (bar_entry, {'prev': 'baz'})])
True
Function is not thread safe due to implicit CAS ID handling of the
Python API.
Args:
mc: memcache client
entries: iterable of (Entry, new_value) tuples
Returns: True if MCAS completed successfully.
The aggregate size of current and new values for all entries must fit
within the memcache value limit (typically 1 MB).
Based on "Practical lock-freedom", Keir Fraser, 2004, pp. 30-34.
"""
dc = _DequeClient(mc)
mcas_record = _McasRecord(mc, entries)
dc.AddNode(mcas_record)
# very sad that we need to read this back just to get CAS ID
dc.mc.gets(mcas_record.uuid)
return _mcas_help(dc, mcas_record, is_originator=True) | 741cbc4d9962292fc544b945785a72cc25060d5b | 613 |
def figure_8s(N_cycles=2, duration=30, mag=0.75):
"""
Scenario: multiple figure-8s.
Parameters
----------
N_cycles : int
How many cycles of left+right braking.
duration : int [sec]
Seconds per half-cycle.
mag : float
Magnitude of braking applied.
"""
on = [(2.0, mag), (duration - 2.0, None)] # Braking on
off = [(1.0, 0), (duration - 1.0, None)] # Braking off
inputs = {
"delta_br": simulation.linear_control([(2, 0), *([*on, *off] * N_cycles)]),
"delta_bl": simulation.linear_control([(2, 0), *([*off, *on] * N_cycles)]),
}
T = N_cycles * duration * 2
return inputs, T | 3bb485b23ea337b038a52fa946b5080cb8ae79eb | 614 |
import inspect
def grad_ast(func, wrt, motion, mode, preserve_result, verbose):
"""Perform AD on a single function and return the AST.
Args:
See `grad`.
Returns:
node: The AST of a module containing the adjoint and primal function
definitions.
required: A list of non-built in functions that this function called, and
of which the primals and adjoints need to be made available in order
for the returned function to run.
"""
node = annotate.resolve_calls(func)
fence.validate(node, inspect.getsource(func))
node = anf_.anf(node)
if verbose >= 2:
print('ANF')
print(quoting.to_source(node))
if mode == 'reverse':
node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt,
preserve_result)
if verbose >= 2:
print('RAW')
print(quoting.to_source(node))
if motion == 'split':
node = reverse_ad.split(node, stack)
else:
node = reverse_ad.joint(node)
if verbose >= 2:
print('MOTION')
print(quoting.to_source(node))
elif mode == 'forward':
node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result)
return node, required | 1b358f36fb73169fa31bfd3266ccfae172980178 | 615 |
import re
def sortRules(ruleList):
"""Return sorted list of rules.
Rules should be in a tab-delimited format: 'rule\t\t[four letter negation tag]'
Sorts list of rules descending based on length of the rule,
splits each rule into components, converts pattern to regular expression,
and appends it to the end of the rule. """
ruleList.sort(key = len, reverse = True)
sortedList = []
for rule in ruleList:
s = rule.strip().split('\t')
splitTrig = s[0].split()
trig = r'\s+'.join(splitTrig)
pattern = r'\b(' + trig + r')\b'
s.append(re.compile(pattern, re.IGNORECASE))
sortedList.append(s)
return sortedList | 5b98903fd48f562d22e0ce269aa55e52963fa4a9 | 616 |
def v3_settings_response():
"""Define a fixture that returns a V3 subscriptions response."""
return load_fixture("v3_settings_response.json") | 909d538388dde838bc57d6084c98db40b24db4f8 | 617 |
def Geom2dLProp_Curve2dTool_FirstParameter(*args):
"""
* returns the first parameter bound of the curve.
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: float
"""
return _Geom2dLProp.Geom2dLProp_Curve2dTool_FirstParameter(*args) | f9dea146d3e9002c17cddd3f60ff6fe5362a1268 | 618 |
def decode_regression_batch_image(x_batch, y_batch, x_post_fn = None, y_post_fn = None, **kwargs):
"""
x_batch: L or gray (batch_size, height, width, 1)
y_batch: ab channel (batch_size, height, width, 2)
x_post_fn: decode function of x_batch
y_post_fn: decode function of y_batch
"""
assert len(y_batch.shape)==4 and y_batch.shape[3]==2, "Invalid y_batch shape (batchsize, height, width, 2)"
    assert len(x_batch.shape)==4 and x_batch.shape[3]==1, "Invalid x_batch shape (batchsize, height, width, 1)"
y_height, y_width = y_batch.shape[1:3]
x_height, x_width = x_batch.shape[1:3]
if x_height != y_height or x_width != y_width:
y_batch = sni.zoom(y_batch, [1, 1.*x_height/y_height, 1.*x_width/y_width, 1])
# if
x_batch = x_post_fn(x_batch) if x_post_fn is not None else x_batch
y_batch = y_post_fn(y_batch) if y_post_fn is not None else y_batch
    y_batch_Lab = np.concatenate([x_batch, y_batch], axis=3)  # recombine the L and ab channels
y_batch_RGB = np.array([cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_Lab2RGB) for image in y_batch_Lab])
return y_batch_RGB | 270c0b60646c0d16f8f8a4f3f2d5933bef369e3e | 619 |
def get_mag_from_obs(h, e, d0=0):
"""gets the magnetic north components given the observatory components.
Parameters
__________
h: array_like
the h component from the observatory
e: array_like
the e component from the observatory
d0: float
the declination baseline angle in radians
Returns
_______
tuple of array_like
[0]: total h component as a float
[1]: total d declination as a float
"""
mag_h = get_mag_h_from_obs(h, e)
mag_d = get_mag_d_from_obs(h, e, d0)
return (mag_h, mag_d) | 27f4d538c3a9e13522f195bd9abb2683037ba72a | 620 |
def set_cell(client, instance, colid, value, file_=None):
"""Set the value of one cell of a family table.
Args:
client (obj):
creopyson Client.
instance (str):
Family Table instance name.
colid (str):
Column ID.
value (depends on data type):
Cell value.
`file_` (str, optional):
File name (usually an assembly).
Defaults is currently active model.
Returns:
None
"""
data = {
"instance": instance,
"colid": colid,
"value": value,
}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
return client._creoson_post("familytable", "set_cell", data) | b1617484a3a710046cda5daaf6bbe9b376f83418 | 622 |
def find_bounds(particles):
"""
Find the maximum and minimum bounds describing a set of particles.
"""
min_bound = np.array(
[np.min(particles[:, 0]), np.min(particles[:, 1]), np.min(particles[:, 2])]
)
max_bound = np.array(
[np.max(particles[:, 0]), np.max(particles[:, 1]), np.max(particles[:, 2])]
)
return max_bound, min_bound | 2640ce6aa79cbe4392d2e044bddf46a94f6b76af | 623 |
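# Usage sketch for find_bounds on a small particle array (numpy assumed
# imported as np); note the return order is (max_bound, min_bound).
import numpy as np
particles = np.array([[0.0, 1.0, 2.0],
                      [3.0, -1.0, 0.5]])
max_bound, min_bound = find_bounds(particles)
print(max_bound)  # [3. 1. 2.]
print(min_bound)  # [ 0.  -1.   0.5]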
def get_tags_date(link, default_date=None):
"""Extract tags and date from the link."""
tags = ["links"]
date = ""
fltr = [
"Bookmarks Menu",
"Bookmark Bar",
"Personal Toolbar Folder",
"Importierte Lesezeichen",
"Bookmarks Toolbar",
"Kein Label vorhanden",
"Unsorted Bookmarks",
"Unsortierte Lesezeichen",
"Recently Bookmarked",
"Recent Tags",
]
for parent in link.parents:
if parent.name == "dl":
for sibling in parent.previous_siblings:
if sibling.name == "h3":
tags += sibling.get_text().split(">")
datestr = (
sibling.get("add_date", None)
or sibling.get("last_visit", None)
or sibling.get("last_modified", None)
or default_date
)
date = convert_date(datestr)
for sibling in parent.next_siblings:
if sibling.name == "h3":
tags += sibling.get_text().split(">")
datestr = (
sibling.get("add_date", None)
or sibling.get("last_visit", None)
or sibling.get("last_modified", None)
or default_date
)
date = convert_date(datestr)
break
return ([standardize_tag(i) for i in tags if i not in fltr], date) | b9f28a1f2f819cdfcc794ce3dc507ff4df288906 | 624 |
from datetime import datetime
def _is_future(time, time_ref=None):
"""
check if `time` is in future (w.r.t. `time_ref`, by default it is now)
Parameters
----------
time : int or datetime
the time to check (if int it's considered a
timestamp, see :py:meth:`datetime.timestamp`)
time_ref : int or datetime
the time reference (if int it's considered a timestamp, see
:py:meth:`datetime.timestamp`), if None use the present time
(default: None)
Returns
-------
bool
is in future or not
"""
time = _parse_time_from_input(time, "time")
if time_ref is None:
time_ref = datetime.now()
else:
time_ref = _parse_time_from_input(time_ref, "time_ref")
return time > time_ref | ece5121ee0e49c77a260b117cb0f251a35aa289b | 625 |
from datetime import datetime
def create_and_train_model(x_learn, y_learn, model, n_cores):
"""General method to create and train model"""
print(model.fit(x_learn, y_learn))
start_time = datetime.now()
c_val = cross_val_score(model, x_learn, y_learn, cv=10, n_jobs=n_cores)
end_time = datetime.now()
    print(type(model).__name__, "with n_jobs =", n_cores, "took:", (end_time - start_time).total_seconds(), "seconds")
print(type(model).__name__, "cross_val_score:", c_val.mean())
return model, c_val | babef20c893c39a09fe4a0e0f777b2ec326b694c | 626 |
def cardidolizedimageurl(context, card, idolized, english_version=False):
"""
Returns an image URL for a card in the context of School Idol Contest
"""
prefix = 'english_' if english_version else ''
if card.is_special or card.is_promo:
idolized = True
if idolized:
if getattr(card, prefix + 'round_card_idolized_image'):
return _imageurl(getattr(card, prefix + 'round_card_idolized_image'), context=context)
if getattr(card, prefix + 'card_idolized_image'):
return _imageurl(getattr(card, prefix + 'card_idolized_image'), context=context)
return _imageurl('static/default-' + card.attribute + '.png', context=context)
if getattr(card, prefix + 'round_card_image'):
return _imageurl(getattr(card, prefix + 'round_card_image'), context=context)
if getattr(card, prefix + 'card_image'):
return _imageurl(getattr(card, prefix + 'card_image'), context=context)
return _imageurl('static/default-' + card.attribute + '.png', context=context) | c69e1a4d998d632091fcf1d69a240d68386e0b21 | 627 |
def extract_simple_tip(e):
"""
"""
emin = e.min()
emax = e.max()
indices = [nearest_index(emin), nearest_index(emax)]
indices.sort()
imin,imax = indices
imax +=1 # for python style indexing
return imin, imax | c3d215ee34caa733a83b8be54cc92f9071839bef | 628 |
def parse_pipeline_config(pipeline_config_file):
"""Returns pipeline config and meta architecture name."""
with tf.gfile.GFile(pipeline_config_file, 'r') as config_file:
config_str = config_file.read()
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(config_str, pipeline_config)
meta_arch = pipeline_config.model.WhichOneof('model')
return pipeline_config, meta_arch | 4e7047c2e7b195bce8cadf83fa19e1b7c1cb16ca | 629 |
import math
def get_pwl(time_series, pwl_epsilon):
""" This is a wrapper function for getting a bounded piecewise linear approximation of the data """
if not isinstance(pwl_epsilon, (int, float)):
raise TypeError("pwl_epsilon must be a numeric type!")
if not (isinstance(time_series, pd.DataFrame) or isinstance(time_series, list)):
raise TypeError("The argument time_series must be a Pandas Dataframe, or a list!")
if isinstance(time_series, pd.DataFrame):
        # exactly the format Hakimi's algorithm expects the data in
polyline_from_data = list(zip(time_series.index.tolist(),
time_series[construct_variable_name(1)].values.tolist()))
else:
polyline_from_data = time_series
if math.isclose(pwl_epsilon, 0.0):
return polyline_from_data
else:
approx_grap = create_approximation_graph(timeseries=polyline_from_data, epsilon=pwl_epsilon)
shortest_path_gen =\
nx.all_shortest_paths(approx_grap, tuple(polyline_from_data[0]), tuple(polyline_from_data[-1]))
# this avoids generating all paths, since we take just the first one (saves memory and time)
return next(shortest_path_gen) | 431d83e59e4f3faa3cbe7f21106690f030330529 | 630 |
def to_array(string):
"""Converts a string to an array relative to its spaces.
Args:
string (str): The string to convert into array
Returns:
str: New array
"""
try:
new_array = string.split(" ") # Convert the string into array
while "" in new_array: # Check if the array contains empty strings
new_array.remove("")
return new_array
    except AttributeError:
        print("The parameter string is not a str")
return string | 7ee87a2b245a71666939e9ce2e23dc07fcaa0153 | 631 |
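# Usage sketch for to_array: consecutive spaces collapse because empty tokens
# are removed after splitting.
print(to_array("split  this   string"))  # ['split', 'this', 'string']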
def convert_atoms_to_pdb_molecules(atoms: t.List[Atom]) -> t.List[str]:
"""
This function converts the atom list into pdb blocks.
Parameters
----------
atoms : t.List[Atom]
List of atoms
Returns
-------
t.List[str]
pdb strings of that molecule
"""
    # 1) GROUP ATOMS BY MOLECULE
molecules = defaultdict(list)
for a in atoms:
molecules[a.resi].append(a)
    # 2) CONSTRUCT PDB BLOCKS
#ref: https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
pdb_format = "ATOM {:>5d} {:<2}{:1}{:>3} {:1}{:>3d}{:1} {:>7.3f}{:>7.3f}{:>7.3f}{:>5}{:>6}{:<3}{:>2} {:>2d}"
dummy_occupancy= dummy_bfactor= dummy_charge = 0.0
dummy_alt_location= dummy_chain= dummy_insertion_code= dummy_segment = ""
pdb_molecules: t.List[str] = []
for m_ID in sorted(molecules):
m = molecules[m_ID]
atoms_as_lines: t.List[str] = []
for a in sorted(m, key= lambda x: x.id):
atoms_as_lines.append(pdb_format.format(int(a.id), a.name, dummy_alt_location, a.resn, dummy_chain, int(a.resi), dummy_insertion_code, a.x, a.y, a.z, dummy_occupancy, dummy_bfactor, dummy_segment, a.elem, int(dummy_charge)))
# Sort by Id: => convert str up do first space to int
#atoms_as_lines = sorted(atoms_as_lines, key=lambda x: int(x[:x.index('\t')]))
molecule_as_str = "TITLE "+a.resn+"\n"+'\n'.join(atoms_as_lines) + '\nEND'
# molecule_as_str = molecule_as_str.replace('\t',' ')
pdb_molecules.append(molecule_as_str)
print(pdb_molecules[-1])
return pdb_molecules | 3f073818b92ce1db7b2e8c4aaf0724afb546beba | 632 |
def unvoiced_features(sig,fs,vcont,sil_cont):
"""
Unvoiced segment features.
Requires voiced and silence/pauses segment detection.
"""
#Unvoiced features
uv_seg,_,_ = unvoiced_seg(sig,fs,vcont,sil_cont)
lunvoiced = []
for uv in uv_seg:
lunvoiced.append(len(uv)/fs)#Length of unvoiced segment
uunvoiced = np.mean(lunvoiced)#Average length
# sunvoiced = np.std(lunvoiced)#variation of length
uvrate = (len(uv_seg)*fs)/len(sig)#Unvoiced segments per second
numuv = len(uv_seg)
rPVI,nPVI = get_pvi(lunvoiced)
pGPI,dGPI = get_gpi(lunvoiced,len(sig)/fs)
# feats_unvoiced = np.hstack([numuv,uvrate,uunvoiced,rPVI,nPVI,pGPI,dGPI])
feats_unvoiced = {'Unvoiced_counts':numuv,
'Unvoiced_rate':uvrate,
'Unvoiced_duration':uunvoiced,
'Unvoiced_rPVI':rPVI,
'Unvoiced_nPVI':nPVI,
'Unvoiced_dGPI':dGPI}
return feats_unvoiced | bb6f5fc0c939a7d838140b7a7eadda2ff32e5592 | 633 |
def civic_methods(method001, method002, method003):
"""Create test fixture for methods."""
return [method001, method002, method003] | 63913e2cfe866c65d9a1e7d5d3ba2e081b8e12f6 | 634 |
def _generate_tags(encoding_type, number_labels=4):
"""
    :param encoding_type: e.g. BIOES, BMES, BIO, etc.
    :param number_labels: how many labels, must be greater than 1
:return:
"""
vocab = {}
for i in range(number_labels):
label = str(i)
for tag in encoding_type:
if tag == 'O':
if tag not in vocab:
vocab['O'] = len(vocab) + 1
continue
            vocab['{}-{}'.format(tag, label)] = len(vocab) + 1  # the value is really the running index of this tag
return vocab | 36125c684be9dc1d0abc522d536276be7e3d7328 | 635 |
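# Usage sketch for _generate_tags with a BIO scheme and two labels; 'O' is
# shared across labels while the B-/I- tags are generated per label.
print(_generate_tags("BIO", number_labels=2))
# {'B-0': 1, 'I-0': 2, 'O': 3, 'B-1': 4, 'I-1': 5}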
def delete_link_tag(api_client, link_id, tag_key, **kwargs): # noqa: E501
"""delete_link_tag # noqa: E501
Delete link tag by key
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.delete_link_tag(client, link_id, tag_key, async_req=True)
:param link_id str: str of link. e.g. lnk0
:param tag_key str: key of tag
:param async_req bool: execute request asynchronously
:param bool sorted: Sort resources
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
request_params = [] # noqa: E501
collection_formats = {}
path_params = {"link_id": link_id, "tag_key": tag_key}
query_params = []
for param in [p for p in request_params if local_var_params.get(p) is not None]:
query_params.append((param, local_var_params[param])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/links/{link_id}/tags/{tag_key}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
) | 5e643eaa02b50a733030ac35da5821a494ee2517 | 636 |
def infect_graph(g, title):
"""
Function to infect the graph using SI model.
Parameters:
g: Graph
Returns:
G : Infected graph
t : Time of diffusion of each node
"""
G=g
# Model selection - diffusion time
model = ep.SIModel(G)
nos = 1/len(G)
# Model Configuration
config = mc.Configuration()
config.add_model_parameter('beta', 0.03)
config.add_model_parameter("fraction_infected", 0.05)
model.set_initial_status(config)
# Simulation execution
iterations = model.iteration_bunch(200)
diffusionTime={}
for i in range(1,len(G)):
diffusionTime[i]=-1
for i in iterations:
for j in i['status']:
if(i['status'][j]==1):
diffusionTime[j]=i['iteration']
nodeColor = []
source_nodes = []
for i in G.nodes():
if iterations[0]["status"][i]==1:
nodeColor.append('red')
source_nodes.append(i)
else:
nodeColor.append('blue')
sorted_values = sorted(diffusionTime.values()) # Sort the values
sorted_dict = {}
for i in sorted_values:
for k in diffusionTime.keys():
if diffusionTime[k] == i:
sorted_dict[k] = diffusionTime[k]
plt.clf()
nx.draw(G, node_color=nodeColor, with_labels=True)
plt.title('Intial Phase')
plt.savefig(f'./plots/{title}_Initial-infect.png')
plt.clf()
nx.draw(G, node_color=list(x for i,x in diffusionTime.items()),cmap=plt.cm.Reds, with_labels=True)
plt.title('Final Phase')
plt.savefig(f'./plots/{title}_Final-infect.png')
return (G, sorted_dict, source_nodes) | b0c3f5d2518083cabf4de4214acf65027fc623f5 | 637 |
def main_func_SHORT():
""" Func. called by the main T """
sleep(SHORT)
return True | de5cffd80a74a048f0016e5912dd15b93f0a4dd6 | 638 |
from typing import Tuple
import math
def split_train_test(X: pd.DataFrame, y: pd.Series, train_proportion: float = .75) \
-> Tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:
"""
Randomly split given sample to a training- and testing sample
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Data frame of samples and feature values.
y : Series of shape (n_samples, )
Responses corresponding samples in data frame.
train_proportion: Fraction of samples to be split as training set
Returns
-------
train_X : DataFrame of shape (ceil(train_proportion * n_samples), n_features)
Design matrix of train set
train_y : Series of shape (ceil(train_proportion * n_samples), )
Responses of training samples
test_X : DataFrame of shape (floor((1-train_proportion) * n_samples), n_features)
Design matrix of test set
test_y : Series of shape (floor((1-train_proportion) * n_samples), )
Responses of test samples
"""
no_of_train_rows = math.ceil(train_proportion * X.shape[0])
X : pd.DataFrame = pd.DataFrame.join(X,y)
train_data = X.sample(n=no_of_train_rows, axis=0)
test_data = X.loc[X.index.difference(train_data.index), ]
train_y = train_data[y.name]
test_y = test_data[y.name]
train_data.drop(columns=y.name,inplace=True)
test_data.drop(columns=y.name,inplace=True)
# print(train_data.shape)
# print(test_data.shape)
# print(train_y.shape)
# print(test_y.shape)
return train_data, train_y, test_data, test_y | 2261e72821ab55ac12f4f0b88511ce1f6d9d8d5f | 639 |
def s2sdd(s):
""" Converts a 4-port single-ended S-parameter matrix
to a 2-port differential mode representation.
Reference: https://www.aesa-cortaillod.com/fileadmin/documents/knowledge/AN_150421_E_Single_ended_S_Parameters.pdf
"""
sdd = np.zeros((2, 2), dtype=np.complex128)
sdd[0, 0] = 0.5*(s[0, 0] - s[0, 2] - s[2, 0] + s[2, 2])
sdd[0, 1] = 0.5*(s[0, 1] - s[0, 3] - s[2, 1] + s[2, 3])
sdd[1, 0] = 0.5*(s[1, 0] - s[1, 2] - s[3, 0] + s[3, 2])
sdd[1, 1] = 0.5*(s[1, 1] - s[1, 3] - s[3, 1] + s[3, 3])
return sdd | 0d29d2d248a49dd27bab202abd84014aab799907 | 640 |
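# Usage sketch for s2sdd (numpy assumed imported as np): a 4x4 single-ended
# S-matrix is folded into its 2x2 differential-mode representation; the
# identity matrix maps to a 2x2 identity.
import numpy as np
print(s2sdd(np.eye(4, dtype=np.complex128)))
# [[1.+0.j 0.+0.j]
#  [0.+0.j 1.+0.j]]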
def plot_gdf(gdf, map_f=None, maxitems=-1, style_func_args={}, popup_features=[],
tiles='cartodbpositron', zoom=6, geom_col='geometry', control_scale=True):
"""
:param gdf: GeoDataFrame
GeoDataFrame to visualize.
:param map_f: folium.Map
`folium.Map` object where the GeoDataFrame `gdf` will be plotted. If `None`, a new map will be created.
:param maxitems: int
maximum number of tiles to plot. If `-1`, all tiles will be plotted.
:param style_func_args: dict
dictionary to pass the following style parameters (keys) to the GeoJson style function of the polygons:
'weight', 'color', 'opacity', 'fillColor', 'fillOpacity', 'radius'
:param popup_features: list
when clicking on a tile polygon, a popup window displaying the information in the
columns of `gdf` listed in `popup_features` will appear.
:param tiles: str
folium's `tiles` parameter.
:param zoom: int
initial zoom.
:param geom_col: str
name of the geometry column of `gdf`.
:param control_scale: bool
if `True`, add scale information in the bottom left corner of the visualization. The default is `True`.
Returns
-------
`folium.Map` object with the plotted GeoDataFrame.
"""
if map_f is None:
# initialise map
lon, lat = np.mean(np.array(list(gdf[geom_col].apply(utils.get_geom_centroid).values)), axis=0)
map_f = folium.Map(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale=control_scale)
count = 0
for k in gdf.index:
g = gdf.loc[k]
if type(g[geom_col]) == gpd.geoseries.GeoSeries:
for i in range(len(g[geom_col])):
map_f = add_to_map(g[geom_col].iloc[i], g.iloc[i], map_f,
popup_features=popup_features,
style_func_args=style_func_args)
else:
map_f = add_to_map(g[geom_col], g, map_f,
popup_features=popup_features,
style_func_args=style_func_args)
count += 1
if count == maxitems:
break
return map_f | 80c4002ca82e849a1701c00ead54671729009670 | 641 |
def set_catflap_cat_inside(request, catflap_uuid):
"""GET so it can be used as an email link."""
catflap = CatFlap.objects.get(uuid=catflap_uuid)
if not catflap.cat_inside:
catflap.cat_inside = True
catflap.save()
track_manual_intervention(catflap, cat_inside=True)
return redirect_to_status_page(request, catflap_uuid) | cb7feabe83ef69598aff9fe6ba9867996312c892 | 642 |
import ctypes
def feature_list():
"""Check the library for compile-time features. The list of features are maintained in libinfo.h and libinfo.cc
Returns
-------
list
List of :class:`.Feature` objects
"""
lib_features_c_array = ctypes.POINTER(Feature)()
lib_features_size = ctypes.c_size_t()
check_call(_LIB.MXLibInfoFeatures(ctypes.byref(lib_features_c_array), ctypes.byref(lib_features_size)))
features = [lib_features_c_array[i] for i in range(lib_features_size.value)]
return features | ec20748fb4aae07898949822b3a7ba9835f4ed57 | 643 |
def _symm_herm(C):
"""To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_jilk*"""
nans = np.isnan(C)
C[nans] = np.einsum('jilk', C)[nans].conj()
return C | bd2e46b3ed751eb380aedd8d280294177fb6b3fd | 644 |
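# Illustrative sketch (added, not part of the source): fill a NaN entry of a
# rank-4 array using the symmetry C[i, j, k, l] = conj(C[j, i, l, k]).
import numpy as np

C = np.arange(16, dtype=complex).reshape(2, 2, 2, 2)
C[0, 1, 0, 1] = np.nan
C = _symm_herm(C)  # the NaN entry is replaced by conj(C[1, 0, 1, 0])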
def cat(self, dim=0):
"""Map of 'cat' pytorch method."""
x = self
dim = _dim_explicit(x[0].shape, dim)
return P.concat(x, dim) | 9eba4de4941ac437e82f98647e0c6dc014b1578f | 645 |
import re
def _name_xform(o):
"""transform names to lowercase, without symbols (except underscore)
Any chars other than alphanumeric are converted to an underscore
"""
    return re.sub(r"\W", "_", o.lower())
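# Quick illustrative check (added): lowercase everything and map each
# non-alphanumeric character (other than underscore) to an underscore.
assert _name_xform("My Field-Name (v2)") == "my_field_name__v2_"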
def runner(app):
"""创建一个运行器,用于调用应用注册的 Click 命令"""
return app.test_cli_runner() | f9ffb3040045e0789a5686eb9a80f3fdef126a9d | 647 |
def create_activation_cache(model):
"""Creates an activation cache for the tensors of a model."""
input_quantizer = quantized_relu(8, 0)
output_cache = {}
# If using a Sequential model, the input layer is hidden. Therefore, add the
# input quantization to the cache if the first layer is not an input layer
if not isinstance(model.layers[0], InputLayer):
output_cache[model.layers[0].input.experimental_ref()] = input_quantizer
# cache graph tensors' activations
for l in model.layers:
output_cache[l.output.experimental_ref()] = l
if isinstance(l, QActivation) or isinstance(l, QAdaptiveActivation) :
output_cache[l.output.experimental_ref()] = l.quantizer
elif isinstance(l, InputLayer):
# assume the input is 8-bit positive value
output_cache[l.output.experimental_ref()] = input_quantizer
elif l.__class__.__name__ in [
"QDense", "QConv2D", "QConv1D", "QDepthwiseConv2D"
]:
output_cache[l.output.experimental_ref()] = l.activation
else:
if isinstance(l.input, list):
# right now, we just get the first one - we assume this is the leading
# one.
all_q = [
output_cache.get(l.input[i].experimental_ref())
for i in range(len(l.input))
]
q = all_q[0]
else:
q = output_cache.get(l.input.experimental_ref(), None)
output_cache[l.output.experimental_ref()] = q
if q is None:
raise ValueError("Unknown operation in {}".format(l.name))
return output_cache | a35c11e95831e3aa51fadce75577e97bd150cc1e | 648 |
def feature_scatterplot(fset_path, features_to_plot):
"""Create scatter plot of feature set.
Parameters
----------
fset_path : str
Path to feature set to be plotted.
features_to_plot : list of str
List of feature names to be plotted.
Returns
-------
(str, str)
Returns (docs_json, render_items) json for the desired plot.
"""
fset, data = featurize.load_featureset(fset_path)
fset = fset[features_to_plot]
colors = cycle(palette[5])
plots = np.array([[figure(width=300, height=200)
for j in range(len(features_to_plot))]
for i in range(len(features_to_plot))])
for (j, i), p in np.ndenumerate(plots):
if (j == i == 0):
p.title.text = "Scatterplot matrix"
p.circle(fset.values[:,i], fset.values[:,j], color=next(colors))
p.xaxis.minor_tick_line_color = None
p.yaxis.minor_tick_line_color = None
p.ygrid[0].ticker.desired_num_ticks = 2
p.xgrid[0].ticker.desired_num_ticks = 4
p.outline_line_color = None
p.axis.visible = None
    plot = gridplot(plots.tolist(), ncols=len(features_to_plot), merge_tools=True, responsive=True, title="Test")
# Convert plot to json objects necessary for rendering with bokeh on the
# frontend
render_items = [{'docid': plot._id, 'elementid': make_id()}]
doc = Document()
doc.add_root(plot)
docs_json_inner = doc.to_json()
docs_json = {render_items[0]['docid']: docs_json_inner}
docs_json = serialize_json(docs_json)
render_items = serialize_json(render_items)
return docs_json, render_items | e8e0a545b992042eb334554042c9efa68a4f6a1f | 649 |
def model1(v, va, vb, ka, Wa, Wb, pa):
"""
A translation of the equation from Sandström's Dynamic NMR Spectroscopy,
p. 14, for the uncoupled 2-site exchange simulation.
v: frequency whose amplitude is to be calculated
va, vb: frequencies of a and b singlets (slow exchange limit) (va > vb)
ka: rate constant for state A--> state B
    pa: fraction of population in state A
    dv: frequency difference (va - vb) between a and b singlets (slow exchange)
T2a, T2b: T2 (transverse relaxation time) for each nuclei
returns: amplitude at frequency v
"""
pi = np.pi
pb = 1 - pa
tau = pb / ka
dv = va - vb
Dv = (va + vb) / 2 - v
T2a = 1 / (pi * Wa)
T2b = 1 / (pi * Wb)
P = tau * ((1 / (T2a * T2b)) - 4 * (pi ** 2) * (Dv ** 2) +
(pi ** 2) * (dv ** 2))
P += ((pa / T2a) + (pb / T2b))
Q = tau * (2 * pi * Dv - pi * dv * (pa - pb))
R = 2 * pi * Dv * (1 + tau * ((1 / T2a) + (1 / T2b)))
R += pi * dv * tau * ((1 / T2b) - (1 / T2a)) + pi * dv * (pa - pb)
I = (P * (1 + tau * ((pb / T2a) + (pa / T2b))) + Q * R) / (P ** 2 + R ** 2)
return I | c1110cbd16bfcd942a086e8d878bfe3117bf4f99 | 650 |
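# Hedged usage sketch (parameter values are assumed, not taken from the
# source): evaluate the two-site exchange lineshape on a grid of frequencies
# spanning both singlets.
import numpy as np

freqs = np.linspace(80.0, 220.0, 500)  # Hz
spectrum = [model1(v, va=200.0, vb=100.0, ka=50.0, Wa=1.0, Wb=1.0, pa=0.5)
            for v in freqs]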
def calculate_laminar_flame_speed(
initial_temperature,
initial_pressure,
species_dict,
mechanism,
phase_specification="",
unit_registry=_U
):
"""
This function uses cantera to calculate the laminar flame speed of a given
gas mixture.
Parameters
----------
initial_temperature : pint.Quantity
Initial temperature of gas mixture
initial_pressure : pint.Quantity
Initial pressure of gas mixture
species_dict : dict
Dictionary with species names (all caps) as keys and moles as values
mechanism : str
String of mechanism to use (e.g. "gri30.cti")
phase_specification : str
Phase specification for cantera solution
unit_registry : pint.UnitRegistry
Unit registry for managing units to prevent conflicts with parent
unit registry
Returns
-------
pint.Quantity
Laminar flame speed in m/s as a pint quantity
"""
gas = ct.Solution(mechanism, phase_specification)
quant = unit_registry.Quantity
tools.check_pint_quantity(
initial_pressure,
"pressure",
ensure_positive=True
)
tools.check_pint_quantity(
initial_temperature,
"temperature",
ensure_positive=True
)
# ensure species dict isn't empty
if len(species_dict) == 0:
raise ValueError("Empty species dictionary")
# ensure all species are in the mechanism file
bad_species = ""
good_species = gas.species_names
for species in species_dict:
if species not in good_species:
bad_species += species + "\n"
if len(bad_species) > 0:
raise ValueError("Species not in mechanism:\n" + bad_species)
gas.TPX = (
initial_temperature.to("K").magnitude,
initial_pressure.to("Pa").magnitude,
species_dict
)
# find laminar flame speed
flame = ct.FreeFlame(gas)
flame.set_refine_criteria(ratio=3, slope=0.1, curve=0.1)
flame.solve(loglevel=0)
return quant(flame.u[0], "m/s") | cdceb35dd5313d32e5b7cdfd4af2b842d2019587 | 651 |
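# Hedged usage sketch (the mixture, mechanism name, and unit registry are
# assumptions, not taken from the source): a stoichiometric methane/air flame
# at ambient conditions with the GRI-Mech 3.0 mechanism shipped with Cantera.
import pint

ureg = pint.UnitRegistry()
speed = calculate_laminar_flame_speed(
    initial_temperature=ureg.Quantity(300.0, "K"),
    initial_pressure=ureg.Quantity(101325.0, "Pa"),
    species_dict={"CH4": 1.0, "O2": 2.0, "N2": 7.52},
    mechanism="gri30.cti",
    unit_registry=ureg,
)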
def extrapolate_coverage(lines_w_status):
"""
Given the following input:
>>> lines_w_status = [
(1, True),
(4, True),
(7, False),
(9, False),
]
Return expanded lines with their extrapolated line status.
>>> extrapolate_coverage(lines_w_status) == [
(1, True),
(2, True),
(3, True),
(4, True),
(5, None),
(6, None),
(7, False),
(8, False),
(9, False),
]
"""
lines = []
prev_lineno = 0
prev_status = True
for lineno, status in lines_w_status:
while (lineno - prev_lineno) > 1:
prev_lineno += 1
if prev_status is status:
lines.append((prev_lineno, status))
else:
lines.append((prev_lineno, None))
lines.append((lineno, status))
prev_lineno = lineno
prev_status = status
return lines | e7685359f570ae979f2421c3a64513409b9df352 | 652 |
def get_image_features(filename):
"""
Param:
Path to image
Returns:
Desired features of image in the form of a dictionary (key = feature_name, value = feature_value)
"""
array, metadata = nrrd.read(filename)
return {k: f(array, metadata, filename) for k, f in image_feature_functions.items()} | 0f991ebed175f0f41e30654cb4665dea09a1053d | 653 |
def get_DCT_transform_matrix(N):
"""
Return the normalised N-by-N discrete cosine transform (DCT) matrix.
Applying the returned transform matrix to a vector x: D.dot(x) yields the
DCT of x. Applying the returned transform matrix to a matrix A: D.dot(A)
applies the DCT to the columns of A. Taking D.dot(A.dot(D.T)) applies the
DCT to both columns and rows, i.e. a full 2D separable DCT transform. The
inverse transform (the 1D IDCT) is D.T.
Parameters
----------
N : int
The size of the DCT transform matrix to return.
Returns
-------
D : ndarray
The DCT transform matrix.
Notes
-----
    The returned DCT matrix is normalised such that it constitutes an orthonormal
transform as given by equations (2.119) and (2.120) in [1]_.
References
----------
.. [1] A.N. Akansu, R.A. Haddad, and P.R. Haddad, *Multiresolution Signal
Decomposition: Transforms, Subbands, and Wavelets*, Academic Press,
2000.
Examples
--------
For example, get a 5-by-5 DCT matrix
>>> import numpy as np
>>> from magni.imaging.dictionaries import get_DCT_transform_matrix
>>> D = get_DCT_transform_matrix(5)
>>> np.round(np.abs(D), 4)
array([[ 0.4472, 0.4472, 0.4472, 0.4472, 0.4472],
[ 0.6015, 0.3717, 0. , 0.3717, 0.6015],
[ 0.5117, 0.1954, 0.6325, 0.1954, 0.5117],
[ 0.3717, 0.6015, 0. , 0.6015, 0.3717],
[ 0.1954, 0.5117, 0.6325, 0.5117, 0.1954]])
and apply the 2D DCT transform to a dummy image
>>> np.random.seed(6021)
>>> img = np.random.randn(5, 5)
>>> img_dct = D.dot(img.dot(D.T))
>>> np.round(img_dct, 4)
array([[-0.5247, -0.0225, 0.9098, 0.369 , -0.477 ],
[ 1.7309, -0.4142, 1.9455, -0.6726, -1.3676],
[ 0.6987, 0.5355, 0.7213, -0.8498, -0.1023],
[ 0.0078, -0.0545, 0.3649, -1.4694, 1.732 ],
[-1.5864, 0.156 , 0.8932, -0.8091, 0.5056]])
"""
@_decorate_validation
def validate_input():
_numeric('N', 'integer', range_='[1;inf)')
validate_input()
nn, rr = np.meshgrid(*map(np.arange, (N, N)))
D = np.cos((2 * nn + 1) * rr * np.pi / (2 * N))
D[0, :] /= np.sqrt(N)
D[1:, :] /= np.sqrt(N/2)
return D | 87314407c0836892a79747ea01aa9c369224198b | 654 |
def get_reduce_nodes(name, nodes):
"""
Get nodes that combine the reduction variable with a sentinel variable.
Recognizes the first node that combines the reduction variable with another
variable.
"""
reduce_nodes = None
for i, stmt in enumerate(nodes):
lhs = stmt.target.name
rhs = stmt.value
if isinstance(stmt.value, ir.Expr):
in_vars = set(v.name for v in stmt.value.list_vars())
if name in in_vars:
args = get_expr_args(stmt.value)
args.remove(name)
assert len(args) == 1
replace_vars_inner(stmt.value, {args[0]:
ir.Var(stmt.target.scope, name+"#init", stmt.target.loc)})
reduce_nodes = nodes[i:]
                break
assert reduce_nodes, "Invalid reduction format"
return reduce_nodes | 8438829236f9c45986d1e1394cd0c6864caec73f | 655 |
def extract_mesh_descriptor_id(descriptor_id_str: str) -> int:
""" Converts descriptor ID strings (e.g. 'D000016') into a number ID (e.g. 16). """
if len(descriptor_id_str) == 0:
raise Exception("Empty descriptor ID")
if descriptor_id_str[0] != "D":
raise Exception("Expected descriptor ID to start with 'D', {}".format(descriptor_id_str))
return int(descriptor_id_str[1:]) | 9f013eadee9a149b9617e4a1c058bbe67c6dd8ba | 656 |
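# Quick illustrative checks (added): leading zeros are dropped by the int cast.
assert extract_mesh_descriptor_id("D000016") == 16
assert extract_mesh_descriptor_id("D012345") == 12345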
def process_sources(sources_list):
"""
This function processes the sources result
:param sources_list: A list of dictionaries
:return: A list of source objects
"""
sources_results = []
for sources_item in sources_list:
id = sources_item.get('id')
name = sources_item.get('name')
description = sources_item.get('description')
url = sources_item.get('url')
category = sources_item.get('category')
language = sources_item.get('language')
country = sources_item.get('country')
print(sources_item)
sources_object = Sources(id, name, description, url)
sources_results.append(sources_object)
return sources_results | 7742a721802c66daf525520969f5831f9a497137 | 657 |
import math
def encrypt(message_text, key):
"""Method Defined for ENCRYPTION of a Simple \
String message into a Cipher Text Using \
2x2 Hill Cipher Technique
\nPARAMETERS\n
message_text: string to be encrypted
key: string key for encryption with length <= 4
\nRETURNS\n
cipher_text: encrypted Message string
"""
# for 2x2 Hill Cipher length of key must be <= 4
# print("Warning: All Spaces with be lost!")
cipher_text = ""
key_matrix = None
if len(key) <= 4:
key_matrix = string_to_Matrix_Z26(key, 2, 2)
else:
print("Key Length must be <= 4 in 2x2 Hill Cipher")
return
pairs = math.ceil((len(message_text)/2))
matrix = string_to_Matrix_Z26(message_text, 2, pairs)
key_inverse = matrix_inverse_Z26(key_matrix)
if type(key_inverse) == type(None):
print("NOTE: The provided Key is NOT Invertible,")
print("To avoid failure while decryption,")
print("Try again with an invertible Key")
return None
for i in range(pairs):
result_char = (key_matrix*matrix[:, i]) % 26
cipher_text += ENGLISH_ALPHABETS[
result_char[0, 0]
]
cipher_text += ENGLISH_ALPHABETS[
result_char[1, 0]
]
return cipher_text | 2b36ba888980021cc69bffb1316b4e352bd026e8 | 658 |
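# Hedged usage sketch (key and message are assumptions): the helpers
# string_to_Matrix_Z26, matrix_inverse_Z26 and ENGLISH_ALPHABETS come from the
# surrounding module and are assumed to be in scope.
cipher = encrypt("SHORTEXAMPLE", "HILL")  # cipher text, or None if the key is not invertible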
import torch
def resnet101(pretrained=False, num_groups=None, weight_std=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], num_groups=num_groups, weight_std=weight_std, **kwargs)
if pretrained:
model_dict = model.state_dict()
if num_groups and weight_std:
pretrained_dict = torch.load('data/R-101-GN-WS.pth.tar')
overlap_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
assert len(overlap_dict) == 312
elif not num_groups and not weight_std:
pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
overlap_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
else:
raise ValueError('Currently only support BN or GN+WS')
model_dict.update(overlap_dict)
model.load_state_dict(model_dict)
return model | da4213b280eeef56cf1bdce7bfd05cfc0b8cde7d | 659 |
def cron(cronline, venusian_category='irc3.plugins.cron'):
"""main decorator"""
def wrapper(func):
def callback(context, name, ob):
obj = context.context
crons = obj.get_plugin(Crons)
if info.scope == 'class':
callback = getattr(
obj.get_plugin(ob),
func.__name__)
else:
callback = irc3.utils.wraps_with_context(func, obj)
crons.add_cron(cronline, callback)
info = venusian.attach(func, callback, category=venusian_category)
return func
return wrapper | 9ffe20fd3e803b1260577ff38122b4912ab4d69a | 660 |
def oauth_type():
"""Check if Slack or another OAuth has been configured"""
if "OAUTH_TYPE" in current_app.config:
return current_app.config["OAUTH_TYPE"].lower()
else:
return None | d9fe8f77fd502890becd44cbaf802b2e94598a6f | 661 |
import random
import string
def create_categories():
"""Create a group of random strings for each column in the table."""
return [
[
''.join(random.choices(string.ascii_lowercase, k=random.randint(STR_MIN, STR_MAX)))
for _i in range(CAT_COUNT)
]
for _j in range(COL_COUNT)
] | 8552be3fb45091f404d396f452dd37824a0cae23 | 663 |
from typing import Union
from typing import Tuple
from typing import List
from typing import Any
def _compute_comm_classes(
A: Union[np.ndarray, spmatrix]
) -> Tuple[List[List[Any]], bool]:
"""Compute communication classes for a graph given by A."""
di_graph = (
nx.from_scipy_sparse_matrix(A, create_using=nx.DiGraph)
if issparse(A)
else nx.from_numpy_array(A, create_using=nx.DiGraph)
)
comm_classes = sorted(
nx.strongly_connected_components(di_graph), key=len, reverse=True
)
is_irreducible = len(comm_classes) == 1
return comm_classes, is_irreducible | 15a7025d10855a4644be60d52dba273c526c2b43 | 665 |
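# Illustrative call (added): a chain with a one-way edge out of the {0, 1}
# block is reducible, so two communication classes are expected.
import numpy as np

A = np.array([[0.5, 0.5, 0.0],
              [0.5, 0.4, 0.1],
              [0.0, 0.0, 1.0]])
classes, irreducible = _compute_comm_classes(A)
# classes -> [{0, 1}, {2}], irreducible -> False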
import typing
def parse_lines(lines: typing.List[str],
units: Units,
use_na: bool = True) -> typing.List[typing.Dict[str, typing.Any]]:
"""
Returns a list of parsed line dictionaries
"""
parsed_lines = []
prob = ''
while lines:
raw_line = lines[0].strip()
line = core.sanitize_line(raw_line)
# Remove prob from the beginning of a line
if line.startswith('PROB'):
# Add standalone prob to next line
if len(line) == 6:
prob = line
line = ''
# Add to current line
elif len(line) > 6:
prob = line[:6]
line = line[6:].strip()
if line:
parsed_line = (parse_na_line if use_na else parse_in_line)(line, units)
for key in ('start_time', 'end_time'):
parsed_line[key] = core.make_timestamp(parsed_line[key])
parsed_line['probability'] = core.make_number(prob[4:])
parsed_line['raw'] = raw_line
parsed_line['sanitized'] = prob + ' ' + line if prob else line
prob = ''
parsed_lines.append(parsed_line)
lines.pop(0)
return parsed_lines | 356b3c4d80a462643ab6c7747d4f1174202129be | 666 |
import random
def rand_cutout(np_img, pcts=(0.05, 0.4), depth=(1., 0.), max_k=1):
"""Cut out from image, and edges of rectangles are smooth.
Returns:
applied image, cut mask
"""
cut = np.ones(np_img.shape[:2])
k = random.randint(1, max_k)
for _ in range(k):
d = random.random() * depth[0] + depth[1]
hill = rand_solid_hill((np_img.shape[1], np_img.shape[0]), pcts=pcts)
cut = cut * (1 - d * hill)
return np_img * cut[..., np.newaxis], (cut < 0.9).astype(np.int8) | fd5b40138314827c3ff69bb571f30453049c073b | 667 |
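# Hedged usage sketch (added): apply a random smooth cutout to a dummy RGB
# image. rand_solid_hill is a helper from the same module and is assumed to
# be available alongside rand_cutout.
import numpy as np

img = np.random.rand(64, 64, 3)
cut_img, cut_mask = rand_cutout(img, pcts=(0.05, 0.4), depth=(1.0, 0.0), max_k=2)
# cut_img keeps img's shape; cut_mask is an int8 array of shape (64, 64)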
from typing import List
def create_content_list(contents: List[str]) -> str:
"""Format list of string into markdown list
Args:
contents: (List[string]), list of string to be formatted
Returns:
String
"""
return '\n'.join(
[template.LIST_TEMPLATE.format(
level='',
content=item
) for item in contents]) | 4080d7540def0bd0199f380a787b49eafc512b6f | 668 |
from typing import Optional
import requests
import logging
def convert(key: str, content: str, output_format: OWLFormat=OWLFormat.func) -> Optional[str]:
"""
Convert content into output_format
:param key: Key of content for error reporting
:param content: OWL representation
:param output_format: target format
:return: Converted information if successful
"""
try:
resp = requests.post('https://www.ldf.fi/service/owl-converter/',
data=dict(onto=content, to=output_format.name))
    except requests.exceptions.ConnectionError as e:
logging.getLogger().error(f"{key}: {str(e)}")
return None
if resp.ok:
return resp.text
logging.getLogger().error(f"{key}: {str(resp)}") | c350a00198489d6224693cf528fb952283c6fc27 | 669 |
def _post_single_image(client: Imgur, image_path, title, description=None):
"""
Limit to 1250 POST requests per hour and 12500 per day
"""
image = client.image_upload(image_path, title, description)
# album_id = client.album_get('Family Photos')['response']['data']['id']
# client.album_add(album_id, image['response']['data']['id'])
return image['response']['data']['link'] | 22b1c5996986616515e8edcfcd22d9c82df1d27b | 670 |
def load_data(path, start=0, end=99999, step=1, returnNames = False):
"""Load images into a list
#Arguments
    path: String path to a folder containing images
        that must be named as numbers
    start, end, step: Refer to the numeric names of the images. Only loads
        images within this range.
"""
imgs = load_imgs(path,start,end,step,returnNames = returnNames)
return imgs | 7edaff4bc977a63c22f140b74a86bc9c6fdae604 | 671 |
def animated_1d_plot(probe_data_dnf: np.ndarray,
probe_data_input1: np.ndarray,
probe_data_input2: np.ndarray,
interval: ty.Optional[int] = 30) -> None:
"""Generates an animated plot for examples in the DNF regimes tutorial.
Parameters
----------
probe_data_dnf : numpy.ndarray
probe data of the DNF
probe_data_input1 : numpy.ndarray
probe data of the first spiking input
probe_data_input2 : numpy.ndarray
probe data of the second spiking input
interval : int
interval to use in matplotlib.animation.FuncAnimation
"""
probe_data_input = probe_data_input1 + probe_data_input2
    probe_data_input = probe_data_input.astype(float)
    probe_data_dnf = probe_data_dnf.astype(float)
probe_data_input = np.transpose(probe_data_input)
probe_data_dnf = np.transpose(probe_data_dnf)
num_neurons = np.size(probe_data_input, axis=1)
num_time_steps = np.size(probe_data_dnf, axis=0)
input_spike_rates = compute_spike_rates(probe_data_input)
dnf_spike_rates = compute_spike_rates(probe_data_dnf)
fig, ax = plt.subplots(2, 1, figsize=(10, 5))
line0, = ax[0].plot(np.zeros((num_neurons,)), 'bo-')
line1, = ax[1].plot(np.zeros((num_neurons,)), 'ro-')
im = [line0, line1]
ax[0].set_xlabel("")
ax[1].set_xlabel("Input neuron idx")
ax[0].set_ylabel("Input spike rate")
ax[1].set_ylabel("DNF spike rate")
ax[0].set_xticks([])
ax[1].set_xticks([0, num_neurons - 1])
ax[0].set_yticks([0, 1])
ax[1].set_yticks([0, 1])
ax[0].set_xlim(-1, num_neurons)
ax[1].set_xlim(-1, num_neurons)
offset = 0.1
ax[0].set_ylim(np.min(input_spike_rates) - offset,
np.max(input_spike_rates) + offset)
ax[1].set_ylim(np.min(dnf_spike_rates) - offset,
np.max(dnf_spike_rates) + offset)
plt.tight_layout()
def animate(i: int) -> ty.List:
x = range(num_neurons)
im[0].set_data(x, input_spike_rates[i, :])
im[1].set_data(x, dnf_spike_rates[i, :])
return im
anim = animation.FuncAnimation(fig,
animate,
frames=num_time_steps,
interval=interval,
blit=True)
html = display.HTML(anim.to_jshtml())
display.display(html)
plt.close() | 8ec34c6772b728daeb429a2ecf4af52ab673bc96 | 672 |
def create_tendencies(params, return_inner_products=False, return_qgtensor=False):
"""Function to handle the inner products and tendencies tensors construction.
Returns the tendencies function :math:`\\boldsymbol{f}` determining the model's ordinary differential
equations:
.. math:: \dot{\\boldsymbol{x}} = \\boldsymbol{f}(\\boldsymbol{x})
which is for the model's integration.
It returns also the linearized tendencies
:math:`\\boldsymbol{\mathrm{J}} \equiv \\boldsymbol{\mathrm{D}f} = \\frac{\partial \\boldsymbol{f}}{\partial \\boldsymbol{x}}`
(Jacobian matrix) which are used by the tangent linear model:
.. math :: \dot{\\boldsymbol{\delta x}} = \\boldsymbol{\mathrm{J}}(\\boldsymbol{x}) \cdot \\boldsymbol{\delta x}
Parameters
----------
params: ~params.params.QgParams
The parameters fully specifying the model configuration.
return_inner_products: bool
If True, return the inner products of the model. Default to False.
return_qgtensor: bool
If True, return the tendencies tensor of the model. Default to False.
Returns
-------
f: callable
The numba-jitted tendencies function.
Df: callable
The numba-jitted linearized tendencies function.
inner_products: (AtmosphericInnerProducts, OceanicInnerProducts)
If `return_inner_products` is True, the inner products of the system.
qgtensor: QgsTensor
If `return_qgtensor` is True, the tendencies tensor of the system.
"""
if params.ablocks is not None:
aip = AtmosphericInnerProducts(params)
else:
aip = None
if params.goblocks is not None and params.gotemperature_params._name == "Oceanic Temperature":
oip = OceanicInnerProducts(params)
else:
oip = None
if aip is not None and oip is not None:
aip.connect_to_ocean(oip)
agotensor = QgsTensor(aip, oip)
coo = agotensor.tensor.coords.T
val = agotensor.tensor.data
@njit
def f(t, x):
xx = np.concatenate((np.full((1,), 1.), x))
xr = sparse_mul3(coo, val, xx, xx)
return xr[1:]
jcoo = agotensor.jacobian_tensor.coords.T
jval = agotensor.jacobian_tensor.data
@njit
def Df(t, x):
xx = np.concatenate((np.full((1,), 1.), x))
mul_jac = sparse_mul2(jcoo, jval, xx)
return mul_jac[1:, 1:]
ret = list()
ret.append(f)
ret.append(Df)
if return_inner_products:
ret.append((aip, oip))
if return_qgtensor:
ret.append(agotensor)
return ret | 2ca9f8f6c52b72070c8d339a6ec23d7dd52ea66c | 673 |
import re
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords | ff70ee2690bc36aaf892653040996597750c52da | 674 |