content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
import collections
import math
def compute_bleu(reference_corpus,
translation_corpus,
max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    6-Tuple with the BLEU score, the n-gram precisions, the brevity penalty,
    the length ratio, the translation length and the reference length.
    bleu: float, BLEU score of the translated sentences,
    precisions: list, precision for each n-gram order,
    bp: brevity penalty, the penalty factor for short translations,
    ratio: translation_length / min(reference_length),
    translation_length: int, total length of the translations,
    reference_length: int, total length of the shortest references
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
    for reference in references:  # consider all references for this translation
      merged_ref_ngram_counts |= _get_ngrams(reference, max_order)  # Counter union (element-wise max)
    translation_ngram_counts = _get_ngrams(translation, max_order)
    overlap = translation_ngram_counts & merged_ref_ngram_counts  # Counter intersection (element-wise min)
# matches_by_order:{len(ngram):sum of ngram overlap counts}
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
    # possible_matches_by_order (total candidate n-grams): {len(ngram): sum of possible matches for each order}
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
  # brevity penalty (penalizes short translations more heavily, since shorter translations tend to have higher n-gram precision)
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length) | 4a7f45ea988e24ada554b38cea84083effe164bd | 3,658,953 |
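# Note: the compute_bleu record above calls a _get_ngrams helper that is not part of
# the snippet. A minimal sketch consistent with how it is used there (it should return
# a collections.Counter of n-gram tuples up to max_order); this is an assumption, not
# part of the original record.
def _get_ngrams(segment, max_order):
  """Extracts all n-grams up to max_order from a token list and counts them."""
  ngram_counts = collections.Counter()
  for order in range(1, max_order + 1):
    for i in range(0, len(segment) - order + 1):
      ngram_counts[tuple(segment[i:i + order])] += 1
  return ngram_counts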
def rf_agg_local_mean(tile_col):
"""Compute the cellwise/local mean operation between Tiles in a column."""
return _apply_column_function('rf_agg_local_mean', tile_col) | d65f6c7de674aac10ee91d39c8e5bc4ea6284e58 | 3,658,954 |
import json
import urllib2
def Shot(project, name):
"""
    Fetches shot information.
    Returns a (dict, err) tuple.
"""
restURL = "http://10.0.90.251/api/shot?project=%s&name=%s" % (project, name)
try:
data = json.load(urllib2.urlopen(restURL))
except:
return {}, "RestAPI에 연결할 수 없습니다."
if "error" in data:
return {}, data["error"]
return data["data"], None | 6bd7ac1e3663faf8b120c03a1e873255557bc30d | 3,658,955 |
import numpy as np
def inversion_double(in_array):
"""
Get the input boolean array along with its element-wise logical not beside it. For error correction.
    >>> inversion_double(np.array([1,0,1,1,1,0,0,1], dtype=bool))
array([[ True, False, True, True, True, False, False, True],
[False, True, False, False, False, True, True, False]])
"""
return np.stack((in_array, np.logical_not(in_array))) | 84253bdec88d665ad8f68b0eb252f3111f4a91ac | 3,658,956 |
def solution(N):
"""
This is a fairly simple task.
What we need to do is:
1. Get string representation in binary form (I love formatted string literals)
2. Measure biggest gap of zeroes (pretty self explanatory)
"""
# get binary representation of number
binary_repr = f"{N:b}"
# initialise counters
current_gap, max_gap = 0, 0
for b in binary_repr:
# end of gap, update max
if b == '1':
max_gap = max(current_gap, max_gap)
current_gap = 0
# increase gap counter
else:
current_gap += 1
return max_gap | 54b9dffe219fd5d04e9e3e3b07e4cb0120167a6f | 3,658,957 |
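# Quick sanity checks for the binary-gap solution above (assumed examples, not part
# of the original record): 1041 is 0b10000010001, whose longest run of zeros bounded
# by ones has length 5; trailing zeros (as in 32 == 0b100000) do not count.
assert solution(1041) == 5
assert solution(32) == 0
assert solution(9) == 2  # 0b1001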
from typing import Tuple
def distributed_compute_expectations(
building_blocks: Tuple[cw.ComplexDeviceArray],
operating_axes: Tuple[Tuple[int]],
pbaxisums: Tuple[Tuple[cw.ComplexDeviceArray]],
pbaxisums_operating_axes: Tuple[Tuple[Tuple[int]]],
pbaxisum_coeffs: Tuple[Tuple[float]],
num_discretes: int,
) -> ShardedDeviceArray:
"""
Compute the expectation values of several observables given in `pbaxisums`.
  This function uses a single pmap and can be memory intensive for
pbaxisums with many long prob-basis-axis-strings.
Args:
building_blocks: The building_blocks in super-matrix format (i.e. 128x128)
operating_axes: The discrete axes on which `building_blocks` act.
pbaxisums: Supermatrices of large_block representation of pauli sum
operators. A single pbaxistring is represented as an innermost list
of matrix-large_blocks. The outermost list iterates through different
prob-basis-axis-sums, the intermediate list iterates through pbaxistrings
within a pbaxisum.
pbaxisums_operating_axes: The discrete axes on which the pbaxisums act.
pbaxisum_coeffs: The coefficients of the
prob-basis-axis-strings appearing in the union of all prob-basis-axis-sum operators.
num_discretes: The number of discretes needed for the simulation.
Returns:
ShardedDeviceArray: The expectation values.
"""
num_pbaxisums = len(pbaxisums)
expectation_values = jnp.zeros(num_pbaxisums)
final_state = helpers.get_final_state(building_blocks, operating_axes, num_discretes)
for m, pbaxisum in enumerate(pbaxisums):
pbaxisum_op_axes = pbaxisums_operating_axes[m]
pbaxisum_coeff = pbaxisum_coeffs[m]
# `psi` is brought into natural discrete order
# don't forget to also align the axes here!
coeff = pbaxisum_coeff[0]
psi = helpers.apply_building_blocks(
final_state, pbaxisum[0], pbaxisum_op_axes[0]
).align_axes()
expectation_value = (
helpers.scalar_product_real(psi, final_state) * coeff
)
for n in range(1, len(pbaxisum)):
pbaxistring = pbaxisum[n]
op_axes = pbaxisum_op_axes[n]
coeff = pbaxisum_coeff[n]
psi = helpers.apply_building_blocks(
final_state, pbaxistring, op_axes
).align_axes()
expectation_value += (
helpers.scalar_product_real(psi, final_state) * coeff
)
# at this point all `psis` are in natural discrete ordering,
# with the same `labels` values as `final_state` (i.e.
# `labels = [0,1,2,3,..., num_discretes - 1]`). They also all have the
# same (sorted) `perm` ordering due to the call to `align_axes()`.
# compute the expectation values. Note that `psi` and `final_state`
# have identical `perm` and `labels`.
expectation_values = expectation_values.at[m].set(
expectation_value.real[0]
)
return expectation_values | d26c7595c604f7c52b3546083837de35ef4b4202 | 3,658,958 |
import xlrd
def extractStudents(filename):
"""
Pre: The list in xls file is not empty
Post: All students are extract from file
Returns students list
"""
list = []
try:
# open Excel file
wb = xlrd.open_workbook(str(filename))
except IOError:
print ("Oops! No file "+filename+ " has been found !")
else:
sh = wb.sheet_by_name(wb.sheet_names()[0])
for rownum in range(1,sh.nrows):#1 to remove title line
student = sh.row_values(rownum)
list.append(student)
return list | e10d942c4e1742b4e8de9ec6a1248f27b2a4b1d5 | 3,658,959 |
def clean_ip(ip):
"""
Cleans the ip address up, useful for removing leading zeros, e.g.::
1234:0:01:02:: -> 1234:0:1:2::
1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a
1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1::
0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0
:type ip: string
:param ip: An IP address.
:rtype: string
:return: The cleaned up IP.
"""
theip = normalize_ip(ip)
segments = ['%x' % int(s, 16) for s in theip.split(':')]
# Find the longest consecutive sequence of zeroes.
seq = {0: 0}
start = None
count = 0
for n, segment in enumerate(segments):
if segment != '0':
start = None
count = 0
continue
if start is None:
start = n
count += 1
seq[count] = start
# Replace those zeroes by a double colon.
count = max(seq)
start = seq[count]
result = []
for n, segment in enumerate(segments):
if n == start and count > 1:
if n == 0:
result.append('')
result.append('')
if n == 7:
result.append('')
continue
elif start < n < start + count:
if n == 7:
result.append('')
continue
result.append(segment)
return ':'.join(result) | f0828e793a3adfef536bf7cb76d73a9af097aa00 | 3,658,961 |
def meta_caption(meta) -> str:
"""makes text from metadata for captioning video"""
caption = ""
try:
caption += meta.title + " - "
except (TypeError, LookupError, AttributeError):
pass
try:
caption += meta.artist
except (TypeError, LookupError, AttributeError):
pass
return caption | 6ef117eb5d7a04adcee25a755337909bfe142014 | 3,658,963 |
def ticket_id_correctly_formatted(s: str) -> bool:
"""Checks if Ticket ID is in the form of 'PROJECTNAME-1234'"""
return matches(r"^\w+-\d+$|^---$|^-$")(s) | 2bb1624ac2080852badc6ab2badcb2e1229f5fcc | 3,658,964 |
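# The ticket_id_correctly_formatted record above relies on a curried `matches` helper
# that is not included. A minimal sketch of what such a helper could look like
# (an assumption, not the original implementation):
import re
def matches(pattern):
    return lambda s: bool(re.search(pattern, s))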
def test_1():
"""
f(x) = max(.2, sin(x)^2)
"""
test_graph = FunctionTree('Test_1')
max_node = Max('max')
const_node = Constant('0.2', .2)
square_node = Square('square')
sin_node = Sin('sin')
test_graph.insert_node(max_node, 'Output', 'x')
test_graph.insert_node(square_node, 'max', 'x')
test_graph.insert_node(const_node, 'max')
test_graph.insert_node(sin_node, 'square', 'x')
return test_graph | c6b47e386cdb7caa2290df2250fee3ad6aecbab7 | 3,658,965 |
import ee
def export_vector(vector, description, output_name, output_method='asset'):
"""Exports vector to GEE Asset in GEE or to shapefile
in Google Drive.
Parameters
----------
vector : ee.FeatureCollection
Classified vector segments/clusters.
description : str
Description of the exported layer.
output_name : str
Path for the output file. Path must exist within
Google Earth Engine Assets path or Google Drive.
output_method : str
Export method/destination. Options include 'asset' for
export to Google Earth Engine Assets or 'drive' for
export to Google Drive.
Returns
-------
output_message : str
Message indicating location of the exported layer.
Example
-------
>>> import ee
>>> peak_green = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170602')
>>> post_harvest = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170906')
>>> image_collection = ee.ImageCollection([peak_green, post_harvest])
>>> ndvi_diff = ndvi_diff_landsat8(image_collection, 1, 0)
>>> study_area_boundary = ee.FeatureCollection("users/calekochenour/vegetation-change/drtt_study_area_boundary")
>>> ndvi_change_thresholds = [-2.0, -0.5, -0.5, -0.35]
>>> change_features = segment_snic(ndvi_diff, study_area_boundary, ndvi_change_thresholds)
>>> change_primary_vector = raster_to_vector(change_features.get('primary'), study_area_boundary)
>>> change_secondary_vector = raster_to_vector(change_features.get('secondary'), study_area_boundary)
    >>> change_primary_export = export_vector(vector=change_primary_vector, description='Primary Change', output_name=change_primary_asset_name, output_method='asset')
>>> change_secondary_export = export_vector(vector=change_secondary_vector, description='Secondary Change', output_name=change_secondary_asset_name, output_method='asset')
"""
# Create export task for Google Drive
if output_method.lower() == "drive":
# Export vectors as shapefile to Google Drive
task = ee.batch.Export.table.toDrive(**{
'collection': vector,
'description': output_name,
'fileFormat': 'SHP'})
# Assign output message
output_message = f"Exporting {output_name.split('/')[-1]} to Google Drive..."
# Create task for GEE Asset
elif output_method.lower() == "asset":
# Export vectors to GEE Asset
task = ee.batch.Export.table.toAsset(**{
'collection': vector,
'description': description,
'assetId': output_name})
# Assign output message
output_message = f"Exporting {output_name.split('/')[-1]} to GEE Asset..."
else:
        # Raise error
raise ValueError("Invalid export method. Please specify 'Drive' or 'Asset'.")
# Start export task
task.start()
# Return output message
return print(output_message) | 19cfa1a907aec4f25b1d8392f02a628f9e07ed7c | 3,658,966 |
import numpy as np
import scipy.linalg as sla
import quadprog
def optimize_centers_mvuiq(A, B, Q, centers, keep_sparsity=True):
""" minimize reconstruction error after weighting by matrix A and make it unbiased
min_{c_i} \|A.(\sum_i Q_i c_i) - B\|_F^2 such that sum(B-A(\sum_i Q_i c_i)) = 0
"""
num_levels = len(centers)
thr = sla.norm(A) * 1e-6
# 1- compute A*(Q==i) and store it. find the non-empty quantization bins in the process
valid_idx = []
AQ = [np.zeros(1) for _ in range(num_levels)]
for i in range(num_levels):
AQ[i] = np.matmul(A, Q == i)
if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):
# check whether the i-th bin has any effect on the quantization performance and
# do not consider sparse values (center=0)
valid_idx += [i]
if not valid_idx:
return
# 2- find the optimum reconstruction points for the non-empty quantization bins
# 2.a- create matrix M, used in the optimization problem
num_valid = len(valid_idx)
d = np.sum(B)
f = np.zeros(num_valid)
M = np.zeros(shape=(num_valid, num_valid))
e = np.zeros(shape=num_valid)
for r in range(num_valid):
f[r] = np.sum(AQ[valid_idx[r]])
for c in range(r, num_valid):
# trace(AQ[valid_idx[c]].T @ AQ[valid_idx[r]])
M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])
M[c, r] = M[r, c]
# trace(B.T @ AQ[valid_idx[r]])
e[r] = np.sum(AQ[valid_idx[r]] * B)
# 2.b- solve for min |Mx-e| such that fx=d
if num_valid == 0:
v = 0
elif num_valid == 1:
v = d / f[0]
elif num_valid == 2:
# for the special binary case, the solution can be found easily
scale = sla.norm(f) + 1e-12
f /= scale
d /= scale
u = np.array([-f[1], f[0]])
a = (e - d * M.dot(f)).dot(u) / (M.dot(u).dot(u) + 1e-12)
v = d * f + a * u
else:
# use quadratic programming (Goldfarb-Idnani algorithm) to solve the problem
        d = np.array([d]).astype(float)
f = np.reshape(f, newshape=(-1, 1))
v = quadprog.solve_qp(M, e, f, d, 1)[0]
# 3- copy the found center points
centers[valid_idx] = v
return centers | 5a059bf9a88ed31a6cc75cecd2b0f7ef4273c5af | 3,658,967 |
def container_instance_task_arns(cluster, instance_arn):
"""Fetch tasks for a container instance ARN."""
arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']
return arns | ca5f0be6aa054f7d839435a8c32c395429697639 | 3,658,968 |
def benchmark(pipelines=None, datasets=None, hyperparameters=None, metrics=METRICS, rank='f1',
distributed=False, test_split=False, detrend=False, output_path=None):
"""Evaluate pipelines on the given datasets and evaluate the performance.
The pipelines are used to analyze the given signals and later on the
detected anomalies are scored against the known anomalies using the
indicated metrics.
    Finally, the scores obtained with each metric are averaged across all the signals,
ranked by the indicated metric and returned on a ``pandas.DataFrame``.
Args:
pipelines (dict or list): dictionary with pipeline names as keys and their
JSON paths as values. If a list is given, it should be of JSON paths,
            and the paths themselves will be used as names. If not given, all verified
pipelines will be used for evaluation.
datasets (dict or list): dictionary of dataset name as keys and list of signals as
values. If a list is given then it will be under a generic name ``dataset``.
            If not given, all benchmark datasets will be used.
hyperparameters (dict or list): dictionary with pipeline names as keys
and their hyperparameter JSON paths or dictionaries as values. If a list is
given, it should be of corresponding order to pipelines.
metrics (dict or list): dictionary with metric names as keys and
scoring functions as values. If a list is given, it should be of scoring
functions, and they ``__name__`` value will be used as the metric name.
If not given, all the available metrics will be used.
rank (str): Sort and rank the pipelines based on the given metric.
If not given, rank using the first metric.
distributed (bool): Whether to use dask for distributed computing. If not given,
use ``False``.
test_split (bool or float): Whether to use the prespecified train-test split. If
float, then it should be between 0.0 and 1.0 and represent the proportion of
the signal to include in the test split. If not given, use ``False``.
detrend (bool): Whether to use ``scipy.detrend``. If not given, use ``False``.
        output_path (str): Location to save the intermediary results. If not given,
            intermediary results will not be saved.
Returns:
pandas.DataFrame: Table containing the scores obtained with
            each scoring function across all the signals for each pipeline.
"""
pipelines = pipelines or VERIFIED_PIPELINES
datasets = datasets or BENCHMARK_DATA
if isinstance(pipelines, list):
pipelines = {pipeline: pipeline for pipeline in pipelines}
if isinstance(datasets, list):
datasets = {'dataset': datasets}
if isinstance(hyperparameters, list):
hyperparameters = {pipeline: hyperparameter for pipeline, hyperparameter in
zip(pipelines.keys(), hyperparameters)}
if isinstance(metrics, list):
metrics_ = dict()
for metric in metrics:
if callable(metric):
metrics_[metric.__name__] = metric
elif metric in METRICS:
metrics_[metric] = METRICS[metric]
else:
raise ValueError('Unknown metric: {}'.format(metric))
metrics = metrics_
results = _evaluate_datasets(
pipelines, datasets, hyperparameters, metrics, distributed, test_split, detrend)
if output_path:
LOGGER.info('Saving benchmark report to %s', output_path)
results.to_csv(output_path)
return _sort_leaderboard(results, rank, metrics) | 09e7ebda30d0e9eec1b11a68fbc566bf8f39d841 | 3,658,969 |
def notNone(arg,default=None):
""" Returns arg if not None, else returns default. """
return [arg,default][arg is None] | 71e6012db54b605883491efdc389448931f418d0 | 3,658,970 |
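# Small usage examples for notNone above (assumed examples): falsy values that are
# not None are kept, only None falls back to the default.
print(notNone(0, 5))     # 0
print(notNone(None, 5))  # 5
print(notNone('', 'x'))  # returns '' (empty string is not None)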
def get_scorer(scoring):
"""Get a scorer from string
"""
if isinstance(scoring, str) and scoring in _SCORERS:
scoring = _SCORERS[scoring]
return _metrics.get_scorer(scoring) | fbf1759ae4c6f93be036a6af479de89a732bc520 | 3,658,971 |
from typing import Iterator
def triangle_num(value: int) -> int:
"""Returns triangular number for a given value.
Parameters
----------
value : int
        Integer value to use in triangular number calculation.
Returns
-------
int
Triangular number.
Examples:
>>> triangle_num(0)
0
>>> triangle_num(1)
1
>>> triangle_num(4)
10
>>> triangle_num(10)
55
>>> triangle_num("A")
Traceback (most recent call last):
...
TypeError: '>' not supported between instances of 'str' and 'int'
    >>> triangle_num(-1)
    Traceback (most recent call last):
    ...
    ValueError: Please use positive integer value.
"""
if value >= 0:
tot : list = [0]
def recur(n: int, t: list) -> Iterator:
if n > 0:
t[0] += n
n -= 1
return recur(n, t)
recur(value, tot)
return tot[0]
raise ValueError("Please use positive integer value.") | f22554b2c220d368b1e694021f8026162381a7d0 | 3,658,972 |
import torch
def locations_sim_euclidean(image:DataBunch, **kwargs):
"""
A locations similarity function that uses euclidean similarity between vectors. Predicts the anatomical locations of
the input image, and then returns the eucliean similarity between the input embryo's locations vector and the
locations vectors of the database embryos.
Euclidean similarity and distance are computed between unnormalized, one-hot locations vectors. The euclidean
similarity between two locations vectors is defined as 1/(1 + euclidean distance).
Arguments:
- image: The input image DataBunch
Returns:
A tensor of similarity values (one for each database image). Each similarity score is the euclidean similarity between
locations vectors.
"""
locations_pred = run_inference(image, do_stage=False)[0]
_, database_image_locations = retrieve_predictions()
euclidean_distance = torch.norm(database_image_locations-locations_pred, dim=1).unsqueeze(1)
return 1/(1+euclidean_distance) | d45c33641ac6327963f0634878c99461de9c1052 | 3,658,973 |
from scipy.signal import butter, filtfilt
def _butter_bandpass_filter(data, low_cut, high_cut, fs, axis=0, order=5):
"""Apply a bandpass butterworth filter with zero-phase filtering
Args:
data: (np.array)
low_cut: (float) lower bound cutoff for high pass filter
high_cut: (float) upper bound cutoff for low pass filter
fs: (float) sampling frequency in Hz
axis: (int) axis to perform filtering.
order: (int) filter order for butterworth bandpass
Returns:
bandpass filtered data.
"""
nyq = 0.5 * fs
b, a = butter(order, [low_cut / nyq, high_cut / nyq], btype="band")
return filtfilt(b, a, data, axis=axis) | 706770bbf78e103786a6247fc56df7fd8b41665a | 3,658,974 |
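# A small usage sketch for the bandpass filter above (assumed example data):
# a signal with 10 Hz and 60 Hz components sampled at 500 Hz; an 8-30 Hz band
# keeps the 10 Hz component and attenuates the 60 Hz one.
import numpy as np
fs = 500.0
t = np.arange(0, 2.0, 1.0 / fs)
raw = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
filtered = _butter_bandpass_filter(raw, low_cut=8.0, high_cut=30.0, fs=fs)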
def transform_and_normalize(vecs, kernel, bias):
"""应用变换,然后标准化
"""
if not (kernel is None or bias is None):
vecs = (vecs + bias).dot(kernel)
return vecs / (vecs**2).sum(axis=1, keepdims=True)**0.5 | bb32cd5c74df7db8d4a6b6e3ea211b0c9b79db47 | 3,658,975 |
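# A small usage sketch for transform_and_normalize above (assumed shapes): `vecs`
# is an (n_samples, dim) array of sentence embeddings, `kernel` a (dim, k)
# whitening matrix and `bias` a (1, dim) offset; passing kernel=None or bias=None
# skips the transform and only L2-normalizes the rows.
import numpy as np
vecs = np.random.randn(8, 4)
unit_vecs = transform_and_normalize(vecs, kernel=np.eye(4), bias=np.zeros((1, 4)))
print(np.linalg.norm(unit_vecs, axis=1))  # all ones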
def mpesa_response(r):
"""
Create MpesaResponse object from requests.Response object
Arguments:
r (requests.Response) -- The response to convert
"""
r.__class__ = MpesaResponse
json_response = r.json()
r.response_description = json_response.get('ResponseDescription', '')
r.error_code = json_response.get('errorCode')
r.error_message = json_response.get('errorMessage', '')
return r | e416030d39411ce19aee28735465ba035461f802 | 3,658,976 |
def swap_flies(dataset, indices, flies1=0, flies2=1):
"""Swap flies in dataset.
Caution: datavariables are currently hard-coded!
    Caution: Swap *may* happen in place, so it *might* alter the original dataset.
Args:
dataset ([type]): Dataset for which to swap flies
indices ([type]): List of indices at which to swap flies.
flies1 (int or list/tuple, optional): Either a single value for all indices or a list with one value per item in indices. Defaults to 0.
flies2 (int or list/tuple, optional): Either a single value for all indices or a list with one value per item in indices. Defaults to 1.
Returns:
        dataset with flies swapped at the given indices
"""
for cnt, index in enumerate(indices):
if isinstance(flies1, (list, tuple)) and isinstance(flies2, (list, tuple)):
fly1, fly2 = flies1[cnt], flies2[cnt]
else:
fly1, fly2 = flies1, flies2
if 'pose_positions_allo' in dataset:
dataset.pose_positions_allo.values[index:, [fly2, fly1], ...] = dataset.pose_positions_allo.values[index:, [fly1, fly2], ...]
if 'pose_positions' in dataset:
dataset.pose_positions.values[index:, [fly2, fly1], ...] = dataset.pose_positions.values[index:, [fly1, fly2], ...]
if 'body_positions' in dataset:
dataset.body_positions.values[index:, [fly2, fly1], ...] = dataset.body_positions.values[index:, [fly1, fly2], ...]
return dataset | 1f1941d8d6481b63efd1cc54fcf13f7734bccf8b | 3,658,977 |
def periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2,
log10_gam_p=0, log10_p=0):
"""Quasi-periodic kernel for DM"""
r = np.abs(avetoas[None, :] - avetoas[:, None])
# convert units to seconds
sigma = 10**log10_sigma
l = 10**log10_ell * 86400
p = 10**log10_p * 3.16e7
gam_p = 10**log10_gam_p
d = np.eye(r.shape[0]) * (sigma/500)**2
K = sigma**2 * np.exp(-r**2/2/l**2 - gam_p*np.sin(np.pi*r/p)**2) + d
return K | 14dc89fbbf501ee42d7778bd14a9e35d22bc69ea | 3,658,978 |
def emails(request):
"""
A view to send emails out to hunt participants upon receiving a valid post request as well as
rendering the staff email form page
"""
teams = Hunt.objects.get(is_current_hunt=True).real_teams
people = []
for team in teams:
people = people + list(team.person_set.all())
email_list = [person.user.email for person in people]
if request.method == 'POST':
email_form = EmailForm(request.POST)
if email_form.is_valid():
subject = email_form.cleaned_data['subject']
message = email_form.cleaned_data['message']
email_to_chunks = [email_list[x: x + 80] for x in range(0, len(email_list), 80)]
for to_chunk in email_to_chunks:
email = EmailMessage(subject, message, 'puzzlehuntcmu@gmail.com', [], to_chunk)
email.send()
return HttpResponseRedirect('')
else:
email_form = EmailForm()
context = {'email_list': (', ').join(email_list), 'email_form': email_form}
return render(request, 'email.html', add_apps_to_context(context, request)) | 93cc8099e8f73b2607ab736a2aae4ae59ca1fe4d | 3,658,979 |
import numpy as np
from copy import deepcopy
def _stochastic_universal_sampling(parents: Population, prob_distribution: list, n: int):
"""
Stochastic universal sampling (SUS) algorithm. Whenever more than one sample is to be drawn from the distribution
the use of the stochastic universal sampling algorithm is preferred compared to roulette wheel algorithm.
Parameters
----------
:param parents: beagle.Population
Population from which n individuals are going to be selected.
:param prob_distribution: list
Cumulative probability distribution.
:param n: int
Length of the selected population.
Returns
-------
:return: list of beagle.Individual
Selected individuals.
Exceptions
-----------
:raise Exception
If the algorithm enters an infinite loop because random_num is greater than 1 an exception will occur.
"""
current_member, i = 0, 0
mating_pool = [None] * n
random_num = np.random.uniform(low=0, high=(1/n))
while current_member < n:
while random_num <= prob_distribution[i]:
mating_pool[current_member] = parents[i]
random_num += 1 / n
current_member += 1
if random_num > 1:
raise Exception(
'The SUS algorithm has entered an infinite loop. Verify that the selected population '
'sizes are suitable for this type of operator.')
i += 1
mating_pool = [deepcopy(individual) for individual in mating_pool] # Make a deepcopy of each selected individual
return mating_pool | fb6b58cbdedbd133a7ba72470c2fc6586265ed4c | 3,658,980 |
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc',
xd=False, yd=False, dualy=False, other_ftypes=True):
"""Add a simple subparser."""
parser = _add_procparser(subparsers, name, helpstr, func, defname=defname)
_add_def_args(parser, xd=xd, yd=yd, dualy=dualy)
return parser | d7ba916453921d4ad362367c43f597f81fb2db9b | 3,658,982 |
def comprspaces(*args):
"""
.. function:: comprspaces(text1, [text2,...]) -> text
This function strips (from the beginning and the end) and compresses
the spaces in its input.
Examples:
>>> table1('''
... ' an example with spaces ' 'another example with spaces '
... ''')
>>> sql("select comprspaces(a,b) from table1")
comprspaces(a,b)
--------------------------------------------------
an example with spaces another example with spaces
"""
if len(args) == 1:
return reduce_spaces.sub(' ', strip_remove_newlines.sub('', args[0]))
out=[]
for i in args:
o=reduce_spaces.sub(' ', strip_remove_newlines.sub('', i))
out+=[o]
return ' '.join(out) | 7cf4d23dac7fb0d36f9224598f103b5918167bd5 | 3,658,985 |
import socket
def find_available_port():
"""Find an available port.
Simple trick: open a socket to localhost, see what port was allocated.
Could fail in highly concurrent setups, though.
"""
s = socket.socket()
s.bind(('localhost', 0))
_address, port = s.getsockname()
s.close()
return port | 1d81ff79fa824bc8b38c121a632890973f0639ea | 3,658,986 |
from deepmerge import Merger
def merge_deep(dct1, dct2, merger=None):
"""
    Deep merge dct2 into dct1 using the spec below: lists are appended,
    dicts are merged recursively, and other conflicting values are overridden by dct2.
    :param dct1: base dictionary
    :param dct2: dictionary merged on top of dct1
    :param merger: optional custom Merger
    :return: the merged dictionary
"""
my_merger = merger or Merger(
# pass in a list of tuples,with the
# strategies you are looking to apply
# to each type.
[
(list, ["append"]),
(dict, ["merge"])
],
# next, choose the fallback strategies,
# applied to all other types:
["override"],
# finally, choose the strategies in
# the case where the types conflict:
["override"]
)
return my_merger.merge(dct1, dct2) | 1257e7a8242fde6a70feb3cfe373979bbf439726 | 3,658,988 |
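# A quick usage sketch for merge_deep above (assuming Merger comes from the
# deepmerge package, as the strategy lists suggest): lists are appended, dicts
# merged recursively, and other conflicting values overridden by dct2.
base = {"a": [1], "b": {"x": 1}, "c": "old"}
extra = {"a": [2], "b": {"y": 2}, "c": "new"}
print(merge_deep(base, extra))
# -> {'a': [1, 2], 'b': {'x': 1, 'y': 2}, 'c': 'new'}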
def step(
context, bind_to, data, title='', area=False, x_is_category=False,
labels=False, vertical_grid_line=False, horizontal_grid_line=False,
show_legend=True, zoom=False, group_tooltip=True, height=None,
width=None
):
"""Generates javascript code to show a 'step' chart.
Args:
context: Context of template.
        bind_to: A string that specifies an HTML element (eg: id or class)
that chart will be shown in that. (like: '#chart')
        data: It is a dictionary that contains the data of the chart, some
            information about extra lines, grouping of data and
chart axis labels. eg:
{
'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'],
'horizontal_lines': [40],
# 'vertical_lines': [40],
'data': [
{'title': 'A', 'values': [26, 35, 52, 34, 45, 74],
'color': '#FF34FF'},
# {'title': 'B', 'values': [54, 25, 52, 26, 20, 89]},
],
# 'groups': [('A', 'B')]
}
vertical_lines works just if x_is_category seted to False.
title: A string that will be shown on top of the chart.
area: It's a boolean option. If true, the area under the curve
will be colored.
        x_is_category: It's a boolean option. If false, labels of the X axis
            will be treated as real numbers and sortable. (they will
            be sorted automatically)
        labels: It's a boolean option. If true, the value of each record will be
            shown on its column.
        vertical_grid_line: It's a boolean option. If true, vertical grid
            lines will be drawn in the chart.
        horizontal_grid_line: It's a boolean option. If true, horizontal
            grid lines will be drawn in the chart.
        show_legend: It's a boolean option. If false, the chart legend
            will be hidden.
        zoom: It's a boolean option. If true, the end user can scroll on the
            chart to zoom in and zoom out.
        group_tooltip: It's a boolean option. If true, data of all records
            at that point will be shown together.
        height: It's an integer option, it will determine the height of the chart
            in pixels.
        width: It's an integer option, it will determine the width of the chart
            in pixels.
Returns:
A string contains chart js code and import code of C3 static files, if
it did not imported yet.
You can see structure of chart in chart_structur variable.
"""
# step chart structure in JS
chart_structur = (
'\n<script type="text/javascript">'
'\n var chart = c3.generate({'
'\n bindto: "%s",'
'\n data: {'
'\n x: %s,'
'\n columns: ['
'\n %s'
'\n ],'
'\n type : "%s",'
'\n colors: {'
'\n %s'
'\n },'
'\n groups: ['
'\n %s'
'\n ],'
'\n labels : %s'
'\n },'
'\n title: { text: "%s"},'
'\n axis: { x: { type: "%s" } },'
'\n grid: {'
'\n x: { show: %s ,lines: [%s] },'
'\n y: { show: %s ,lines: [%s] },'
'\n },'
'\n legend: { show: %s },'
'\n zoom: { enabled: %s },'
'\n tooltip: { grouped: %s },'
'\n size: { height: %s, width: %s }'
'\n });'
'\n</script>'
)
# convert parameters to strings to be acceptable in JS and C3 syntax.
if area:
_type = 'area-step'
else:
_type = 'step'
if x_is_category:
x_type = 'category'
else:
x_type = ''
if labels:
labels = 'true'
else:
labels = 'false'
if vertical_grid_line:
vertical_grid_line = 'true'
else:
vertical_grid_line = 'false'
if horizontal_grid_line:
horizontal_grid_line = 'true'
else:
horizontal_grid_line = 'false'
if show_legend:
show_legend = 'true'
else:
show_legend = 'false'
if zoom:
zoom = 'true'
else:
zoom = 'false'
if group_tooltip:
group_tooltip = 'true'
else:
group_tooltip = 'false'
if height is not None:
height = int(height)
else:
height = 'null'
if width is not None:
width = int(width)
else:
width = 'null'
# read horizontal line points from data
horizontal_lines = str()
if 'horizontal_lines' in data.keys():
for line in data['horizontal_lines']:
horizontal_lines = ''.join([horizontal_lines,
'{ value: %s}' % line, ','])
# read vertical line points from data
# raise an exception if x_is_category set to true and vertical_lines exists
vertical_lines = str()
if 'vertical_lines' in data.keys():
if x_is_category:
raise Exception(
"It's meaningless to use vertical_lines with x_is_category."
)
for line in data['vertical_lines']:
vertical_lines = ''.join(
[vertical_lines, '{ value: %s}' % line, ','])
# reads 'x' field of data and creates X axis labels.
    # a hash is used for naming the X axis labels
x_labels = str()
if 'x' in data.keys():
if x_is_category:
x_labels = data['x']
else:
x_labels = list(filter(lambda x: int(x), data['x']))
x_labels = ','.join([repr(str(label)) for label in x_labels])
x_labels = '["2d2014226823e74c2accfcce8e0ca141", %s],' % x_labels
x_label_list_name = '"2d2014226823e74c2accfcce8e0ca141"'
else:
x_labels = ''
x_label_list_name = "null"
# read records points to draw on chart
data_title_list = list()
chart_data = str()
for item in data['data']:
values = ','.join([str(v) for v in item['values']])
item_data = '["%s", %s], ' % (item['title'], values)
chart_data = ' '.join([chart_data, item_data])
data_title_list.append(item['title'])
# add X axis labels to chart data
chart_data = ''.join([chart_data, x_labels])
# read colors of data
chart_color = str()
for item in data['data']:
if 'color' in item.keys():
item_color = '"%s": "%s", ' % (item['title'], item['color'])
chart_color = ' '.join([chart_color, item_color])
# read grouping details of data
total_group_string = str()
if 'groups' in data.keys():
for group in data['groups']:
group_string = str()
for item in group:
# raise an exception if mentioned key were not exist in data
if item not in data_title_list:
raise ValueError("%s is not exists in your data!" % item)
group_string = ''.join([group_string, ',', repr(item)])
total_group_string = ''.join(
[total_group_string, '[', group_string, ']', ','])
# pass arguments to chart structure
chart = chart_structur % (
bind_to, x_label_list_name,
chart_data, _type, chart_color, total_group_string, labels,
title, x_type, vertical_grid_line, vertical_lines,
horizontal_grid_line, horizontal_lines, show_legend, zoom,
group_tooltip, height, width
)
# add import C3 elements to it, if it does not imported yet and return it.
if not ('import_js_c3' in context and context['import_js_c3']):
context['import_js_c3'] = True
return mark_safe('%s\n%s' % (import_c3(), chart))
else:
return mark_safe(chart) | e135f1315dc635cc12dec403b3b6a268ed1c0a2b | 3,658,989 |
from typing import List
def get_baseline_y(line: PageXMLTextLine) -> List[int]:
"""Return the Y/vertical coordinates of a text line's baseline."""
if line_starts_with_big_capital(line):
return [point[1] for point in line.baseline.points if point[1] < line.baseline.bottom - 20]
else:
return [point[1] for point in line.baseline.points] | 7195f801e3012f5514b0d4eea7d5df9a36764412 | 3,658,991 |
import time
def get_device_type(dev, num_try=1):
""" Tries to get the device type with delay """
if num_try >= MAX_DEVICE_TYPE_CHECK_RETRIES:
return
    time.sleep(1)  # if devtype is checked too early it is reported as 'unknown'
iface = xwiimote.iface(dev)
device_type = iface.get_devtype()
if not device_type or device_type == 'unknown':
return get_device_type(dev, num_try + 1)
return device_type | 0caec78baeeb3da7ba3b99d68d80b9d1439af294 | 3,658,992 |
def index():
"""
This is the grocery list.
Concatenates the ingredients from all the upcoming recipes
The ingredients dict that we pass to the template has this structure
{
"carrot": {
"g": 200,
"number": 4,
"str": "200g, 4number",
},
"salt": {
"g": 20,
"pinch": 3,
"str": "20g, 3pinch",
},
}
If two ingredients have the same unit, I add the quantities, but trying to
unify all the different ways of expressing ingredient units would be a lost cause.
We add the str key because doing formatting work in the template is so much fun
"""
recipes = Recipe.query.filter_by(upcoming=True)
ingredients = dict()
for recipe in recipes:
recipe_d = recipe.to_dict()
for ingredient in recipe_d["ingredients"]:
#worth changing the ingredients to a named tuple ?
#would be better at least here
name, unit, quantity = (ingredient["name"],
ingredient["unit"],
ingredient["quantity"])
quantity = quantity * recipe.upcoming_servings / recipe.servings
if name in ingredients:
if unit in ingredients[name]:
ingredients[name][unit] += quantity
else:
ingredients[name][unit] = quantity
else:
ingredients[name] = {
unit: quantity,
}
for name, d in ingredients.items():
s = ", ".join("{:g}{}".format(
round(quantity, 2), unit) for unit, quantity in d.items())
ingredients[name]["str"] = s
return render_template("grocery_list.html",
title="Grocery list",
recipes=recipes,
ingredients=ingredients) | 343f54d097c95e92bbca1bbe087168a348d42771 | 3,658,993 |
def test_Fit_MinFunc():
"""
There are times where I don't pass just a simple function to the fitting algorithm.
Instead I need to calculate the error myself and pass that to the model. This tests
that ability.
"""
init = {
'm': 20,
'b': -10
}
def func(X, *args):
vecLinear = np.vectorize(funcs.linear)
yThr = vecLinear(linearData['X'], *args)
return np.sqrt(np.sum((linearData['Y'] - yThr) ** 2))
LinMod = model(func)
LinMod.setParams(init)
LinMod.fit(linearData['X'], linearData['Y'])
results = LinMod.parameters.copy()
for key in linearParams.keys():
error = np.abs((results[key]-linearParams[key])/linearParams[key])*100
assert error < 15 | 4884302ad03cb04e4d293e05b743f1d2aaf51141 | 3,658,994 |
def BOP(data):
"""
Balance of Power Indicator
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('BOP')
return fn(data) | 14502e0c1fd6f5224edfa403ae58e75a4056c74c | 3,658,995 |
def get_infinite(emnist_client_data, num_pseudo_clients):
"""Converts a Federated EMNIST dataset into an Infinite Federated EMNIST set.
Infinite Federated EMNIST expands each writer from the EMNIST dataset into
some number of pseudo-clients each of whose characters are the same but apply
a fixed random affine transformation to the original user's characters. The
distribution over affine transformation is approximately equivalent to the one
described at https://www.cs.toronto.edu/~tijmen/affNIST/. It applies the
following transformations in this order:
1. A random rotation chosen uniformly between -20 and 20 degrees.
2. A random shearing adding between -0.2 to 0.2 of the x coordinate to the
y coordinate (after centering).
3. A random scaling between 0.8 and 1.25 (sampled log uniformly).
4. A random translation between -5 and 5 pixels in both the x and y axes.
Args:
emnist_client_data: The `tff.simulation.ClientData` to convert.
num_pseudo_clients: How many pseudo-clients to generate for each real
client. Each pseudo-client is formed by applying a given random affine
transformation to the characters written by a given real user. The first
pseudo-client for a given user applies the identity transformation, so the
original users are always included.
Returns:
An expanded `tff.simulation.ClientData`.
"""
num_client_ids = len(emnist_client_data.client_ids)
return transforming_client_data.TransformingClientData(
raw_client_data=emnist_client_data,
make_transform_fn=_make_transform_fn,
num_transformed_clients=(num_client_ids * num_pseudo_clients)) | 68b4ed0643e48adba2478022eff10a52222f75df | 3,658,996 |
def create_plot(df, title, carbon_unit, cost_unit, ylimit=None):
"""
:param df:
:param title: string, plot title
:param carbon_unit: string, the unit of carbon emissions used in the
database/model, e.g. "tCO2"
:param cost_unit: string, the unit of cost used in the database/model,
e.g. "USD"
:param ylimit: float/int, upper limit of y-axis; optional
:return:
"""
if df.empty:
return figure()
# Set up data source
source = ColumnDataSource(data=df)
# Determine column types for plotting, legend and colors
# Order of stacked_cols will define order of stacked areas in chart
x_col = "period"
line_col = "carbon_cap"
stacked_cols = ["in_zone_project_emissions", "import_emissions_degen"]
# Stacked Area Colors
colors = ["#666666", "#999999"]
# Set up the figure
plot = figure(
plot_width=800,
plot_height=500,
tools=["pan", "reset", "zoom_in", "zoom_out", "save", "help"],
title=title,
x_range=df[x_col]
# sizing_mode="scale_both"
)
# Add stacked bar chart to plot
bar_renderers = plot.vbar_stack(
stackers=stacked_cols,
x=x_col,
source=source,
color=colors,
width=0.5,
)
# Add Carbon Cap target line chart to plot
target_renderer = plot.circle(
x=x_col,
y=line_col,
source=source,
size=20,
color="black",
fill_alpha=0.2,
line_width=2,
)
# Create legend items
legend_items = [
("Project Emissions", [bar_renderers[0]]),
("Import Emissions", [bar_renderers[1]]),
("Carbon Target", [target_renderer]),
]
# Add Legend
legend = Legend(items=legend_items)
plot.add_layout(legend, "right")
plot.legend[0].items.reverse() # Reverse legend to match stacked order
plot.legend.click_policy = "hide" # Add interactivity to the legend
# Note: Doesn't rescale the graph down, simply hides the area
# Note2: There's currently no way to auto-size legend based on graph size(?)
# except for maybe changing font size automatically?
show_hide_legend(plot=plot) # Hide legend on double click
# Format Axes (labels, number formatting, range, etc.)
plot.xaxis.axis_label = "Period"
plot.yaxis.axis_label = "Emissions ({})".format(carbon_unit)
plot.yaxis.formatter = NumeralTickFormatter(format="0,0")
plot.y_range.end = ylimit # will be ignored if ylimit is None
# Add delivered RPS HoverTool
r_delivered = bar_renderers[0] # renderer for delivered RPS
hover = HoverTool(
tooltips=[
("Period", "@period"),
(
"Project Emissions",
"@%s{0,0} %s (@fraction_of_project_emissions{0%%})"
% (stacked_cols[0], carbon_unit),
),
],
renderers=[r_delivered],
toggleable=False,
)
plot.add_tools(hover)
# Add curtailed RPS HoverTool
r_curtailed = bar_renderers[1] # renderer for curtailed RPS
hover = HoverTool(
tooltips=[
("Period", "@period"),
(
"Import Emissions",
"@%s{0,0} %s (@fraction_of_import_emissions{0%%})"
% (stacked_cols[1], carbon_unit),
),
],
renderers=[r_curtailed],
toggleable=False,
)
plot.add_tools(hover)
# Add RPS Target HoverTool
hover = HoverTool(
tooltips=[
("Period", "@period"),
("Carbon Target", "@%s{0,0} %s" % (line_col, carbon_unit)),
(
"Marginal Cost",
"@carbon_cap_marginal_cost_per_emission{0,0} %s/%s"
% (cost_unit, carbon_unit),
),
],
renderers=[target_renderer],
toggleable=False,
)
plot.add_tools(hover)
return plot | e320a523bbdbfc12a3e84948935803da5304624e | 3,658,997 |
def get_app(name, **kwargs):
"""Returns an instantiated Application based on the name.
Args:
name (str): The name of the application
kwargs (dict): Keyword arguments used for application instantiation
Returns:
deepcell.applications.Application: The instantiated application
"""
name = str(name).lower()
app_map = dca.settings.VALID_APPLICATIONS
try:
return app_map[name]['class'](**kwargs)
except KeyError:
raise ValueError('{} is not a valid application name. '
'Valid applications: {}'.format(
name, list(app_map.keys()))) | 1fe9d1e300a086b7184760556c65470c62a0cc14 | 3,658,998 |
def worker_complete():
"""Complete worker."""
participant_id = request.args.get('participant_id')
if not participant_id:
return error_response(
error_type="bad request",
error_text='participantId parameter is required'
)
try:
_worker_complete(participant_id)
except KeyError:
return error_response(error_type='ParticipantId not found: {}'.format(participant_id))
return success_response(status="success") | e30b45e84025b11bcf6640931f72d9fc4f4f9873 | 3,658,999 |
def combine(connected_events):
"""
Combine connected events into a graph.
:param connected_events: see polychronous.filter
:return: graph_of_connected_events
"""
graph_of_connected_events = nx.Graph()
graph_of_connected_events.add_edges_from(connected_events)
return (graph_of_connected_events) | 99471930f70bea0583d36d3c0c13fc62b23d6fe8 | 3,659,000 |
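# Quick usage sketch for combine above: connected_events is assumed to be an
# iterable of 2-tuples (pairs of events found to be connected); the resulting
# graph groups them into connected components.
import networkx as nx
g = combine([("a", "b"), ("b", "c"), ("d", "e")])
print(list(nx.connected_components(g)))  # two components: {a, b, c} and {d, e}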
import hashlib
def calculate_hash(filepath, hash_name):
"""Calculate the hash of a file. The available hashes are given by the hashlib module. The available hashes can be listed with hashlib.algorithms_available."""
hash_name = hash_name.lower()
if not hasattr(hashlib, hash_name):
raise Exception('Hash algorithm not available : {}'\
.format(hash_name))
with open(filepath, 'rb') as f:
checksum = getattr(hashlib, hash_name)()
for chunk in iter(lambda: f.read(4096), b''):
checksum.update(chunk)
return checksum.hexdigest() | 975fe0a2a4443ca3abc67ed950fb7200409f2497 | 3,659,001 |
def default_mp_value_parameters():
"""Set the different default parameters used for mp-values.
Returns
-------
dict
A default parameter set with keys: rescale_pca (whether the PCA should be
scaled by variance explained) and nb_permutations (how many permutations to
calculate empirical p-value). Defaults to True and 100, respectively.
"""
params = {"rescale_pca": True, "nb_permutations": 100}
return params | 0dcac3981154fbf0cc1fa0eeed6e83a1e1b63294 | 3,659,003 |
def svn_wc_diff(*args):
"""
svn_wc_diff(svn_wc_adm_access_t anchor, char target, svn_wc_diff_callbacks_t callbacks,
void callback_baton,
svn_boolean_t recurse, apr_pool_t pool) -> svn_error_t
"""
return _wc.svn_wc_diff(*args) | c4fbc11d26b6da2d595cb79314b0d901b084eb52 | 3,659,005 |
import re
def _FindResourceIds(header, resource_names):
"""Returns the numerical resource IDs that correspond to the given resource
  names, as #defined in the given header file.
"""
pattern = re.compile(
r'^#define (%s) _Pragma\S+ (\d+)$' % '|'.join(resource_names))
with open(header, 'r') as f:
res_ids = [ int(pattern.match(line).group(2))
for line in f if pattern.match(line) ]
if len(res_ids) != len(resource_names):
raise Exception('Find resource id failed: the result is ' +
', '.join(str(i) for i in res_ids))
return set(res_ids) | 24847b1d4374a2022ae12f5161bd9df4becd110d | 3,659,006 |
import re
def resolve_request_path(requested_uri):
"""
Check for any aliases and alter the path accordingly.
Returns resolved_uri
"""
for key, val in PATH_ALIASES.items():
if re.match(key, requested_uri):
return re.sub(key, val, requested_uri)
return requested_uri | 5405a795a95279a354d455f3702dbf2c3dc6f1e0 | 3,659,007 |
def apim_api_delete(
client, resource_group_name, service_name, api_id, delete_revisions=None, if_match=None, no_wait=False):
"""Deletes an existing API. """
cms = client.api
return sdk_no_wait(
no_wait,
cms.delete,
resource_group_name=resource_group_name,
service_name=service_name,
api_id=api_id,
if_match="*" if if_match is None else if_match,
delete_revisions=delete_revisions if delete_revisions is not None else False) | 4be4f895ae576ee1ffd08af31abcdad193b84b2c | 3,659,008 |
def deep_copy(obj):
"""Make deep copy of VTK object."""
copy = obj.NewInstance()
copy.DeepCopy(obj)
return copy | c00c4ff44dad5c0c018152f489955f08e633f5ed | 3,659,009 |
def get_dunn_index(fdist, *clusters):
"""
Returns the Dunn index for the given selection of nodes.
J.C. Dunn. Well separated clusters and optimal fuzzy
partitions. 1974. J.Cybern. 4. 95-104.
"""
if len(clusters)<2:
        raise ValueError("At least 2 clusters are required")
intra_dist = []
for c in clusters:
for i in c.get_leaves():
if i is not None:
# item intraclsuterdist -> Centroid Diameter
a = fdist(i.profile, c.profile)*2
intra_dist.append(a)
max_a = numpy.max(intra_dist)
inter_dist = []
for i, ci in enumerate(clusters):
for cj in clusters[i+1:]:
# intracluster dist -> Centroid Linkage
b = fdist(ci.profile, cj.profile)
inter_dist.append(b)
min_b = numpy.min(inter_dist)
if max_a == 0.0:
D = 0.0
else:
D = min_b / max_a
return D | c78c5302d78b5d5969a5edf9e19b81ee6f68bfbf | 3,659,010 |
import random
def sample(words, n=10) -> list:
    """Sample n random words (with replacement) from a list of words."""
return [random.choice(words) for _ in range(n)] | cad435238c776b5fcda84d50295ac50298bf3ab2 | 3,659,011 |
def cov_dense(n_features=100, scale=0.5,
edges='ones', pos=True, force_psd=True, random_state=None):
"""
    Returns a covariance matrix with a constant diagonal and whose off-diagonal elements are obtained from adj_mats.complete_graph()
Parameters
----------
n_features: int
scale: float
Scale of the off diagonal entries.
edges: str
How the edges should be sampled. See adj_mats.complete_graph()
pos: bool
Should the off-diagonal entries be all positive.
force_psd: bool
Make sure the covariance matrix is positive semi-definite zeroing out all negative eigenvalues.
random_state: None, int
Random seed for sampling.
Output
------
cov: array-like, (n_features, n_features)
The sampled covariance matrix.
"""
cov = complete_graph(n_nodes=n_features, edges=edges,
pos=pos, random_state=random_state)
cov = cov * scale
np.fill_diagonal(cov, 1.0)
if force_psd:
cov = project_psd(cov)
return cov | 48b8f5fec91ea11acaf9ce026d8b1742b5185604 | 3,659,013 |
import numpy as np
from astropy.modeling import models, fitting
def measure_fwhm(array):
"""Fit a Gaussian2D model to a PSF and return the FWHM
Parameters
----------
array : numpy.ndarray
Array containing PSF
Returns
-------
x_fwhm : float
FWHM in x direction in units of pixels
y_fwhm : float
FWHM in y direction in units of pixels
"""
yp, xp = array.shape
y, x, = np.mgrid[:yp, :xp]
p_init = models.Gaussian2D()
fit_p = fitting.LevMarLSQFitter()
fitted_psf = fit_p(p_init, x, y, array)
return fitted_psf.x_fwhm, fitted_psf.y_fwhm | e3ee047b453b979387505a19bdfebb75950a3916 | 3,659,014 |
def exists(profile, bucket, name):
"""Check if a file exists in an S3 bucket.
Args:
profile
A profile to connect to AWS with.
bucket
The name of the bucket you want to find the file in.
name
The name of a file.
Returns:
True if it exists, False if it doesn't.
"""
result = fetch_by_name(profile, bucket, name)
return len(result) > 0 | 5269cca9198a1d100b76b13f6e2fbf7314d948fd | 3,659,015 |
def project_login(driver):
"""
    Sets a different sample number for each thread in multi-threaded runs; if the questionnaire is modified, provide the first-ranked sample number of that questionnaire.
"""
SAMPLE_NUMBER = 20200101+sample_add
try:
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.XPATH, '//*[@name="{}"][1]'
            .format(str(SAMPLE_NUMBER))))).click()  # select the sample number to answer
sleep(1)
        driver.find_element_by_class_name('btn.btn-blue').click()  # click the start-interview button
print("STEP 3[project_login]: Project Login Successfully !")
form_basic_info(driver)
except NoSuchElementException:
        driver.find_element_by_xpath('//*[@id="case_in_prj_next"]/a').click()  # if the sample number is not found, click the next-page button
return project_login(driver)
else:
return "STEP 3[project_login]: Loading took too much time !" | db3ef26e1769cb991c887509427f9d809047398d | 3,659,017 |
def convert_convolutionfunction_to_image(cf):
""" Convert ConvolutionFunction to an image
:param cf:
:return:
"""
return create_image_from_array(cf.data, cf.grid_wcs, cf.polarisation_frame) | 6f5819abce6a987665ff49af9e5fca70f586a478 | 3,659,018 |
def macro(libname):
"""Decorator for macros (Moya callables)."""
def deco(f):
exposed_elements[libname] = f
return f
return deco | c4d06d2b9e3fa7913445554794027e68328ab918 | 3,659,019 |
import logging
import torch
def get_dataloaders(dataset, mode='train', root=None, shuffle=True, pin_memory=True,
batch_size=8, logger=logging.getLogger(__name__), normalize=False, **kwargs):
"""A generic data loader
Parameters
----------
dataset : {"openimages", "jetimages", "evaluation"}
Name of the dataset to load
root : str
Path to the dataset root. If `None` uses the default one.
kwargs :
Additional arguments to `DataLoader`. Default values are modified.
"""
    pin_memory = pin_memory and torch.cuda.is_available()  # only pin if GPU available
Dataset = get_dataset(dataset)
if root is None:
dataset = Dataset(logger=logger, mode=mode, normalize=normalize, **kwargs)
else:
dataset = Dataset(root=root, logger=logger, mode=mode, normalize=normalize, **kwargs)
return DataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=NUM_DATASET_WORKERS,
collate_fn=exception_collate_fn,
pin_memory=pin_memory) | 6bb40b3eb1bc004418dd8910dab1432cd3984ca5 | 3,659,020 |
def stats_file(filename, shape, dtype=None, file_format='raw',
out_of_core=True, buffer_size=None, max_memory=None,
progress_frequency=None):
"""stats_file(filename, shape, dtype=None, file_format='raw',
out_of_core=True, buffer_size=None, max_memory=None,
progress_frequency=None) -> StatsInfo object
returns a StatsInfo about the content of 'filename', which is a cube with 'shape'.
If 'out_of_core' (out-of-core) is True, process 'buffer_size' elements at a time.
"""
shape = Shape(shape)
filename = interpolate_filename(filename, shape=shape, file_format=file_format, dtype=dtype)
if out_of_core and file_format == 'raw':
stats_info = stats_info_out_of_core(filename, shape=shape, dtype=dtype,
buffer_size=buffer_size, max_memory=max_memory,
progress_frequency=progress_frequency)
else:
cube = read_cube(file=filename, shape=shape, dtype=dtype, file_format=file_format)
stats_info = StatsInfo.stats_info(cube)
return stats_info | 750b4d334aa25a2423e5278eab7cd5ee43385303 | 3,659,021 |
def _weight_func(dist):
"""Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid."""
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide="ignore"):
retval = 1.0 / dist
return retval**2 | 9052b68592f2f6cf4c59c623a3561f77d3d2b933 | 3,659,023 |
def two_poles(time_limit=_DEFAULT_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns the Cartpole Balance task with two poles."""
physics = Physics.from_xml_string(*get_model_and_assets(num_poles=2))
task = Balance(swing_up=True, sparse=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, **environment_kwargs) | b5236731d61464067073c3275cdd03d493f17821 | 3,659,024 |
def process_topic_entity(entity: dict, language: str) -> bool:
"""
Given a topic entity, gather its metadata
:param entity
:param language:
:type entity dict
:type language str
:returns bool
"""
try:
# Get ID
remote_id = entity["title"]
print("%s\t%s" % ("ID".ljust(16), remote_id))
# Get name from label
name = entity["labels"][language]["value"].lower()
print("%s\t%s" % ("name".ljust(16), name))
# Get brief
brief = entity["descriptions"][language]["value"].lower()
print("%s\t%s" % ("description".ljust(16), brief))
print_end()
except Exception as err:
print_err("%s error: %s" % (remote_id, err))
return False
return True | 2f03c0d24f35e49cd05ac11389b91345cc43de6e | 3,659,025 |
import math
import torch
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor:
"""Cut & paste from PyTorch official master until it's in a few official
releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
Args:
tensor (Tensor):
An n-dimensional `Tensor`.
mean (float):
Mean of the normal distribution.
std (float):
Standard deviation of the normal distribution.
a (float):
Minimum cutoff value.
b (float):
Maximum cutoff value.
"""
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
error_console.log(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"Fdistribution of values may be incorrect.", stacklevel=2
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
        # Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor | 064fce46591c490999c6495999554700f478878b | 3,659,026 |
def inverse_update(C, m, return_drop=False):
"""
Compute the inverse of a matrix with the m-th row and column dropped given knowledge of the inverse of the original
matrix.
C = inv(A)
B = drop_col(drop_row(A, m),m)
computes inv(B) given only C
Args:
        C: inverse of the full matrix
m: row and col to drop
return_drop: whether to also return the array used to drop the m-th row/col.
Returns:
B
if return_drop:
the array to drop row/col using jnp.take(v, drop_array)
"""
drop = drop_array(C.shape[0], m)
_a = jnp.take(C, drop, axis=0) # drop m row
a = jnp.take(_a, drop, axis=1)
c = jnp.take(C, drop, axis=1)[None, m, :] # drop m col
b = _a[:, m, None]
d = C[m, m]
res = a - (b @ c) / d
if return_drop:
return res, drop
return res | 0f368d30d0459fe3d07d6fc1fa19dedc449e23e9 | 3,659,027 |
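# inverse_update above references a drop_array helper that is not part of the
# record. A minimal sketch consistent with its use (indices 0..n-1 with m removed),
# plus a quick numerical check of the identity under that assumption:
import numpy as np
import jax.numpy as jnp

def drop_array(n, m):
    return jnp.asarray([i for i in range(n) if i != m])

A = np.random.randn(5, 5)
A = A @ A.T + 5 * np.eye(5)  # symmetric positive definite
C = jnp.linalg.inv(jnp.asarray(A))
m = 2
B = np.delete(np.delete(A, m, axis=0), m, axis=1)
print(np.allclose(np.linalg.inv(B), np.asarray(inverse_update(C, m)), atol=1e-4))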
def loss_calc(settings, all_batch, market_batch):
""" Calculates nn's NEGATIVE loss.
Args:
settings: contains the neural net
all_batch: the inputs to neural net
market_batch: [open close high low] used to calculate loss
Returns:
cost: loss - l1 penalty
"""
loss = settings['nn'].loss_np(all_batch, market_batch)
return -loss | fdca1bb0fa86d1972c2a0f8b1fab10183e98fb4e | 3,659,028 |
def fits_downloaded_correctly(fits_loc):
"""
Is there a readable fits image at fits_loc?
Does NOT check for bad pixels
Args:
fits_loc (str): location of fits file to open
Returns:
(bool) True if file at fits_loc is readable, else False
"""
try:
img, _ = fits.getdata(fits_loc, 0, header=True)
return True
except Exception: # image fails to open
return False | 8df470b4b2895fb7d77cbccefbd2eae7f22c649b | 3,659,029 |
import numpy as np
def union_of_rects(rects):
    """
    Calculates the union (bounding box) of N rectangular boxes
    Assumes rects of form N x [xmin, ymin, xmax, ymax]
    """
xA = np.min(rects[:, 0])
yA = np.min(rects[:, 1])
xB = np.max(rects[:, 2])
yB = np.max(rects[:, 3])
return np.array([xA, yA, xB, yB], dtype=np.int32) | 904cb58f593bedfbf0e28136a446b4f877955e49 | 3,659,030 |
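# Quick example for union_of_rects above (assumed input layout N x [xmin, ymin, xmax, ymax]):
boxes = np.array([[10, 10, 50, 40], [30, 5, 70, 35]])
print(union_of_rects(boxes))  # [10  5 70 40]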
from typing import List
from typing import Dict
def configure_services(config: List[Dict]) -> Dict[str, GcpServiceQuery]:
"""
Generate GcpServiceQuery list from config
    :param config: list with GcpServiceQuery's configuration
:return: mapping of service name to GcpServiceQuery objects
"""
if not isinstance(config, list):
raise GcpServiceQueryConfigError(f"Invalid GcpServiceQuery config {config}")
result = {}
for entry in config:
if not isinstance(entry, dict):
raise GcpServiceQueryConfigError(f"Invalid GcpServiceQuery entry type: '{entry}'. "
f"Should be dict, is {type(entry)}")
serviceName = entry.get(SERVICE_NAME, None)
version = entry.get(VERSION, None)
queries = entry.get(QUERIES, None)
if not serviceName or not version or not queries:
raise GcpServiceQueryConfigError(f"Missing required key for entry {entry}")
gcp_service_query = GcpServiceQuery(serviceName, version)
# Check multiple entries with same name
if serviceName in result:
raise GcpServiceQueryConfigError(f"Multiple GCP service with same name: {serviceName}")
result[serviceName] = gcp_service_query
return result | 3c9b9472de4d319446ec4da1d990ecc1750bd248 | 3,659,031 |
def tags_get():
"""
Get endpoint /api/tag
args:
optional company_filter(int) - id of a company, will only return tag relation to said company
        optional crowd(int) - 0 - 2 specifying crowd sourcing option. Key:
0 - all tags
1 - Only crowd sourced tags
2 - Only non crowd sourced tags
optional only_ids - if set only returns ids of tags
return:
List Tags - A json list of all tags that match the optional args.
"""
request_data = request.get_json()
company_filter = get_if_exist(request_data, "company_filter")
only_ids = get_if_exist(request_data,"only_ids")
crowd = get_if_exist(request_data, "crowd")
    if crowd:
        if crowd > 2:
            return status.HTTP_400_BAD_REQUEST
    else:
        crowd = 0
if company_filter:
t = db.session.query(
Tag_company.tag,
).filter(Tag_company.company == int(company_filter)).group_by(Tag_company.tag).subquery('t')
Tag_query = Tag.query.filter(
Tag.id == t.c.tag
)
else:
Tag_query = Tag.query
if crowd != 0:
crowd = (1==crowd)
Tag_query = Tag_query.filter_by(crowd_soured = crowd)
tags = Tag_query.all()
if only_ids:
return jsonify([tag.id for tag in tags]), status.HTTP_200_OK
else:
return jsonify([tag.serialize for tag in tags]), status.HTTP_200_OK | c009c0b84bbc825383dffb1141361dd1732b7b19 | 3,659,032 |
def get_accept_languages(accept):
"""Returns a list of languages, by order of preference, based on an
    HTTP Accept-Language string. See W3C RFC 2616
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html) for specification.
"""
langs = parse_http_accept_header(accept)
for index, lang in enumerate(langs):
langs[index] = lang_in_gettext_format(lang)
return langs | ad329605cd0101e61c2c21aa42f2c81a84db771b | 3,659,034 |
import numpy as np
def get_princ_axes_xyz(tensor):
"""
Gets the principal stress axes from a stress tensor.
Modified from beachball.py from ObsPy, written by Robert Barsch.
That code is modified from Generic Mapping Tools (gmt.soest.hawaii.edu)
Returns 'PrincipalAxis' classes, which have attributes val, trend, plunge
Returns T, N, P
"""
tensor = np.array(tensor)
(D, V) = sorted_eigens(tensor)
pl = np.arcsin( -V[2] ) # 2
az = np.arctan2( V[0], -V[1] ) # 0 # 1
for i in range(0, 3):
if pl[i] <= 0:
pl[i] = -pl[i]
az[i] += np.pi
if az[i] < 0:
az[i] += 2 * np.pi
if az[i] > 2 * np.pi:
az[i] -= 2 * np.pi
pl *= 180 / np.pi
az *= 180 / np.pi
T = PrincipalAxis( D[0], az[0], pl[0] ) # 0 0 0
N = PrincipalAxis( D[1], az[1], pl[1] )
P = PrincipalAxis( D[2], az[2], pl[2] ) # 2 2 2
return(T, N, P) | e9285464e17eb987ebfd21c8e066ff745a856dc1 | 3,659,035 |
def extractYoushoku(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if 'The Other World Dining Hall' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'The Other World Dining Hall', vol, chp, frag=frag, postfix=postfix)
return False | 686daef4d594e0e53779be48d7c49a525cabe4ee | 3,659,036 |
import numpy as np
import pandas as pd
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
Returns
        pandas DataFrame with the diameters as the index and the Mie results in the different columns
        total_extinction_coefficient: this takes the sum of all particles' cross-sections of the particular diameter in a cubic
            meter. This is in principle the AOD of an L
"""
diam = np.asarray(diam)
extinction_efficiency = np.zeros(diam.shape)
scattering_efficiency = np.zeros(diam.shape)
absorption_efficiency = np.zeros(diam.shape)
extinction_crossection = np.zeros(diam.shape)
scattering_crossection = np.zeros(diam.shape)
absorption_crossection = np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = pd.DataFrame(index=diam)
out['extinction_efficiency'] = pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = pd.Series(extinction_crossection, index=diam)
out['scattering_crossection'] = pd.Series(scattering_crossection, index=diam)
out['absorption_crossection'] = pd.Series(absorption_crossection, index=diam)
return out, angular_scattering_natural | 4ce8fa518477c3eb38816d8f441207716b3a90df | 3,659,037 |
from typing import Tuple
def load_config_dict(pipette_id: str) -> Tuple[
'PipetteFusedSpec', 'PipetteModel']:
""" Give updated config with overrides for a pipette. This will add
the default value for a mutable config before returning the modified
config value.
"""
override = load_overrides(pipette_id)
model = override['model']
config = fuse_specs(model)
if 'quirks' not in override.keys():
override['quirks'] = {key: True for key in config['quirks']}
for top_level_key in config.keys():
if top_level_key != 'quirks':
add_default(config[top_level_key]) # type: ignore
config.update(override) # type: ignore
return config, model | 485db2aad493eda30e6dad07b3d6c9413bc5c3c8 | 3,659,038 |
def ErrorAddEncKey(builder, encKey):
"""This method is deprecated. Please switch to AddEncKey."""
return AddEncKey(builder, encKey) | c39bb36b3923ca1a0e508b23ef84a6de130700a3 | 3,659,039 |
import openpyxl
def _read_txs_from_file(f):
"""
Validate headers and read buy/sell transactions from the open file-like object 'f'.
Note: we use the seek method on f.
"""
ans = []
f.seek(0)
workbook = openpyxl.load_workbook(f)
sheet = workbook.active
all_contents = list(sheet.rows)
_validate_header(all_contents[0])
contents = all_contents[1:]
for row in contents:
item = _tx_from_gemini_row(row)
if item is not None:
ans.append(item)
return ans | 0c62c647a2ff1a797fb5e8593279bbf64bc0d495 | 3,659,040 |
from typing import Union
def get_generator_regulation_lower_term_4(data, trader_id, intervention) -> Union[float, None]:
"""Get L5RE term 4 in FCAS availability calculation"""
# Term parameters
enablement_min = get_effective_enablement_min(data, trader_id, 'L5RE')
energy_target = lookup.get_trader_solution_attribute(data, trader_id, '@EnergyTarget', float, intervention)
lower_slope_coefficient = get_lower_slope_coefficient(data, trader_id, 'L5RE')
# Ignore limit if slope coefficient = 0
if lower_slope_coefficient == 0:
return None
return 0 if (lower_slope_coefficient is None) else (energy_target - enablement_min) / lower_slope_coefficient | 626ab26f92feefea25777046c1fc37c4115f7be8 | 3,659,041 |
def count_parameters(model):
"""count model parameters"""
return sum(p.numel() for p in model.parameters() if p.requires_grad) | 5edcb3ee03794cb66f5986670c4825efab93a1d8 | 3,659,042 |
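# Usage sketch for count_parameters above (illustrative addition): a
# Linear(10 -> 5) layer has 10 * 5 weights + 5 biases = 55 trainable parameters.
import torch.nn as nn
assert count_parameters(nn.Linear(10, 5)) == 55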
def string_rule_variable(label=None, params=None, options=None, public=True):
"""
Decorator to make a function into a string rule variable.
NOTE: add **kwargs argument to receive Rule as parameters
:param label: Label for Variable
:param params: Parameters expected by the Variable function
:param options: Options parameter to specify expected options for the variable.
The value used in the Condition IS NOT checked against this list.
:param public: Flag to identify if a variable is public or not
:return: Decorator function wrapper
"""
return _rule_variable_wrapper(StringType, label, params=params, options=options, public=public) | 3bd35ac2e27c58ee35f7e13bb359cb8240f8efda | 3,659,043 |
import numpy as np
def detect_horizon_lines(image_thre, row, busbar, cell_size, thre=0.6, split=50, peak_interval=None, margin=None):
""" Detect horizontal edges by segmenting image into vertical splits
Parameters
---------
image_thre: array
Adaptive threshold of raw images
row: int
Number of rows of solar module
busbar: int
Number of busbars of a solar cell
cell_size: int
Output cell size in pixel
thre: float
Peak intensity above THRE will be set as 1.
Note that the edge's peak intensity should be lowest because edges are black
split: int
Number of splits
peak_interval: int
Distance between each peak.
Returns
-------
hline_abs_couple: array
Suppose a line is y=a*x+b.
Return 'a' and 'b' of a couple edges (top and bottom of a cell).
"""
#width = image_thre.shape[1]
#end = int(width / split)
#image_vsplits = np.hsplit(image_thre[:, :end * split], split) # vertical splits
#image_vsplits.append(image_thre[:, end * split:])
image_vsplits = split_img(image_thre, split=split, direction=1)
edge_y = []
inx_x = []
for inx, im_split in enumerate(image_vsplits):
#sum_split = np.sum(im_split, axis=1)
#sum_split = sum_split / np.max(sum_split)
#sum_split[sum_split > thre] = 1
#if peak_interval is None:
# peak_interval = int(cell_size / (busbar + 1) * 0.5)
#peak, _ = find_peaks(-1 * sum_split, distance=peak_interval)
peak = detect_peaks(im_split, 1, cell_size, busbar, thre, peak_interval, margin=margin)
if len(peak) >= row * (busbar + 1) - 1:
peak_new = [peak[0]]
for i in range(1, len(peak) - 1):
if np.abs(peak[i] - peak[i + 1]) < 15:
peak_mean = (peak[i] + peak[i + 1]) / 2
peak_new.append(peak_mean)
elif np.abs(peak[i] - peak[i - 1]) > 15:
peak_new.append(peak[i])
peak_new.append(peak[-1])
peak_new = np.array(peak_new)
peak_new_a = np.delete(peak_new, 0)
peak_new_b = np.delete(peak_new, -1)
peak_new_detect = peak_new[detectoutliers(np.abs(peak_new_a - peak_new_b), rate=0.5, option=1)]
if len(peak_new_detect) == (busbar + 1) * row + 1:
edge_y.append(peak_new_detect)
inx_mean = ((2 * inx + 1) * (image_thre.shape[1] / split) - 1) / 2
inx_x.append(inx_mean)
edge_y = np.array(edge_y)
hlines = list(zip(*edge_y))
hlines = np.array(hlines)
inx_x = np.array(inx_x)
# for lines in hlines:
# lines_new = self.detectoutliers(lines, option=0)
# while np.std(lines_new) > 10:
# lines_new = self.detectoutliers(lines, rate=1, option=0)
# hb_abs = [] # all lines including busbar
hb_abs = linear_regression(inx_x, hlines, outlier_filter=True)
hline_abs_couple = [] # all lines excluding busbar
# for horizonline in hlines:
# ab, _ = curve_fit(self.linear, inx_x, horizonline) # y = ax + b
# hb_abs.append(ab)
hline_abs_couple = [(hb_abs[(busbar + 1) * i], hb_abs[(busbar + 1) * (i + 1)]) for i in range(row)]
# hline_abs = [(hb_abs[(4+1)*i],hb_abs[(4+1)*(i+1)]) for i in range(6)]
# hline_abs = [(hb_abs[(self.busbar+2)*i],hb_abs[(self.busbar+2)*(i+1)-1]) for i in range(self.row)]
return hline_abs_couple | e8365b29829d6e1a71c4c9caefff221d9357b0a3 | 3,659,044 |
def countRoem(cards, trumpSuit=None):
"""Counts the amount of roem (additional points) in a list of cards
    Args:
        cards: the list of cards to count roem for
        trumpSuit: the trump suit, or None when there is no trump
Returns:
Integer value how many points of roem are in the cards in total
"""
roem = 0
# Stuk
# Without a trumpSuit, stuk is impossible
if trumpSuit is not None:
#trumpKing = list(filter(lambda c: c.suit == trumpSuit and c.rank == 4, cards))
#trumpQueen = list(filter(lambda c: c.suit == trumpSuit and c.rank == 5, cards))
trumpKing = [card for card in cards if card.suit == trumpSuit and card.rank == 4]
trumpQueen = [card for card in cards if card.suit == trumpSuit and card.rank == 5]
if trumpKing and trumpQueen:
roem += 20
# Normal roem
# For each suit we check whether there are 3 cards in that suit, if so there is chance for roem
for i in range(4):
#cardsInSuit = list(filter(lambda c: c.suit == i, cards))
cardsInSuit = [card for card in cards if card.suit == i]
if len(cardsInSuit) >= 3:
cards = cardsInSuit
# We sort the list and check the difference between consecutive cards
cards.sort(key=lambda c: c.rank)
subtractList = []
for i in range(len(cards) - 1):
#subtract = abs(cards[i].roemRank - cards[i+1].roemRank)
                subtract = abs(ROEMRANKS[cards[i].rank] - ROEMRANKS[cards[i + 1].rank])
subtractList.append(subtract)
# If more than 1 difference equals 1, we know at least 3 cards have consecutive ranks
#lenOfOnes = len(list(filter(lambda x: x == 1, subtractList)))
lenOfOnes = len([x for x in subtractList if x == 1])
if lenOfOnes == 2:
roem += 20
elif lenOfOnes == 3:
roem += 50
return roem | 31e2dbf346801fa81e5a5905a480f6d5b8e9ce1a | 3,659,045 |
from typing import Optional
def batch_to_space(
data: NodeInput,
block_shape: NodeInput,
crops_begin: NodeInput,
crops_end: NodeInput,
name: Optional[str] = None,
) -> Node:
"""Perform BatchToSpace operation on the input tensor.
BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions.
:param data: Node producing the data tensor.
:param block_shape: The sizes of the block of values to be moved.
:param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
:param crops_end: Specifies the amount to crop from the end along each axis of `data`.
:param name: Optional output node name.
:return: The new node performing a BatchToSpace operation.
"""
return _get_node_factory_opset2().create(
"BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end),
) | fe7004243e7c4a6dfd78b1f39df22ba7290c9244 | 3,659,046 |
def url_in(url):
""" Send a URL and I'll post it to Hive """
custom_json = {'url': url}
    trx_id, success = send_notification(custom_json)
return trx_id, success | ecfcb02cdbd9050a5a305f38d9673d64b9b1d307 | 3,659,047 |
def login():
"""
Display a basic login form in order to log in a user
"""
if request.method == 'GET':
return render_template('login.html')
else:
try:
usr = User.query.get(request.form['user_id'])
if bcrypt.checkpw(request.form['user_password'].encode('utf-8'),usr.password):
login_user(usr, remember=True)
flash('Logged in successfully')
return redirect(session['next_url'])
except Exception as e:
print("Sorry this user don't exist")
print(e)
return render_template('login.html') | 37702dc290d627544d5714ed21d8804eaa00f354 | 3,659,048 |
def hflip(stream):
"""Flip the input video horizontally.
Official documentation: `hflip <https://ffmpeg.org/ffmpeg-filters.html#hflip>`__
"""
return FilterNode(stream, hflip.__name__).stream() | 140f7d4ceecee09e5f0ba7db9a68cee15e536ffa | 3,659,049 |
def get_diagonal_ripple_rainbows_2():
"""
    Returns 15 diagonal ripple rainbows
Programs that use this function:
- Diagonal Ripple 3
- Diagonal Ripple 4
"""
rainbow01 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8]
]
rainbow02 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8]
]
rainbow03 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8]
]
rainbow04 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8]
]
rainbow05 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8]
]
rainbow06 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8]
]
rainbow07 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8]
]
rainbow08 = [
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H]
]
rainbow09 = [
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow10 = [
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow11 = [
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow12 = [
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow13 = [
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow14 = [
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow15 = [
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
return rainbow01, rainbow02, rainbow03, rainbow04, rainbow05, \
rainbow06, rainbow07, rainbow08, rainbow09, rainbow10, \
rainbow11, rainbow12, rainbow13, rainbow14, rainbow15 | ce917a063de580b2fcacfe2b59991585aefe30a4 | 3,659,050 |
import numpy as np
def matrix_prod(A, B, display=False):
"""
Computes the matrix product of two matrices using array slicing and vector operations.
"""
if A.shape[1] != B.shape[0]:
raise ValueError("Dimensions not compatible.")
# Not allowed!?
#matrix = A.dot(B)
# Dotproduct of each A.row*B.clm
matrix = np.array([[np.sum(A[i,:]*B[:,j]) for j in range(B.shape[1])]
for i in range(A.shape[0])])
if display:
print(matrix)
return matrix | c38c3c3c9b1d2cc3edf6efb1997fe94a15c870ec | 3,659,051 |
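# Usage sketch for matrix_prod above (illustrative addition): the slicing-based
# product matches the built-in matrix multiplication.
A = np.arange(6).reshape(2, 3)
B = np.arange(12).reshape(3, 4)
assert np.array_equal(matrix_prod(A, B), A @ B)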
import numpy as np
def remove_quat_discontinuities(rotations):
"""
Removing quat discontinuities on the time dimension (removing flips)
:param rotations: Array of quaternions of shape (T, J, 4)
:return: The processed array without quaternion inversion.
"""
rots_inv = -rotations
for i in range(1, rotations.shape[0]):
# Compare dot products
replace_mask = np.sum(rotations[i - 1: i] * rotations[i: i + 1], axis=-1) < np.sum(
rotations[i - 1: i] * rots_inv[i: i + 1], axis=-1)
replace_mask = replace_mask[..., np.newaxis]
rotations[i] = replace_mask * rots_inv[i] + (1.0 - replace_mask) * rotations[i]
return rotations | 7d3874f5c56f82f3a8951daef48ac115f7f8943a | 3,659,052 |
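# Usage sketch for remove_quat_discontinuities above (illustrative addition):
# q and -q represent the same rotation, so a sign flip between consecutive
# frames gets undone.
quats = np.array([[[0.0, 0.0, 0.0, 1.0]],
                  [[0.0, 0.0, 0.0, -1.0]]])  # frame 1 is a flipped copy of frame 0
fixed = remove_quat_discontinuities(quats.copy())
assert np.allclose(fixed[1, 0], [0.0, 0.0, 0.0, 1.0])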
import glob
import numpy as np
def compute_profile_from_frames(frames_str, ax, bt, box, N_bins=100, \
shift=None, verbose=False):
"""
Compute a density profile from a batch of xyz frames.
Input
=====
- frames_str: a regex containing frames in xyz format
- ax: axis along which to compute the profile
- bt: bead type
- box: box size, a (3, 3) matrix
- N_bins: number of bins
Output
======
- r: position vector
- pr: density profile vector
"""
frames = glob.glob(frames_str)
assert len(frames) != 0, "No xyz frames captured."
Nf = len(frames)
N = int(open(frames[0], "r").readline())
if verbose:
print(frames)
L = np.diag(box)
bins = np.linspace(0, L[ax], N_bins + 1)
dr = bins[1] - bins[0]
r = dr / 2.0 + bins[:-1]
Lsurf = L[list(set(range(3)).difference([ax]))] # cross-sectional surface
pr = np.zeros_like(r)
for frame in frames:
bl, X0 = read_xyz(frame)
if shift is not None:
assert len(shift) == 3, "Vector of shifting must be of size 3."
shift = np.array(shift)
X0 = X0 + shift
X0 = X0 % L
if bt == -1:
X = X0
else:
X = X0[bl == bt]
pr += np.histogram(X[:, ax], bins=bins)[0]
pr = pr / (dr * np.prod(Lsurf)) / Nf
return r, pr | 70702dbcf73f2a7e9894899ca20f81eadc3046fe | 3,659,053 |
import urllib
import requests
import json
def wikipedia_search(query, lang="en", max_result=1):
"""
https://www.mediawiki.org/wiki/API:Opensearch
"""
query = any2unicode(query)
params = {
"action":"opensearch",
"search": query,
"format":"json",
#"formatversion":2,
#"namespace":0,
"suggest":"true",
"limit": 10
}
urlBase = "https://{}.wikipedia.org/w/api.php?".format(lang)
url = urlBase + urllib.urlencode(any2utf8(params))
#logging.info(url)
r = requests.get(url)
jsonData = json.loads(r.content)
#logging.info(jsonData)
items = []
ret = {"query":query, "itemList":items}
for idx, label in enumerate(jsonData[1][0:max_result]):
description = jsonData[2][idx]
url = jsonData[3][idx]
item = {
"name": label,
"description":description,
"url": url,
}
items.append(item)
return ret | e88b50c11d78989e086417d15e91515d24151586 | 3,659,054 |
def group_result(result, func):
"""
:param result: A list of rows from the database: e.g. [(key, data1), (key, data2)]
:param func: the function to reduce the data e.g. func=median
:return: the data that is reduced. e.g. [(key, (data1+data2)/2)]
"""
data = {}
for key, value in result:
if key in data.keys():
data[key].append(value)
else:
data[key] = [value]
for key in data:
data[key] = func(data[key])
return data.items() | 7687521c216210badcda5ee54bd59a3bc6a234bd | 3,659,055 |
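# Usage sketch for group_result above (illustrative addition): collapse
# duplicate keys with a reducer, e.g. the median of all values seen per key.
from statistics import median
rows = [("a", 1), ("a", 3), ("b", 10)]
assert dict(group_result(rows, median)) == {"a": 2, "b": 10}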
import cv2
import torch
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
# img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = img.transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim | 65159c8ce3a2df3cb09a6f1f318bb3374943e314 | 3,659,056 |
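# Usage sketch for prep_image above (illustrative addition): a dummy BGR frame
# is resized to the network input size and converted to a normalised 1x3xHxW
# float tensor.
import numpy as np
frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
tensor, original, (w, h) = prep_image(frame, 416)
assert tuple(tensor.shape) == (1, 3, 416, 416) and (w, h) == (640, 480)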
import uuid
def extractLogData(context):
"""
helper function to extract all important data from the web context.
:param context: the web.py context object
:return: a dictionary with all information for the logging.
"""
logData = {}
logData['ip'] = context.ip
logData['account'] = context.env.get('HTTP_RUCIO_ACCOUNT')
logData['appid'] = 'clients' # has to changed, but atm no appid is send with the clients
logData['clientref'] = context.env.get('HTTP_RUCIO_CLIENTREF')
logData['uri'] = context.method + ' ' + context.protocol + "://" + context.host + context.homepath + context.fullpath
    logData['requestid'] = uuid.uuid4()
logData['requestHeader'] = context.env
logData['responseHeader'] = ''
logData['httpCode'] = ''
logData['duration'] = ''
return logData | 5fb68d4f19dae0b7175a089dd1366cab0407152b | 3,659,057 |
from tensorflow.keras.applications import MobileNetV2, ResNet50
def Backbone(backbone_type='ResNet50', use_pretrain=True):
"""Backbone Model"""
weights = None
if use_pretrain:
weights = 'imagenet'
def backbone(x_in):
if backbone_type == 'ResNet50':
return ResNet50(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
elif backbone_type == 'MobileNetV2':
return MobileNetV2(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
else:
raise TypeError('backbone_type error!')
return backbone | 23bc493e8306d5dc5dba33cd2f67de231cbb3e02 | 3,659,058 |
def start(ctx, vca_client, **kwargs):
"""
power on server and wait network connection availability for host
"""
# combine properties
obj = combine_properties(
ctx, kwargs=kwargs, names=['server'],
properties=[VCLOUD_VAPP_NAME, 'management_network'])
# get external
if obj.get('use_external_resource'):
ctx.logger.info('not starting server since an external server is '
'being used')
else:
vapp_name = get_vapp_name(ctx.instance.runtime_properties)
config = get_vcloud_config()
vdc = vca_client.get_vdc(config['vdc'])
vapp = vca_client.get_vapp(vdc, vapp_name)
_power_on_vm(ctx, vca_client, vapp, vapp_name)
if not _get_state(ctx=ctx, vca_client=vca_client):
return ctx.operation.retry(
message="Waiting for VM's configuration to complete",
retry_after=5) | 6e3e3a94095ef200e586f7dfdc7e117ae3ee375f | 3,659,059 |
import numpy as np
from numpy import log1p
def softplus(z):
"""Numerically stable version of log(1 + exp(z))."""
# see stabilizing softplus: http://sachinashanbhag.blogspot.com/2014/05/numerically-approximation-of-log-1-expy.html # noqa
mu = z.copy()
mu[z > 35] = z[z > 35]
mu[z < -10] = np.exp(z[z < -10])
mu[(z >= -10) & (z <= 35)] = log1p(np.exp(z[(z >= -10) & (z <= 35)]))
return mu | f683c1f2240d053c4ee2c24f64ff5576c0d9d32d | 3,659,060 |
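# Usage sketch for softplus above (illustrative addition): the piecewise
# version stays finite and accurate where the naive log(1 + exp(z)) would
# overflow.
z = np.array([-50.0, 0.0, 50.0])
sp = softplus(z)
assert np.isclose(sp[1], np.log(2.0)) and np.isclose(sp[2], 50.0)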
from typing import Mapping
from typing import Hashable
from typing import Union
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import OrderedDict
from typing import Any
import pandas as pd
def merge_indexes(
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],
variables: Mapping[Hashable, Variable],
coord_names: Set[Hashable],
append: bool = False,
) -> "Tuple[OrderedDict[Any, Variable], Set[Hashable]]":
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace = {} # Dict[Any, Variable]
vars_to_remove = [] # type: list
error_msg = "{} is not the name of an existing variable."
for dim, var_names in indexes.items():
if isinstance(var_names, str) or not isinstance(var_names, Sequence):
var_names = [var_names]
names, codes, levels = [], [], [] # type: (list, list, list)
current_index_variable = variables.get(dim)
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
if (
current_index_variable is not None
and var.dims != current_index_variable.dims
):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims)
)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
try:
current_codes = current_index.codes
except AttributeError:
                    # for pandas<0.24
current_codes = current_index.labels
names.extend(current_index.names)
codes.extend(current_codes)
levels.extend(current_index.levels)
else:
names.append("%s_level_0" % dim)
cat = pd.Categorical(current_index.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else:
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
names.append(n)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(levels, codes, names=names)
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = OrderedDict(
[(k, v) for k, v in variables.items() if k not in vars_to_remove]
)
new_variables.update(vars_to_replace)
new_coord_names = coord_names | set(vars_to_replace)
new_coord_names -= set(vars_to_remove)
return new_variables, new_coord_names | b893d118312697d1995a0a42bbff8354b73ca642 | 3,659,061 |
import numpy as np
import cvxpy as cp
def least_squares(m, n):
""" Create a least squares problem with m datapoints and n dimensions """
A = np.random.randn(m, n)
_x = np.random.randn(n)
b = A.dot(_x)
x = cp.Variable(n)
return (x, cp.Problem(cp.Minimize(cp.sum_squares(A * x - b) + cp.norm(x, 2)))) | 21b3b4577ec232f6e74d1f096946d0923f867cf7 | 3,659,062 |
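# Usage sketch for least_squares above (illustrative addition): build and solve
# one random instance. Assumes a default CVXPY solver (e.g. ECOS/OSQP) is
# available; note the snippet above uses the legacy `A * x` syntax for matrix
# multiplication, which recent CVXPY releases expect to be written as `A @ x`.
x, prob = least_squares(20, 5)
prob.solve()
assert x.value is not None and x.value.shape == (5,)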
import numpy as np
def expand_amn(a, kpoints, idx, Rvectors, nproj_atom=None):
"""
Expand the projections matrix by translations of the orbitals
Parameters
----------
a : ndarray, shape (nkpts, nbnds, nproj)
kpoints : ndarray, shape (nkpts, 3)
idx : ndarray
indices of translated orbitals
Rvectors: ndarray
translation vectors for the orbitals
nproj_atom: ndarray, optional
number of projections on each atom, with idx and Rvectors now describing
atoms instead of orbitals
"""
assert len(Rvectors) == len(idx)
if nproj_atom is not None:
assert len(nproj_atom) == len(idx)
idx_new = []
Rvectors_new = []
for iatom, i in enumerate(idx):
offset = np.sum(nproj_atom[:i])
for j in range(nproj_atom[i]):
idx_new.append(offset+j)
Rvectors_new.append(Rvectors[iatom])
idx = idx_new
Rvectors = Rvectors_new
nkpts, nbnds, nproj = a.shape
a1 = np.zeros((nkpts, nbnds, len(idx)), dtype=complex)
k_dot_R = np.einsum('ki,ri->kr', kpoints, Rvectors)
exp_factors = np.exp(-1j * 2*np.pi * k_dot_R)
a1 = a[:, :, idx] * exp_factors[:, np.newaxis, :]
return a1 | d68a7cd4cb019b2d516305d0b6a2b45f6a422ba8 | 3,659,065 |
import numpy as np
def combine_basis_vectors(weights, vectors, default_value=None, node_num=None):
"""
Combine basis vectors using ``weights`` as the Manning's n value for each
basis vector. If a ``default_value`` is set then all nodes with out data
are set to the ``default_value``.
:type weights: :class:`numpy.ndarray`
:param weights: array of size (num_of_basis_vec, 1)
:type vectors: list of dicts OR :class:`numpy.ndarray` of size (node_num,
num_of_basis_vec)
:param vectors: basis vectors
:returns: an array of size (node_num, 1) containing the manningsn value at
all nodes in numerical order or a dictionary
"""
if len(weights) != len(vectors):
raise LenError('weights, vectors', 'dimensions do not match')
    if isinstance(vectors[0], np.ndarray):
        return combine_bv_array(weights, vectors)
elif default_value and node_num:
return dict_to_array(add_dict(vectors, weights)[0], default_value,
node_num)
else:
return add_dict(vectors, weights)[0] | 50a0cc5ba8ad88a480fc589f6fbe184548700485 | 3,659,066 |
from typing import List
from typing import Tuple
from typing import Any
import networkx as nx
def _prepare_data_for_node_classification(
graph: nx.Graph, seed_node: int
) -> List[Tuple[Any, Any]]:
"""
Position seed node as the first node in the data.
TensorFlow GNN has a convention whereby the node to be classified, the "seed node",
is positioned first in the component. This is for use with layers such as
`tfgnn.keras.layers.ReadoutFirstNode` which extracts the first node from a component.
"""
seed_data = graph.nodes(data=True)[seed_node]
data = [(seed_data["features"], seed_data["label"])]
data += [
(data["features"], data["label"])
for node, data in graph.nodes(data=True)
if node != seed_node
]
return data | 3ed718e583d9e96b2c5bd28e5640c36e5e009065 | 3,659,067 |
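# Usage sketch for _prepare_data_for_node_classification above (illustrative
# addition): the seed node's (features, label) pair is moved to the front,
# matching the "first node is the seed" readout convention.
g = nx.Graph()
g.add_node(0, features=[0.1], label=0)
g.add_node(1, features=[0.2], label=1)
g.add_edge(0, 1)
assert _prepare_data_for_node_classification(g, seed_node=1)[0] == ([0.2], 1)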