content (stringlengths 35–416k) | sha1 (stringlengths 40–40) | id (int64 0–710k) |
---|---|---|
def get_invalid_value_message(value_name: str, value: str, line_no: int, uid: str, expected_vals: "list[str]") -> str:
"""
    Returns a formatted HTML message for an invalid value encountered while parsing student data.
"""
msg = f"Invalid {value_name} <span class=\"font-weight-bold\">{value}</span>\
on line <span class=\"text-primary\">{line_no}</span>\
of UID <span class=\"text-secondary\">{uid}</span>.\
Should be one of {expected_vals}"
return msg | cb7dc84b566bb117fe53ce5956919978558ccbbf | 3,513 |
def compute_score_for_coagulation(platelets_count: int) -> int:
"""
Computes score based on platelets count (unit is number per microliter).
"""
if platelets_count < 20_000:
return 4
if platelets_count < 50_000:
return 3
if platelets_count < 100_000:
return 2
if platelets_count < 150_000:
return 1
return 0 | dc6e9935555fbb0e34868ce58a8ad8bc77be8b0c | 3,514 |
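A quick usage sketch for compute_score_for_coagulation; the platelet counts below are chosen only for illustration:

assert compute_score_for_coagulation(15_000) == 4    # below 20,000/µL
assert compute_score_for_coagulation(45_000) == 3    # below 50,000/µL
assert compute_score_for_coagulation(120_000) == 1   # below 150,000/µL
assert compute_score_for_coagulation(200_000) == 0   # normal range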
def check_horizontal_visibility(board: list):
"""
    Check row-wise visibility (left to right and vice versa).
    Return True if all horizontal hints are satisfied,
    i.e., for line 412453*, the hint is 4, and 1, 2, 4, 5 are the four buildings
    that can be observed from the hint looking to the right.
>>> check_horizontal_visibility(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_horizontal_visibility(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_horizontal_visibility(['***21**', '452413*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
    def visible_count(line: str) -> int:
        # A building is visible iff it is taller than every building before it,
        # so count the running maxima (digit characters compare correctly).
        count, tallest = 0, ''
        for building in line:
            if building > tallest:
                count += 1
                tallest = building
        return count

    for row in board:
        buildings = row[1:-1]
        if row[0] != '*' and visible_count(buildings) != int(row[0]):
            return False
        if row[-1] != '*' and visible_count(buildings[::-1]) != int(row[-1]):
            return False
    return True | b84ff29fde689069ba5e92b10d54c8f0528aa321 | 3,515 |
def verify(params, vk, m, sig):
""" verify a signature on a clear message """
(G, o, g1, hs, g2, e) = params
(g2, X, Y) = vk
sig1 , sig2 = sig
return not sig1.isinf() and e(sig1, X + m * Y) == e(sig2, g2) | 7413d9172d383c3602cbc2b8348c4ace61c40302 | 3,516 |
import os
def prepend_items():
"""
    Return a function that prepends each item in the "paths" list with "prefix"
"""
def prepend_func(prefix, paths):
return [os.path.join(prefix, item) for item in paths]
return prepend_func | b7c4fd8e1c53c82ba7dd1e826feb084e6543691b | 3,517 |
import itertools
def average_distance(points, distance_func):
"""
    Given a set of points and a pairwise distance function, calculates the average
    distance between pairs of points, averaged over all C(num_points, 2) pairs.
"""
for p0, p1 in itertools.combinations(points, 2): # assert symmetry
assert abs(distance_func(p0, p1) - distance_func(p1, p0)) < 1e-7, \
'{} {} {} {}'.format(p0, p1, distance_func(p0, p1), distance_func(p1, p0))
for p0, p1, p2 in itertools.combinations(points, 3): # assert triangle inequality
assert distance_func(p0, p1) + distance_func(p1, p2) >= distance_func(p0, p2)
assert distance_func(p0, p2) + distance_func(p1, p2) >= distance_func(p0, p1)
assert distance_func(p0, p1) + distance_func(p0, p2) >= distance_func(
p1, p2), '{p0}-{p1}={d01} {p0}-{p2}={d02} {p1}-{p2}={d12}'.format(
p0=p0, p1=p1, p2=p2, d01=distance_func(p0, p1), d02=distance_func(p0, p2),
d12=distance_func(p1, p2))
# actual calculation happens below
total_dist = 0.0
all_pairs = list(itertools.combinations(points, 2))
for p0, p1 in all_pairs:
total_dist += distance_func(p0, p1)
if all_pairs:
return float(total_dist) / len(all_pairs)
else:
return 0.0 | 236735da94e902dd7fbe062de8abb9a02208156f | 3,520 |
import glob
import os
import random
def get_filenames(feature_folder, glob_pattern, sample_size=None):
"""
Finds the all the files in the given feature folder which matches the glob pattern.
:param feature_folder: The folder to search for files.
:param glob_pattern: The glob pattern to use for finding files.
:param sample_size: If given, restrict the number of files loaded to a sample of this size.
:return: A list of files matching the glob pattern in the feature folder.
"""
files = glob.glob(os.path.join(feature_folder, glob_pattern))
if sample_size is not None and sample_size < len(files):
files = random.sample(files, sample_size)
return files | 19b97b9e981b8fe0b978d5af2240dc22c02d7e93 | 3,522 |
def create_message(username, message):
""" Creates a standard message from a given user with the message
Replaces newline with html break """
message = message.replace('\n', '<br/>')
return '{{"service":1, "data":{{"message":"{mes}", "username":"{user}"}} }}'.format(mes=message, user=username) | d12807789d5e30d1a4a39c0368ebe4cf8fbde99e | 3,523 |
def obtener_atletas_pais(atletas: list, pais_interes: str) -> list:
"""
Función que genera una lista con la información de los atletas del país dado,
sin importar el año en que participaron los atletas.
Parámetros:
atletas: list de diccionarios con la información de cada atleta.
pais_interes: str.
Retorna:
atletas_pais: list con los diccionarios de los atletas del país.
diccionario de cada atleta: {'nombre': str, 'evento': str, 'anio': int}.
"""
# Inicializar lista de atletas del país.
atletas_pais = list()
# Inicio de recorrido por la lista de atletas.
for cada_atleta in atletas:
# Definición de variables del atleta actual.
anio_actual = cada_atleta['anio']
nombre_actual = cada_atleta['nombre']
evento_actual = cada_atleta['evento']
pais_actual = cada_atleta['pais']
# Verificación de nombre y rango de tiempo.
if pais_actual == pais_interes:
# Se añade el diccionario de atleta a la lista de atletas.
atletas_pais.append({'nombre': nombre_actual, 'evento': evento_actual, 'anio': anio_actual})
return atletas_pais | 4b03364a76af4e7818f977731b259fdfee6817ee | 3,525 |
def oddify(n):
"""Ensure number is odd by incrementing if even
"""
return n if n % 2 else n + 1 | dee98063cb904cf462792d15129bd90a4b50bd28 | 3,527 |
def concatenation(clean_list):
"""
Concatenation example.
Takes the processed list for your emails and concatenates any elements that are currently separate that you may
wish to have as one element, such as dates.
E.g. ['19', 'Feb', '2018'] becomes ['19 Feb 2018]
Works best if the lists are similar as it works by using the index of an element and joining it to other elements
using a positive or negative index.
"""
index_of_item = clean_list.index("your chosen item")
clean_list[:index_of_item] = [' '.join(clean_list[:index_of_item])] # joins together every element from start to the index of the item
# to join elements mid-list:
another_index = clean_list.index("another item") # date concatenation
date_start = another_index - 3
date_end = another_index
clean_list[date_start:date_end] = [' '.join(clean_list[date_start:date_end])] # joins the 3 elements before 'another item' index
return clean_list | 59b727f21e663f2836f6fe939f4979e9f7484f62 | 3,528 |
import sys
import hashlib
def calc_md5_sign(secret, parameters):
"""
    Computes the MD5 sign from the app_secret and the parameter string; parameters may be a dict (recommended) or a str.
:param secret: str
:param parameters:
:return:
"""
if hasattr(parameters, "items"):
keys = list(parameters.keys())
keys.sort()
parameters_str = "%s%s%s" % (secret,
''.join('%s%s' % (key, parameters[key]) for key in keys),
secret)
else:
parameters_str = parameters
    if sys.version_info >= (3, 0):  # Python 3 has built-in unicode support, so encode directly
parameters_str = parameters_str.encode(encoding='utf-8')
    else:  # Python 2 still needs a unicode check
parameters_str = mixStr_py2(parameters_str)
sign_hex = hashlib.md5(parameters_str).hexdigest().upper()
return sign_hex | f13cd469a86942c011f3d419a4a2cf89c79cf2df | 3,529 |
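A usage sketch with hypothetical secret and parameter values, illustrating the signing convention (keys are sorted, then the concatenation is wrapped in the secret on both sides):

sign = calc_md5_sign("app_secret", {"method": "item.get", "v": "2.0"})
# the string actually hashed is "app_secretmethoditem.getv2.0app_secret"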
import torch
def model_predict(model, test_loader, device):
"""
Predict data in dataloader using model
"""
# Set model to eval mode
model.eval()
# Predict without computing gradients
with torch.no_grad():
y_preds = []
y_true = []
for inputs, labels in test_loader:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
y_preds.append(preds)
y_true.append(labels)
y_preds = torch.cat(y_preds).tolist()
y_true = torch.cat(y_true).tolist()
return y_preds, y_true | 0b43a28046c1de85711f7db1b3e64dfd95f11905 | 3,530 |
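A minimal usage sketch, assuming a trained model and a test_loader DataLoader already exist (both names are assumptions, not part of the original snippet):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
y_preds, y_true = model_predict(model.to(device), test_loader, device)
accuracy = sum(p == t for p, t in zip(y_preds, y_true)) / len(y_true)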
def get_gifti_labels(gifti):
"""Returns labels from gifti object (*.label.gii)
Args:
gifti (gifti image):
Nibabel Gifti image
Returns:
labels (list):
labels from gifti object
"""
# labels = img.labeltable.get_labels_as_dict().values()
label_dict = gifti.labeltable.get_labels_as_dict()
labels = list(label_dict.values())
return labels | 3a4915ed50132a022e29cfed4e90905d05209484 | 3,532 |
from typing import Set
import os
def _get_mtimes(arg: str) -> Set[float]:
"""
Get the modification times of any converted notebooks.
Parameters
----------
arg
Notebook to run 3rd party tool on.
Returns
-------
Set
Modification times of any converted notebooks.
"""
return {os.path.getmtime(arg)} | c19e7ba43f6fb1d776f10e39f9fa46d05c947c72 | 3,534 |
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
"""
    Checks whether the string is shorter than the given number of characters.
    @param palavra: The word to check
    @param quantidade: The number of characters to check against
    @return: Returns True if the word is shorter than the given number of characters, and False otherwise
"""
tamanho = len(palavra)
eh_menor = False
if tamanho < quantidade:
eh_menor = True
return eh_menor | 827469606b0b93b78b63686465decbbbc63b9673 | 3,535 |
def check_diamond(structure):
"""
Utility function to check if the structure is fcc, bcc, hcp or diamond
Args:
structure (pyiron_atomistics.structure.atoms.Atoms): Atomistic Structure object to check
Returns:
bool: true if diamond else false
"""
cna_dict = structure.analyse.pyscal_cna_adaptive(
mode="total", ovito_compatibility=True
)
dia_dict = structure.analyse.pyscal_diamond_structure(
mode="total", ovito_compatibility=True
)
return (
cna_dict["CommonNeighborAnalysis.counts.OTHER"]
> dia_dict["IdentifyDiamond.counts.OTHER"]
) | ae082d6921757163cce3ddccbca15bf70621a092 | 3,536 |
def radix_sort(arr):
"""Sort list of numberes with radix sort."""
if len(arr) > 1:
buckets = [[] for x in range(10)]
lst = arr
output = []
t = 0
m = len(str(max(arr)))
while m > t:
for num in lst:
if len(str(num)) >= t + 1:
for b_num in range(10):
idx = num // 10**t % 10
if idx == b_num:
buckets[b_num].append(num)
break
else:
output.append(num)
lst = []
for bucket in buckets:
lst += bucket
buckets = [[] for x in range(10)]
t += 1
output += lst
return output
else:
return arr | 517ab99483ac1c6cd18df11dc1dccb4c502cac39 | 3,537 |
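A quick usage example; note that inputs of mixed digit lengths are handled by moving shorter numbers to the output early:

assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]
assert radix_sort([5]) == [5]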
def _rfc822_escape(header):
"""Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are eight spaces after each newline.
"""
lines = header.split('\n')
header = ('\n' + 8 * ' ').join(lines)
return header | 1a3cd02b057742db00ed741c40947cf4e19d1a86 | 3,540 |
import random
def generate_numbers():
"""
    Function to generate 3 random digits to be guessed.
    Generates 3 distinct random digits in a list so they can be compared to the user's digits.
    Return:
        str_digits (list): List with 3 random digits, each converted to a string
"""
# List comprehension to generate numbers from 0 to 9 and cast it as String
str_digits = [str(num) for num in range(10)]
# Shuffle randomly the list
random.shuffle(str_digits)
return str_digits[:3] | 8efd0f579a3a0b3dc5021cd762f9ad2f5774f6be | 3,544 |
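Because the function samples from a shuffled list of the digits 0-9, the three returned digits are always distinct:

digits = generate_numbers()   # e.g. ['7', '2', '9']
assert len(set(digits)) == 3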
def summate2(phasevec):
"""Calculate values b'(j^vec) for combining 2 phase vectors.
Parameter:
phasevec: tuple of two phasevectors
Example:
On input (([b_1(0),b_1(1),...,b_1(L-1)], L), ([b_2(0),b_2(1),...,b_2(L'-1)], L'))
give output [b_1(0)+b_2(0), b_1(0)+b_2(1),..., b_1(1)+b_2(0),...,b_1(L-1)+b_2(L'-1)]
"""
b = [] # array for values of summed phasevector
for i in range(phasevec[0][1]):
for j in range(phasevec[1][1]):
b.append(phasevec[0][0][i] + phasevec[1][0][j])
return b | 5150c2ee29a31438bf16104eaadeb85a01f54502 | 3,545 |
import torch
def tensor_to_longs(tensor: torch.Tensor):
    """Converts a tensor of longs to a NumPy array on the CPU."""
    assert tensor.dtype == torch.long
return tensor.detach().cpu().numpy() | ba1788be8e353936cfc3d604d940b78a96990fd4 | 3,546 |
def requiredOneInGroup(col_name, group, dm, df, *args):
"""
If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group.
"""
if col_name in df.columns:
# if the column name is present, return nothing
return None
else:
# if the column name is missing, return column name
return col_name | de46a4ef2f3e45381644db41d617d8c4c0845877 | 3,547 |
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id | a308931f418616417d10d3115b0f370352778533 | 3,548 |
def non_repeating(value, counts, q):
"""Finds the first non-repeating string in a stream.
Args:
        value (str): Latest string received in the stream
        counts (dict): Dictionary of strings containing the counts to determine if a string is repeated
        q (Queue): Container for all strings in the stream not yet determined to be repeated
Return:
str: First non-repeating string. None if all strings are repeated.
"""
q.put(value)
if value in counts:
counts[value] += 1
else:
counts[value] = 1
while not q.empty():
if counts[q.queue[0]] > 1:
q.get()
else:
return q.queue[0]
if q.empty():
return None | fc5ec025cffa0d7230d814d3677ae640cd652349 | 3,551 |
import torch
def energy_target(flattened_bbox_targets, pos_bbox_targets,
pos_indices, r, max_energy):
"""Calculate energy targets based on deep watershed paper.
Args:
flattened_bbox_targets (torch.Tensor): The flattened bbox targets.
pos_bbox_targets (torch.Tensor): Bounding box lrtb values only for
positions within the bounding box. We use this as an argument
to prevent recalculating it since it is used for other things as
well.
pos_indices (torch.Tensor): The indices of values in
flattened_bbox_targets which are within a bounding box
        r (float): Hyperparameter controlling the energy falloff; see Notes.
        max_energy (int): Max energy level possible.
Notes:
The energy targets are calculated as:
E_max \cdot argmax_{c \in C}[1 - \sqrt{((l-r)/2)^2 + ((t-b) / 2)^2}
/ r]
- r is a hyperparameter we would like to minimize.
- (l-r)/2 is the horizontal distance to the center and will be
assigned the variable name "horizontal"
- (t-b)/2 is the vertical distance to the center and will be
assigned the variable name "vertical"
- E_max is self.max_energy
- We don't need the argmax in this code implementation since we
already select the bounding boxes and their respective pixels in
a previous step.
Returns:
tuple: A 2 tuple with values ("pos_energies_targets",
"energies_targets"). Both are flattened but pos_energies_targets
only contains values within bounding boxes.
"""
horizontal = pos_bbox_targets[:, 0] - pos_bbox_targets[:, 2]
vertical = pos_bbox_targets[:, 1] - pos_bbox_targets[:, 3]
# print("Horizontals: {}".format(horizontal))
# print("Verticals: {}".format(vertical))
horizontal = torch.div(horizontal, 2)
vertical = torch.div(vertical, 2)
c2 = (horizontal * horizontal) + (vertical * vertical)
# print("c2: \n{}".format(c2))
# We use x * x instead of x.pow(2) since it's faster by about 30%
square_root = torch.sqrt(c2)
# print("Sqrt: \n{}".format(square_root))
type_dict = {'dtype': square_root.dtype,
'device': square_root.device}
pos_energies = (torch.tensor([1], **type_dict)
- torch.div(square_root, r))
pos_energies *= max_energy
pos_energies = torch.max(pos_energies,
torch.tensor([0], **type_dict))
pos_energies = pos_energies.floor()
energies_targets = torch.zeros(flattened_bbox_targets.shape[0],
**type_dict)
energies_targets[pos_indices] = pos_energies
# torch.set_printoptions(profile='full')
# print("Energy targets: \n {}".format(pos_energies))
# torch.set_printoptions(profile='default')
# input()
return pos_energies, energies_targets | 84bed4cc1a8bf11be778b7e79524707a49482b39 | 3,552 |
def enforce_excel_cell_string_limit(long_string, limit):
"""
Trims a long string. This function aims to address a limitation of CSV
files, where very long strings which exceed the char cell limit of Excel
cause weird artifacts to happen when saving to CSV.
"""
trimmed_string = ''
if limit <= 3:
limit = 4
if len(long_string) > limit:
trimmed_string = (long_string[:(limit-3)] + '...')
return trimmed_string
else:
return long_string | 9b8bcf4590dc73425c304c8d778ae51d3e3f0bf3 | 3,554 |
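A quick usage example; the '...' suffix keeps the trimmed result exactly at the limit:

assert enforce_excel_cell_string_limit("abcdefghij", 8) == "abcde..."
assert enforce_excel_cell_string_limit("short", 8) == "short"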
import requests
def is_at_NWRC(url):
"""
    Checks that we're on the NWRC network
"""
try:
r = requests.get(url)
code = r.status_code
except Exception as e:
code = 404
return code==200 | b909a9087940eb70b569ea6c686ff394e84a6ed9 | 3,555 |
import torch
def lmo(x,radius):
"""Returns v with norm(v, self.p) <= r minimizing v*x"""
shape = x.shape
if len(shape) == 4:
v = torch.zeros_like(x)
for first_dim in range(shape[0]):
for second_dim in range(shape[1]):
inner_x = x[first_dim][second_dim]
rows, cols = x[first_dim][second_dim].shape
v[first_dim][second_dim] = torch.zeros_like(inner_x)
maxIdx = torch.argmax(torch.abs(inner_x),0)
for col in range(cols):
v[first_dim][second_dim][maxIdx[col],col] = -radius*torch.sign(inner_x[maxIdx[col],col])
elif len(shape) == 3:
v = torch.zeros_like(x)
for first_dim in range(shape[0]):
inner_x = x[first_dim]
rows, cols = x[first_dim].shape
v[first_dim] = torch.zeros_like(inner_x)
maxIdx = torch.argmax(torch.abs(inner_x),0)
for col in range(cols):
v[first_dim][maxIdx[col],col] = -radius*torch.sign(inner_x[maxIdx[col],col])
elif len(shape)==2:
rows, cols = x.shape
v = torch.zeros_like(x)
maxIdx = torch.argmax(torch.abs(x),0)
for col in range(cols):
v[maxIdx[col],col] = -radius*torch.sign(x[maxIdx[col],col])
else :
v = torch.zeros_like(x)
maxIdx = torch.argmax(torch.abs(x))
v.view(-1)[maxIdx] = -radius * torch.sign(x.view(-1)[maxIdx])
return v | 24bda333cdd64df9a0b4fa603211036bbdad7200 | 3,556 |
def feature_norm_ldc(df):
"""
Process the features to obtain the standard metrics in LDC mode.
"""
df['HNAP'] = df['HNAC']/df['ICC_abs']*100
df['TCC'] = (df['ICC_abs']+df['DCC_abs'])/df['VOL']
df['ICC'] = df['ICC_abs']/df['VOL']
df['DCC'] = df['DCC_abs']/df['VOL']
return df | 60e3ef31c0be07179854de3191c2c75f4ec2cb4d | 3,557 |
import uuid
def get_tablespace_data(tablespace_path, db_owner):
"""This function returns the tablespace data"""
data = {
"name": "test_%s" % str(uuid.uuid4())[1:8],
"seclabels": [],
"spcacl": [
{
"grantee": db_owner,
"grantor": db_owner,
"privileges": [
{
"privilege_type": "C",
"privilege": True,
"with_grant": False
}
]
}
],
"spclocation": tablespace_path,
"spcoptions": [],
"spcuser": db_owner
}
return data | 3272e9b941d6bfb426ed754eed7f956c4c0933f4 | 3,559 |
def join_chunks(chunks):
"""empty all chunks out of their sub-lists to be split apart again by split_chunks(). this is because chunks now
looks like this [[t,t,t],[t,t],[f,f,f,][t]]"""
return [item for sublist in chunks for item in sublist] | a5daf41ba3fa6e7dafc4f05b29cc5aeaa397d5a5 | 3,560 |
def urls_equal(url1, url2):
"""
Compare two URLObjects, without regard to the order of their query strings.
"""
return (
url1.without_query() == url2.without_query()
and url1.query_dict == url2.query_dict
) | f2cbcf111cd5d02fa053fbd373d24b2dab047dfc | 3,561 |
def bytes_to_ints(bs):
"""
Convert a list of bytes to a list of integers.
>>> bytes_to_ints([1, 0, 2, 1])
[256, 513]
>>> bytes_to_ints([1, 0, 1])
Traceback (most recent call last):
...
ValueError: Odd number of bytes.
>>> bytes_to_ints([])
[]
"""
if len(bs) % 2 != 0:
raise ValueError("Odd number of bytes.")
pairs = zip(bs[::2], bs[1::2])
return [(a << 8) + b for a, b in pairs] | e8ac9ec973ff58973703e3e109da5b45d3f9d802 | 3,562 |
def _bgp_predict_wrapper(model, *args, **kwargs):
"""
Just to ensure that the outgoing shapes are right (i.e. 2D).
"""
mean, cov = model.predict_y(*args, **kwargs)
if len(mean.shape) == 1:
mean = mean[:, None]
if len(cov.shape) == 1:
cov = cov[:, None]
return mean, cov | 23bb62927e767057df94ef8b95b57874fc078d7f | 3,563 |
import re
def snake_to_camel(action_str):
"""
    Un-snake-cases and camel-cases all action and object names, then re-adds
    the numeric suffixes.
"""
if action_str == "toggle object on":
return "ToggleObjectOn"
elif action_str == "toggle object off":
return "ToggleObjectOff"
def camel(match):
return match.group(1)[0].upper() + match.group(1)[1:] + match.group(2).upper()
action_str = re.sub(r'(.*?) ([a-zA-Z])', camel, action_str)
if action_str.startswith("Look"): # LookDown_15, LookUp_15
action_str += "_15"
if action_str.startswith("Rotate"): # RotateRight_90, RotateLeft_90
action_str += "_90"
if action_str.startswith("Move"): # MoveAhead_25
action_str += "_25"
return action_str[0].upper() + action_str[1:] | c71745c02fc712e2b463e7bcb022bfca41c2efd4 | 3,564 |
def rename_columns(df):
"""This function renames certain columns of the DataFrame
:param df: DataFrame
:type df: pandas DataFrame
:return: DataFrame
:rtype: pandas DataFrame
"""
renamed_cols = {"Man1": "Manufacturer (PE)",
"Pro1": "Model (PE)",
"Man2": "Manufacturer (BAT)",
"Pro2": "Model (BAT)",
"Top": "Type [-coupled]",
'P_PV2AC_in': 'P_PV2AC_in [W]',
'P_PV2AC_out': 'P_PV2AC_out [W]',
'U_PV_min': 'U_PV_min [V]',
'U_PV_nom': 'U_PV_nom [V]',
'U_PV_max': 'U_PV_max [V]',
'U_MPP_min': 'U_MPP_min [V]',
'U_MPP_max': 'U_MPP_max [V]',
'P_AC2BAT_in': 'P_AC2BAT_in [W]',
'P_BAT2AC_out': 'P_BAT2AC_out [W]',
'P_PV2BAT_in': 'P_PV2BAT_in [W]',
'P_BAT2PV_out': 'P_BAT2PV_out [W]',
'P_PV2BAT_out': 'P_PV2BAT_out [W]',
'P_BAT2AC_in': 'P_BAT2AC_in [W]',
'U_BAT_min': 'U_BAT_min [V]',
'U_BAT_nom': 'U_BAT_nom [V]',
'U_BAT_max': 'U_BAT_max [V]',
'E_BAT_100': 'E_BAT_100 [kWh]',
'E_BAT_50': 'E_BAT_50 [kWh]',
'E_BAT_25': 'E_BAT_25 [kWh]',
'E_BAT_usable': 'E_BAT_usable [kWh]',
'eta_BAT_100': 'eta_BAT_100',
'eta_BAT_50': 'eta_BAT_50',
'eta_BAT_25': 'eta_BAT_25',
'eta_BAT': 'eta_BAT',
'P_SYS_SOC1_AC': 'P_SYS_SOC1_AC [W]',
'P_SYS_SOC1_DC': 'P_SYS_SOC1_DC [W]',
'P_SYS_SOC0_AC': 'P_SYS_SOC0_AC [W]',
'P_SYS_SOC0_DC': 'P_SYS_SOC0_DC [W]',
'P_PVINV_AC': 'P_PVINV_AC [W]',
'P_PERI_AC': 'P_PERI_AC [W]',
'P_PV2BAT_DEV_IMPORT': 'P_PV2BAT_DEV_IMPORT [W]',
'P_PV2BAT_DEV_EXPORT': 'P_PV2BAT_DEV_EXPORT [W]',
'P_BAT2AC_DEV_IMPORT': 'P_BAT2AC_DEV_IMPORT [W]',
'P_BAT2AC_DEV_EXPORT': 'P_BAT2AC_DEV_EXPORT [W]',
't_DEAD': 't_DEAD [s]',
't_SETTLING': 't_SETTLING [s]'
}
return df.rename(columns=renamed_cols) | 9c22747d7c6da20cab1593388db5575a38aa313f | 3,565 |
import requests
import json
def get_github_emoji(): # pragma: no cover
"""Get Github's usable emoji."""
try:
resp = requests.get(
'https://api.github.com/emojis',
timeout=30
)
except Exception:
return None
return json.loads(resp.text) | 533a56e2e59b039cbc45ab5acb7ab4e8487e4ad9 | 3,566 |
def from_binary(bin_data: str, delimiter: str = " ") -> bytes:
"""Converts binary string into bytes object"""
if delimiter == "":
data = [bin_data[i:i+8] for i in range(0, len(bin_data), 8)]
else:
data = bin_data.split(delimiter)
data = [int(byte, 2) for byte in data]
return bytes(data) | f16706da2d5b9ae5984a35a13ebd02ae94581153 | 3,567 |
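A usage example; "Hi" is 0x48 0x69 in ASCII:

assert from_binary("01001000 01101001") == b"Hi"
assert from_binary("0100100001101001", delimiter="") == b"Hi"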
def one_on_f_weight(f, normalize=True):
""" Literally 1/f weight. Useful for fitting linspace data in logspace.
Parameters
----------
f: array
Frequency
normalize: boolean, optional
Normalized the weight to [0, 1].
Defaults to True.
Returns
-------
weight: array
The 1/f weight.
"""
weight = 1/f
if normalize:
weight /= max(weight)
    return weight | 54301aa7480e6f3520cbfcccfa463a2a02d34b9c | 3,568 |
import numpy
def fft_in_range(audiomatrix, startindex, endindex, channel):
"""
Do an FFT in the specified range of indices
The audiomatrix should have the first index as its time domain and
second index as the channel number. The startindex and endinex
select the time range to use, and the channel parameter selects
which channel to do the FFT on.
Returns a vector of data in the frequency domain
"""
n = endindex - startindex
indat = audiomatrix[startindex:endindex, channel]
    outdat = numpy.fft.fft(indat)[:n // 2] / n  # keep the first n//2 frequency bins
return outdat | 30ce104795d0809f054439ba32f47d33528ecbff | 3,569 |
def get_at_content(sequence):
"""Return content of AT in sequence, as float between 0 and 1, inclusive. """
sequence = sequence.upper()
a_content = sequence.count('A')
t_content = sequence.count('T')
return round((a_content+t_content)/len(sequence), 2) | 6316d29cdb9d7129f225f2f79a50485fb6919e32 | 3,570 |
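A usage example; counting is case-insensitive and the result is rounded to two decimals:

assert get_at_content("ATGC") == 0.5
assert get_at_content("aattgg") == 0.67   # 4/6 rounded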
def replace(data, match, repl):
"""Replace values for all key in match on repl value.
Recursively apply a function to values in a dict or list until the input
data is neither a dict nor a list.
"""
if isinstance(data, dict):
return {
key: repl if key in match else replace(value, match, repl)
for key, value in data.items()
}
if isinstance(data, list):
return [replace(item, match, repl) for item in data]
return data | 1b3dc8ac7521ec199cf74ebc8f4d8777827ab9fc | 3,572 |
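A usage sketch with illustrative values, masking a key at every nesting level:

cfg = {"password": "hunter2", "db": {"password": "secret", "host": "localhost"}}
masked = replace(cfg, {"password"}, "***")
assert masked == {"password": "***", "db": {"password": "***", "host": "localhost"}}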
import time
def get_current_date() ->str:
"""Forms a string to represent the current date using the time module"""
if len(str(time.gmtime()[2])) == 1:
current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-0' + str(time.gmtime()[2])
else:
current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-' + str(time.gmtime()[2])
return current_date | 480d44fc0153407960eacb875474fc02cb17c6c3 | 3,573 |
import re
def valid_text(val, rule):
"""Return True if regex fully matches non-empty string of value."""
if callable(rule):
match = rule(val)
else:
match = re.findall(rule, val)
return (False if not match or not val else
True if match is True else
match[0] == val) | aa6f6ac3a3210d34b44eba1f2e8e8cff851ff038 | 3,577 |
def body_open():
"""open the main logic"""
return " @coroutine\n def __query(__connection):" | d8792f2b3237f024f20a12c6b7d371af1dbdb21e | 3,578 |
import os
import pickle
def load_tweet_users_posted_rumours():
"""
load user history (whether a user posted any rumour in the past)
:return: dict {timestamp at which the user posted a rumour: user_id}
"""
    with open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tweet_users_posted_rumours'), 'rb') as infile:
        rumour_users = pickle.load(infile)
    return rumour_users | 626fc152aae0b38aa4531dafb4d917100bad37c8 | 3,579 |
def check_column(board):
"""
list -> bool
This function checks if every column has different numbers and returns
    True if yes, and False if not.
>>> check_column(["**** ****", "***1 ****", "** 3****", \
"* 4 1****", " 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
False
>>> check_column(["**** ****", "***1 ****", "** 3****", \
"* 4 1****", " 9 5 ", " 6 83 *", "3 5 **", " 8 2***", " 2 ****"])
True
"""
length = len(board)
for i in range(length):
one_line = []
for line in board:
if line[i] == '*' or line[i] == ' ':
continue
if line[i] in one_line:
return False
else:
one_line.append(line[i])
return True | b903d1b589cd2981cc374ff47f985151d341e7ec | 3,580 |
def capacitorCorrection(m_cap):
"""Apply a correction to the measured capacitance value
to get a value closer to the real value.
One reason this may differ is measurement varies based on frequency.
The measurements are performed at 30Hz but capacitance values
are normally quoted for 1kHz.
    The coefficients are based on multiple linear regression in R
using rms-based measurements of capacitors vs readings from multimeter
plus a fudge for small values!
"""
### These are based on 30Hz sine wave with 2x2k2 + 2x10nF
### + internal_res = 200 and no esr correction
###return -7.599263e-08 + 9.232542e-01 * m_cap + 1.690527e+04 * m_cap * m_cap
### 31Hz sine 2x2k2 + 2x10nF with internal_res = 140 no esr correction
poly2_cor = -6.168148e-08 + 8.508691e-01 * m_cap + 2.556320e+04 * m_cap * m_cap
return poly2_cor if poly2_cor > 30e-9 else m_cap * 0.2 | 1942c177d534bc5533bb636e10f0107c1230c81d | 3,581 |
from datetime import datetime
def read_raw(omega):
"""Read the raw temperature, humidity and dewpoint values from an OMEGA iServer.
Parameters
----------
omega : :class:`msl.equipment.record_types.EquipmentRecord`
The Equipment Record of an OMEGA iServer.
Returns
-------
:class:`str`
The serial number of the OMEGA iServer.
:class:`dict`
The data.
"""
nprobes = omega.connection.properties.get('nprobes', 1)
nbytes = omega.connection.properties.get('nbytes')
error = None
try:
cxn = omega.connect()
thd = cxn.temperature_humidity_dewpoint(probe=1, nbytes=nbytes)
if nprobes == 2:
thd += cxn.temperature_humidity_dewpoint(probe=2, nbytes=nbytes)
cxn.disconnect()
except Exception as e:
error = str(e)
thd = [None] * (nprobes * 3)
now_iso = datetime.now().replace(microsecond=0).isoformat(sep='T')
data = {
'error': error,
'alias': omega.alias,
'datetime': now_iso,
'report_number': None,
}
if len(thd) == 3:
data.update({
'temperature': thd[0], 'humidity': thd[1], 'dewpoint': thd[2]
})
else:
data.update({
'temperature1': thd[0], 'humidity1': thd[1], 'dewpoint1': thd[2],
'temperature2': thd[3], 'humidity2': thd[4], 'dewpoint2': thd[5]
})
return omega.serial, data | 105e07d26774288319459ebdc485d75c3a909212 | 3,582 |
from datetime import datetime, timedelta
def determine_dates_to_query_on_matomo(dates_in_database):
"""
Determines which dates need to be queried on Matomo to update the dataset.
"""
# determines which dates are missing from the database and could be queried on Matomo
# NOTE: start date was set to 2020-05-01 as May is when the portal started to be live
start_date = datetime.strptime('2020-05-01', '%Y-%m-%d').date()
end_date = (datetime.today() - timedelta(1)).date()
delta = timedelta(days=1)
dates_to_process = []
while start_date <= end_date:
if str(start_date) not in dates_in_database:
dates_to_process.append(str(start_date))
start_date += delta
return dates_to_process | 40db63fb7ff339d5c306df37cf0f4b1765b91f90 | 3,583 |
def k8s_stats_response():
"""
Returns K8s /stats/summary endpoint output from microk8s on Jetson Nano.
"""
with open("tests/resources/k8s_response.json", "r") as response_file:
response = response_file.read()
return response | 68413108eeea6bdd80a782b962f3a5c97e1a4b73 | 3,584 |
def logical_array(ar):
"""Convert ndarray (int, float, bool) to array of 1 and 0's"""
out = ar.copy()
out[out!=0] = 1
return out | 74d96d519929ed7f5ddfd92b0fbcef4741a38359 | 3,586 |
def _get_chinese_week(localtime):
"""获取星期和提醒"""
chinese_week = ["一", "二", "三", "四", "五", "六", "日"]
tm_w_day = localtime.tm_wday
extra_msg = "<green>当前正是周末啦~</green>" if tm_w_day in [5, 6] else "Other"
if extra_msg == "Other":
go_week = 4 - tm_w_day
extra_msg = f"<yellow>还有 {go_week} 天周末</yellow>" if go_week != 0 else "<blue>明天就是周末啦~坚持摸鱼~</blue>"
return chinese_week[tm_w_day], extra_msg | 0a66bcf741c0d2e3cc9a238b5cb879c89333cc6b | 3,588 |
def m_college_type(seq):
"""
    Gets the type information of a school.
    When the school type is a 985/211 project institution:
        :param seq: ["985,211工程院校", "本科"]
        :return: "985,211工程院校"
    When the school type is a 211 project institution:
        :param seq: ["211工程院校", "硕士"]
        :return: "211工程院校"
    When the school is an ordinary undergraduate or vocational college:
        If the person's education level is a doctorate (博士), master's (硕士)
        or bachelor's (本科), the returned school type is "本科" (undergraduate).
        :param seq: ["****", "硕士"]
        :return: "本科"
        If the person's education level is vocational (专科),
        the returned school type is "专科" (vocational).
        :param seq: ["****", "专科"]
        :return: "专科"
"""
if "985" in seq[0]:
tmp = "985,211工程院校"
return tmp
elif "211" in seq[0] and "985" not in seq[0]:
tmp = "211工程院校"
return tmp
else:
if seq[1] in ["博士", "硕士", "本科"]:
tmp = "本科"
return tmp
else:
tmp = "专科"
return tmp | bf72f60c51a67dd3e18a7dd1957bc2beb4f933fd | 3,589 |
import json
def user_config(filename):
"""user-provided configuration file"""
try:
with open(filename) as file:
return json.loads(file.read(None))
except FileNotFoundError as fnf:
raise RuntimeError(f"File '{filename}' could not be found") from fnf
except json.JSONDecodeError as jsond:
raise RuntimeError(f"Error while parsing '{filename}'") from jsond | a6aa05d76b4aaa12c02ff97e4ab5ba4ba1245324 | 3,590 |
def room_from_loc(env, loc):
"""
Get the room coordinates for a given location
"""
if loc == 'north':
return (1, 0)
if loc == 'south':
return (1, 2)
if loc == 'west':
return (0, 1)
if loc == 'east':
return (2, 1)
if loc == 'left':
return (1, 0)
if loc == 'right':
return (1, 2)
if loc == 'front':
return (2, 1)
if loc == 'behind':
return (0, 1)
# By default, use the central room
return (1, 1) | 75192c47fd8d4b56332b35ec7c3b355927e50ca2 | 3,591 |
def shifted(x):
"""Shift x values to the range [-0.5, 0.5)"""
return -0.5 + (x + 0.5) % 1 | c40585748120af5d0acd85e4fed49f0575a92a3d | 3,592 |
def run_node(node):
"""Python multiprocessing works strangely in windows. The pool function needed to be
defined globally
Args:
node (Node): Node to be called
Returns:
rslts: Node's call output
"""
return node.run_with_loaded_inputs() | a0f52020db20b4b67e83599bc0fb6c86ec2f9514 | 3,593 |
def get_base_required_fields():
""" Get required fields for base asset from UI.
Fields required for update only: 'id', 'uid', ['lastModifiedTimestamp', 'location', 'events', 'calibration']
Present in input, not required for output:
'coordinates', 'hasDeploymentEvent', 'augmented', 'deployment_numbers', 'deployment_number',
'Ref Des', 'depth',
2016-08-24: removed 'coordinates'
2016-08-26: removed 'augmented', 'Ref Des', 'remoteDocuments', 'hasDeploymentEvent',
2016-09-26: removed 'tense',
2016-10-11: removed 'tense',
"""
base_required_fields = [
'assetInfo',
'assetType',
'dataSource',
'deployment_numbers',
'deployment_number',
'depth',
'editPhase',
'latitude',
'longitude',
'manufactureInfo',
'mobile',
'notes',
'orbitRadius',
'partData',
'physicalInfo',
'purchaseAndDeliveryInfo',
'ref_des',
'remoteResources',
'uid'
]
return base_required_fields | 273c539d0c0b0da249e2bb171107aa775ce52ddf | 3,594 |
import os
import sys
def find_package(dir):
"""
Given a directory, finds the equivalent package name. If it
is directly in sys.path, returns ''.
"""
dir = os.path.abspath(dir)
orig_dir = dir
    # materialize into a set: in Python 3, a bare map iterator would be
    # exhausted by the first 'in' check and every later lookup would fail
    path = set(map(os.path.abspath, sys.path))
packages = []
last_dir = None
while 1:
if dir in path:
return '.'.join(packages)
packages.insert(0, os.path.basename(dir))
dir = os.path.dirname(dir)
if last_dir == dir:
raise ValueError(
"%s is not under any path found in sys.path" % orig_dir)
last_dir = dir | 0bc904165620daa2f408a3f1c526bfe4a34def97 | 3,596 |
def Window(node, size=-1, full_only=False):
"""Lazy wrapper to collect a window of values. If a node is executed 3 times,
returning 1, 2, 3, then the window node will collect those values in a list.
Arguments:
node (node): input node
size (int): size of windows to use
full_only (bool): only return if list is full
"""
def foo(node=node, size=size, full_only=full_only):
if size == 0:
return node.value()
if ret._accum is None:
ret._accum = [node.value()]
elif ret.dependencyIsDirty(node):
ret._accum.append(node.value())
if size > 0:
ret._accum = ret._accum[-size:]
if full_only and len(ret._accum) == size:
return ret._accum
elif full_only:
return None
return ret._accum
# make new node
ret = node._gennode("Window[{}]".format(size if size > 0 else "∞"), foo, [node])
ret._accum = None
return ret | 1f85b576455f3b379e41a7247ff486281bf21f8f | 3,597 |
def add_colon(in_str):
"""Add colon after every 4th character."""
return ':'.join([in_str[i:i+4] for i in range(0, len(in_str), 4)]) | fa4258aa9d684a087d2a81ae09a2702d6e58e3e1 | 3,598 |
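A usage example (e.g. formatting a condensed IPv6-style string):

assert add_colon("20010db885a3") == "2001:0db8:85a3"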
def get_alt_pos_info(rec):
"""Returns info about the second-most-common nucleotide at a position.
This nucleotide will usually differ from the reference nucleotide, but it
may be the reference (i.e. at positions where the reference disagrees with
the alignment's "consensus").
This breaks ties arbitrarily.
Parameters
==========
rec: dict
pysamstats record for a given position in an alignment produced
by stat_variation().
Returns
=======
(cov, alt nt freq, alt nt): tuple of (int, int, str)
Describes the second-most-common nucleotide at a position.
The first entry in this tuple is the (mis)match coverage at this
position. This is an integer defined as the sum of A, C, G, T
nucleotides at this position (note that this excludes degenerate
nucleotides like N -- we could change this in the future if that'd be
useful, I suppose). Note that this coverage could be zero, if no reads
are aligned to this specific position.
The second entry is the raw frequency of this nucleotide
at this position: this will be an integer greater than or equal to 0.
This is also referred to in the paper, etc. as alt(pos).
The third entry is just the alternate nucleotide (one of A, C, G, T),
represented as a string. This is returned for reference -- as of
writing this isn't actually needed for Pleuk itself, but I have other
code outside of Pleuk that benefits from this!
"""
cov = rec["A"] + rec["C"] + rec["G"] + rec["T"]
ordered_nts = sorted("ACGT", key=rec.get)
# The literal nucleotide used in the numerator of freq(pos): one of A, C,
# G, T
alt_nt = ordered_nts[-2]
# The raw frequency (in counts) of alt_nt. An integer >= 0.
alt_nt_freq = rec[alt_nt]
return (cov, alt_nt_freq, alt_nt) | 3abe3fcbbf0ddbccb44025f2e476f77dc3e8abf9 | 3,599 |
import threading
def handle_readable(client):
"""
Return True: The client is re-registered to the selector object.
Return False: The server disconnects the client.
"""
data = client.recv(1028)
if data == b'':
return False
client.sendall(b'SERVER: ' + data)
print(threading.active_count())
return True | 9a77bb893a5da4e76df5593feb6ecf49022e6ef3 | 3,601 |
import numpy
def create_objective(dist, abscissas):
"""Create objective function."""
abscissas_ = numpy.array(abscissas[1:-1])
def obj(absisa):
"""Local objective function."""
out = -numpy.sqrt(dist.pdf(absisa))
out *= numpy.prod(numpy.abs(abscissas_ - absisa))
return out
return obj | c63eeadffd067c2a94470ddbf03fb009265fbbbc | 3,602 |
def _is_segment_in_block_range(segment, blocks):
"""Return whether the segment is in the range of one of the blocks."""
for block in blocks:
if block.start <= segment.start and segment.end <= block.end:
return True
return False | e7509f18f0a72cf90fb1aa643c77c2e13154f0d0 | 3,603 |
def generate_episode(sim, policy, horizon=200):
"""
Generate an episode from a policy acting on an simulation.
Returns: sequence of state, action, reward.
"""
obs = sim.reset()
policy.reset() # Reset the policy too so that it knows its the beginning of the episode.
states, actions, rewards = [], [], []
states.append(obs)
for _ in range(horizon):
action = policy.act(obs)
obs, reward, done, _ = sim.step(action)
states.append(obs)
actions.append(action)
rewards.append(reward)
if done:
break
states.pop() # Pop off the terminating state
return states, actions, rewards | 73a0bbb2703c047d3305e93dd2a340c83db12277 | 3,605 |
import torch
def disparity_to_idepth(K, T_right_in_left, left_disparity):
"""Function athat transforms general (non-rectified) disparities to inverse
depths.
"""
assert(len(T_right_in_left.shape) == 3)
# assert(T_right_in_left.shape[0] == self.batch_size)
assert(T_right_in_left.shape[1] == 4)
assert(T_right_in_left.shape[2] == 4)
assert(len(K.shape) == 3)
# assert(K.shape[0] == self.batch_size)
assert(K.shape[1] == 4)
assert(K.shape[2] == 4)
batch_size = K.shape[0]
rows = left_disparity.shape[-2]
cols = left_disparity.shape[-1]
# Create tensor of homogeneous pixel coordinates of size (batch, 3, rows*cols).
y_grid, x_grid = torch.meshgrid(torch.arange(0, rows, device=left_disparity.device),
torch.arange(0, cols, device=left_disparity.device))
xys = torch.cat([x_grid.reshape(-1, rows * cols).float(),
y_grid.reshape(-1, rows * cols).float()], dim=0)
xys = xys.unsqueeze(0).repeat(batch_size, 1, 1)
ones = torch.ones(batch_size, 1, rows * cols, dtype=torch.float32, device=xys.device)
xyz_pix = torch.cat([xys, ones], 1)
Kinv = torch.inverse(K)
T_left_in_right = torch.inverse(T_right_in_left)
R_left_in_right = T_left_in_right[:, :3, :3]
KRKinv = torch.matmul(K[:, :3, :3], torch.matmul(R_left_in_right, Kinv[:, :3, :3]))
KRKinv3 = KRKinv[:, 2, :] # (batch, 3)
KRKinv3_rep = torch.unsqueeze(KRKinv3, dim=2).repeat(1, 1, rows*cols) # (batch, 3, rows*cols)
KT_left_in_right = torch.matmul(K, T_left_in_right)
Kt = KT_left_in_right[:, :3, 3] # (batch, 3)
Kt_rep = torch.unsqueeze(Kt, dim=2).repeat(1, 1, rows*cols) # (batch, 3, rows*cols)
# (batch, rows*cols)
left_disparity_flat = left_disparity.reshape(batch_size, -1)
# Compute pixels at infinite depth.
pix_inf = torch.matmul(KRKinv, xyz_pix) # (batch, 3, rows*cols)
pix_inf[:, 0, :] /= pix_inf[:, 2, :]
pix_inf[:, 1, :] /= pix_inf[:, 2, :]
pix_inf[:, 2, :] /= pix_inf[:, 2, :]
# Compute epipolar lines (must point from far to near depth).
pix_far = torch.matmul(KRKinv, xyz_pix * 1e2)
pix_far += Kt_rep
pix_far[:, 0, :] /= pix_far[:, 2, :]
pix_far[:, 1, :] /= pix_far[:, 2, :]
pix_far[:, 2, :] /= pix_far[:, 2, :]
epi_diff = pix_far[:, :2, :] - pix_inf[:, :2, :]
epi_norm = torch.sqrt(torch.sum(epi_diff**2, dim=1))
epiline = epi_diff[:, :2, :] # (batch, 2, rows*cols)
epiline[:, 0, :] /= (epi_norm + 1e-6)
epiline[:, 1, :] /= (epi_norm + 1e-6)
mask = epi_norm < 1e-6
mask = mask.reshape(batch_size, 1, rows, cols)
# Convert disparity to idepth.
# (batch, rows*cols)
w = KRKinv3_rep[:, 0, :] * xyz_pix[:, 0, :] + \
KRKinv3_rep[:, 1, :] * xyz_pix[:, 1, :] + \
KRKinv3_rep[:, 2, :]
# (batch, rows*cols)
A0 = Kt_rep[:, 0, :] - Kt_rep[:, 2, :]*(pix_inf[:, 0, :] + left_disparity_flat * epiline[:, 0, :])
A1 = Kt_rep[:, 1, :] - Kt_rep[:, 2, :]*(pix_inf[:, 1, :] + left_disparity_flat * epiline[:, 1, :])
b0 = w * left_disparity_flat * epiline[:, 0, :]
b1 = w * left_disparity_flat * epiline[:, 1, :]
ATA = A0 * A0 + A1 * A1
ATb = A0 * b0 + A1 * b1
left_idepthmap = ATb / ATA
left_idepthmap = left_idepthmap.reshape(batch_size, 1, rows, cols)
# Set bad points to 0 idepth.
left_idepthmap = (~mask).float() * left_idepthmap
return left_idepthmap | 454bda2fd9ec4e4ef5615dbdb054c2f3b454f31a | 3,607 |
import math
def foo(X):
"""The function to evaluate"""
ret = []
for x in X:
        r = 2 * math.sqrt(sum(n * n for n in x))
        if r == 0:
            ret.append(0)
        else:
            ret.append(math.sin(r) / r)
return ret | 7b241cf45757cdf9a5a28ee56c59ee41099ccb1e | 3,608 |
def pre_process(image):
"""
    Inverts the pixel intensities of 'image' (to compensate for the conversion to an image with imwrite).
"""
return 1 - image * 255 | 7e7227930567c31874d966ce18aeeffa9b73e646 | 3,613 |
import os
def is_source_path(path):
"""Check if path is source code path.
Parameters
----------
path : str
A possible path
Returns
-------
valid : bool
Whether path is a possible source path
"""
if os.path.exists(path):
return True
if path.find("\n") != -1:
return False
spath = path.rsplit(".", 1)
return len(spath) == 2 and spath[1].strip() == spath[1] | f932049eb70275ad8e394ec21af8bbf9ef2c3880 | 3,614 |
def value_or_dash(value):
"""Converts the given value to a unicode dash if the value does
not exist and does not equal 0."""
if not value and value != 0:
return u'\u2013'.encode('utf-8')
return value | 8cadbfd8dcfad9dfeb4112cb8537f0e0d5de49ba | 3,615 |
import pkg_resources
def get_resource(name):
"""Convenience method for retrieving a package resource."""
return pkg_resources.resource_stream(__name__, name) | 63aada8f6e99956b770bd9ea7f737d90432c3f90 | 3,617 |
def error_message() -> str:
"""Error message for invalid input"""
return 'Invalid input. Use !help for a list of commands.' | 2ffea48dd495d464264bc657ca62cfe6043a1084 | 3,618 |
from typing import Counter
def palindrome_permutation(string):
"""
All palindromes follow the same rule, they have at most one letter whose
count is odd, this letter being the "pivot" of the palindrome. The letters
with an even count can always be permuted to match each other across the
pivot.
"""
string = string.strip().lower()
c = Counter(string)
l = [1 for letter_count in c.values() if letter_count % 2 == 1]
return sum(l) < 2 | a1e5721d73e9773d802b423747277dd43ee5983f | 3,620 |
def symmetrise_AP(AP):
"""
No checks on this since this is a deep-inside-module helper routine.
AP must be a batch of matrices (n, 1, N, N).
"""
return AP + AP.transpose(2, 3) | 4a993f42e576656ec5f450c95af969722f10a58d | 3,621 |
def get_argument(value, arg):
"""Get argument by variable"""
return value.get(arg, None) | 0abd48a3a241ab1076c3ca19241df5b7b4346224 | 3,622 |
def get_target_proportions_of_current_trial(individuals, target):
"""Get the proportion waiting times within the target for a given trial of
a threshold
Parameters
----------
individuals : object
A ciw object that contains all individuals records
Returns
-------
int
all ambulance patients that finished the simulation
int
        all ambulance patients whose waiting times were within the target
int
all other patients that finished the simulation
int
        all other patients whose waiting times were within the target
"""
ambulance_waits, ambulance_target_waits = 0, 0
other_waits, other_target_waits = 0, 0
for individual in individuals:
ind_class = len(individual.data_records) - 1
rec = individual.data_records[-1]
if rec.node == 2 and ind_class == 0:
other_waits += 1
if rec.waiting_time < target:
other_target_waits += 1
elif rec.node == 2 and ind_class == 1:
ambulance_waits += 1
if rec.waiting_time < target:
ambulance_target_waits += 1
return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits | 95f3781677f3ca7bb620488778b52502783c6eb9 | 3,623 |
def how_many(aDict):
"""
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
"""
return sum(len(value) for value in aDict.values()) | ed1729b55411f29626dfe61c6853bc19813ceedc | 3,624 |
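A quick usage example:

assert how_many({'a': [1, 2], 'b': [3]}) == 3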
def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a keypoint using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
x, y, a, s = keypoint
x1, y1, x2, y2 = crop_coords
cropped_keypoint = [x - x1, y - y1, a, s]
return cropped_keypoint | 5a2365a611275fea4d0f5d031127426c88c43905 | 3,625 |
def string_between(string, start, end):
"""
Returns a new string between the start and end range.
Args:
string (str): the string to split.
start (str): string to start the split at.
end (str): string to stop the split at.
Returns:
new string between start and end.
"""
try:
return str(string).split(str(start), 1)[1].split(str(end))[0]
except IndexError:
return "" | fc6f2a3def4112140539c90abe6304f5daa8c1f4 | 3,626 |
def standardize_str(string):
"""Returns a standardized form of the string-like argument.
This will convert from a `unicode` object to a `str` object.
"""
return str(string) | ea007582363cd1eeee34d4b342a39581fd876c3a | 3,627 |
def infinitegenerator(generatorfunction):
"""Decorator that makes a generator replay indefinitely
An "infinite" parameter is added to the generator, that if set to True
makes the generator loop indifenitely.
"""
def infgenerator(*args, **kwargs):
if "infinite" in kwargs:
infinite = kwargs["infinite"]
del kwargs["infinite"]
else:
infinite = False
if infinite == True:
while True:
for elem in generatorfunction(*args, **kwargs):
yield elem
else:
for elem in generatorfunction(*args, **kwargs):
yield elem
return infgenerator | 6915a16dd765195e0344b5ebd255c1aca7737699 | 3,628 |
import os
def get_files(directory):
"""Gets full path of all files within directory, including subdirectories
Returns a list of paths"""
file_paths = []
for root, dirs, files in os.walk(directory):
for f in files:
filepath = os.path.join(root, f)
file_paths.append(filepath)
return file_paths | f0ad1ef92c5e00ef637c0b874acefa3344e64395 | 3,631 |
def load_labels(label_path):
"""
    Load labels for VOC2012. Labels must be provided as a txt file like label.txt.
    The label path can be changed when running the training code; use --label_path.
    label : { label name : label color }
    index : [ [label color], [label color] ]
"""
with open(label_path, "r") as f:
lines = f.readlines()
label = {}
index = []
for line in lines:
sp = line.split()
label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]
index.append([int(sp[3]), int(sp[2]), int(sp[1])])
return label, index | 9c0388eb533293912b95ca020cbf3c9e9cb331d3 | 3,633 |
import argparse
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser(description='Install cert on device.')
parser.add_argument(
'-n', '--cert-name', default='dummycert', help='certificate name')
parser.add_argument(
'--overwrite', default=False, action='store_true',
help='Overwrite certificate file if it is already installed')
parser.add_argument(
'--remove', default=False, action='store_true',
help='Remove certificate file if it is installed')
parser.add_argument(
'--device-id', help='device serial number')
parser.add_argument(
'--adb-path', help='adb binary path')
parser.add_argument(
'cert_path', help='Certificate file path')
return parser.parse_args() | 94070336606f7ed68dfb479bcdd6ee79629c6888 | 3,635 |
import click
def num_physical_shards_option(f):
"""
Function to parse/validate the --num-physical-shards CLI option to dirbs-db repartition.
:param f: obj
:return: options obj
"""
def callback(ctx, param, value):
if value is not None:
if value < 1 or value > 100:
raise click.BadParameter('Number of physical IMEI shards must be between 1 and 100')
return value
return click.option('--num-physical-shards',
expose_value=True,
type=int,
help='The number of physical IMEI shards that tables in DIRBS Core should be split into.',
callback=callback)(f) | f53eb8003533da0f8562456517110ad92beeea01 | 3,637 |
def row_to_columns(row):
"""Takes a row as a string and returns it as a list of columns."""
return [column for column in row.split() if column.strip() != ''] | 837477f2e9c160b93c339a9753e0598ac56c819e | 3,639 |
def test_shift_to_other_frame(hlwm, direction, frameindex, clients_per_frame):
"""
in a frame grid with 3 columns, where the middle column has 3 rows, we put
the focused window in the middle, and then invoke 'shift' with the given
'direction'. Then, it is checked that the window stays focused but now
resides in the frame with the given 'frameindex'
"""
winid, _ = hlwm.create_client()
def otherclients():
# put 'otherclients'-many clients in every other frame
winids = hlwm.create_clients(clients_per_frame)
return ' '.join(winids)
layout_131 = f"""
(split horizontal:0.66:0
(split horizontal:0.5:1
(clients vertical:0 {otherclients()})
(split vertical:0.66:0
(split vertical:0.5:1
(clients vertical:0 {otherclients()})
(clients vertical:0 {winid}))
(clients vertical:0 {otherclients()})))
(clients vertical:0 {otherclients()}))
"""
hlwm.call(['load', layout_131])
assert hlwm.attr.clients.focus.winid() == winid
assert hlwm.attr.tags.focus.tiling.focused_frame.index() == '0101'
hlwm.call(['shift', direction])
# the window is still focused
assert hlwm.attr.clients.focus.winid() == winid
# but it's now in another frame
assert hlwm.attr.tags.focus.tiling.focused_frame.index() == frameindex | afeb04d178bd729fccae01118bc59e8e7b0c09dc | 3,642 |
def is_circular(linked_list):
"""
Determine whether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
The way we'll do this is by having two pointers, called "runners", moving
through the list at different rates. Typically we have a "slow" runner
which moves at one node per step and a "fast" runner that moves at two
nodes per step.
If a loop exists in the list, the fast runner will eventually move behind
the slow runner as it moves to the beginning of the loop. Eventually it will
catch up to the slow runner and both runners will be pointing to the same
node at the same time. If this happens then you know there is a loop in
the linked list. Below is an example where we have a slow runner
and a fast runner (the red arrow).
"""
slow = linked_list.head
fast = linked_list.head
#as fast runner will reach end first if there is no loop so
#adding a None check on just fast should be enough
while fast and fast.next:
slow = slow.next
#move fast runner 2 times to make it fast as compared to slow runner
fast = fast.next.next
if fast == slow:
return True
# If we get to a node where fast doesn't have a next node or doesn't exist itself,
# the list has an end and isn't circular
return False | 5a641df602f983de78c9c74b825847412aa54c21 | 3,645 |
def precisionatk_implementation(y_true, y_pred, k):
"""Fujnction to calculate precision at k for a given sample
Arguments:
y_true {list} -- list of actual classes for the given sample
y_pred {list} -- list of predicted classes for the given sample
k {[int]} -- top k predictions we are interested in
"""
# if k = 0 return 0 as we should never have k=0
# as k is always >=1
if k == 0:
return 0
# as we are interested in top k predictions
y_pred = y_pred[:k]
# convert predictions to set
pred_set = set(y_pred)
# convert actual values to set
true_set = set(y_true)
# find comon values in both
common_values = pred_set.intersection(true_set)
# return length of common values over k
return len(common_values) / len(y_pred[:k]) | 945caa95b32681939569ca675475e2527dbdee78 | 3,647 |
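A worked example: with true classes {1, 2, 3}, the top-3 predictions below hit two of them:

assert precisionatk_implementation([1, 2, 3], [2, 5, 1, 3], 3) == 2 / 3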
def AskNumber(text="unknown task"):
"""
Asks the user to interactively input a number (float or int) at any point in the script, and returns the input number.
| __option__ | __description__
| --- | ---
| *text | an optional string to identify for what purpose the chosen number will be used.
"""
    def ValidateNumber(text):
        raw = input("\n\nwrite a comma or integer number to use for " + str(text) + " (example: 15 or 15.83)\nnumber = ")
        # input() returns a string in Python 3, so parse it explicitly
        try:
            innumber = int(raw)
        except ValueError:
            try:
                innumber = float(raw)
            except ValueError:
                print("""\n---> error: the number must be either a floating point comma or integer number""")
                return ValidateNumber(text)
        return innumber
    return ValidateNumber(text) | 41949d0a2e2d87b5cdb26d2db9bff9a64fbeeb1d | 3,648 |
def get_unique_output_values(signals):
"""
Based on segment length, determine how many of the possible four
uniquely identifiable digits are in the set of signals.
"""
unique_digit_count = 0
for signal in signals:
for digit in signal["output"]:
if len(digit) in (2, 3, 4, 7):
unique_digit_count += 1
return unique_digit_count | 84098d4d294bfdd1b983ea70d51da1453b17245a | 3,649 |
import itertools
def split_and_pad(s, sep, nsplit, pad=None):
""" Splits string s on sep, up to nsplit times.
Returns the results of the split, pottentially padded with
additional items, up to a total of nsplit items.
"""
    l = s.split(sep, nsplit)
    # pad (not a hardcoded None) so the caller's fill value is honored
    return itertools.chain(l, itertools.repeat(pad, nsplit + 1 - len(l))) | 6c439301df7109d9b01a06a87bd7d6adafb8ee1e | 3,650 |
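A usage sketch: the chain always yields exactly nsplit + 1 items, so it can be unpacked directly (values are illustrative):

name, version, rest = split_and_pad("requests==2.31", "==", 2, pad="")
assert (name, version, rest) == ("requests", "2.31", "")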
def transpose_report(report):
"""Transposes the report. Columns into rows"""
return list(map(list, zip(*report))) | bc59f9106496b0b830fdc9ac0266f3b774a8f759 | 3,651 |
def _shape_from_resolution(resolution):
"""
Calculate the shape of the global Earth relief grid given a resolution.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Returns
-------
shape : (nlat, nlon)
The calculated shape.
Examples
--------
>>> _shape_from_resolution('60m')
(181, 361)
>>> _shape_from_resolution('30m')
(361, 721)
>>> _shape_from_resolution('10m')
(1081, 2161)
"""
minutes = int(resolution[:2])
nlat = 180*60//minutes + 1
nlon = 360*60//minutes + 1
return (nlat, nlon) | c726d599696cee2259bc450606e63480b0991451 | 3,652 |
def get_fuel_from(mass: int) -> int:
"""Gets fuel from mass.
Args:
mass (int): mass for the fuel
Returns:
int: fuel necessary for the mass
"""
return mass // 3 - 2 | 37390c8cb9ba7e84c7b5c14841528d6c38f1589e | 3,653 |
import json
import sys
def read_config():
"""
Reads the configuration info into the cfg dictionary.
:return: A dictionary with the SSH-IPS configuration variables.
"""
CONFIG_FILE = '/etc/ssh-ips/config.json'
try:
with open(CONFIG_FILE, "r") as f:
cfg = json.load(f)
except ValueError as e:
print(str(e))
sys.exit()
return cfg | 325e2e63fdf47c892ab432930de3e835faf1831d | 3,654 |
def minimaldescriptives(inlist):
"""this function takes a clean list of data and returns the N, sum, mean
and sum of squares. """
N = 0
sum = 0.0
SS = 0.0
for i in range(len(inlist)):
N = N + 1
sum = sum + inlist[i]
SS = SS + (inlist[i] ** 2)
mean = sum / float(N)
return N, sum, mean, SS | ca1d821ef64b93218bdb22268bfdde737f2d731c | 3,655 |
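A worked example:

N, total, mean, SS = minimaldescriptives([2.0, 4.0, 6.0])
assert (N, total, mean, SS) == (3, 12.0, 4.0, 56.0)   # SS = 4 + 16 + 36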