content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
---|---|---|
def has_no_duplicates(input_):
"""Check that a list contains no duplicates.
For example:
['aa', 'bb', 'cc'] is valid.
['aa', 'bb', 'aa'] is not valid. The word aa appears more than once.
"""
return len(input_) == len(set(input_)) | 6bc1b29b3509e4b17523408ea362591cace8d05d | 709,353 |
def _getBestSize(value):
"""
    Given a size in bytes, convert it into a nice, human-readable value
with units.
"""
if value >= 1024.0**4:
value = value / 1024.0**4
unit = 'TB'
elif value >= 1024.0**3:
value = value / 1024.0**3
unit = 'GB'
elif value >= 1024.0**2:
value = value / 1024.0**2
unit = 'MB'
elif value >= 1024.0:
value = value / 1024.0
unit = 'kB'
else:
unit = 'B'
return value, unit | 6c1859c50edcbd5715443fbf30775eeee83d6a0c | 709,354 |
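A quick illustrative use of _getBestSize, assuming the function above is in scope:
value, unit = _getBestSize(3 * 1024 ** 3)  # 3 GiB expressed in bytes
print('%.1f %s' % (value, unit))           # prints: 3.0 GB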
def inVolts(mv):
""" Converts millivolts to volts... you know, to keep the API
consistent. """
return mv/1000.0 | 6c92195996be1aa2bd52aa0a95d247f7fdef5955 | 709,355 |
from typing import IO
import mimetypes
def guess_mime_type(file_object: IO) -> str:
"""Guess mime type from file extension."""
mime_type, _encoding = mimetypes.guess_type(file_object.name)
if not mime_type:
mime_type = "application/octet-stream"
return mime_type | 12e6e6667b08eaaa24b822c37d56055c1487a801 | 709,356 |
import os
def _resource_path_dev(relative_path):
"""
:return: Package relative path to resource
"""
base_path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(base_path, relative_path) | 8cdf30f3fa62fb824dcdc70bf9d2627b74f66110 | 709,357 |
def replace_word_choice(sentence: str, old_word: str, new_word: str) -> str:
"""Replace a word in the string with another word.
:param sentence: str - a sentence to replace words in.
:param old_word: str - word to replace
:param new_word: str - replacement word
:return: str - input sentence with new words in place of old words
"""
return sentence.replace(old_word, new_word) | 27d0eae1aa12538c570fec3aa433d59c40556592 | 709,358 |
def append_slash(url):
"""Make sure we append a slash at the end of the URL otherwise we
    have issues with urljoin. Example:
>>> urlparse.urljoin('http://www.example.com/api/v3', 'user/1/')
'http://www.example.com/api/user/1/'
"""
if url and not url.endswith('/'):
url = '{0}/'.format(url)
return url | 3d8f009f0f7a2b93e2c9ed3fee593bbcf0f25c4f | 709,359 |
def test_if_tech_defined(enduse_fueltypes_techs):
"""Test if a technology has been configured,
i.e. a fuel share has been assgined to one of the
fueltpyes in `fuel_shares`.
Arguments
---------
enduse_fueltypes_techs : dict
Configured technologies and fuel shares of an enduse
Returns
-------
c_tech_defined : bool
Criteria whether technologies have been configured
for an enduse or not
"""
c_tech_defined = False
for fueltype in enduse_fueltypes_techs:
if enduse_fueltypes_techs[fueltype] == {}:
pass
else:
c_tech_defined = True
break
return c_tech_defined | a727b375dc1bc7e76fe63090d8e278013fa2c6bb | 709,360 |
def classification_result(y, y_pred):
"""
    :param y: true labels
    :param y_pred: predicted labels
    :return: (correct, wrong) lists of indices of correctly and wrongly classified samples
"""
assert len(y) == len(y_pred)
correct = []
wrong = []
for i in range(len(y)):
if y[i] == y_pred[i]:
correct.append(i)
else:
wrong.append(i)
return correct, wrong | bdab32eeded40691a721fe8e1463819605c5639c | 709,361 |
def map_class_to_id(classes):
"""
Get a 1-indexed id for each class given as an argument
Note that for MASATI, len(classes) == 1 when only considering boats
Args:
classes (list): A list of classes present in the dataset
Returns:
dict[str, int]
"""
class_ids = list(range(1, len(classes) + 1))
return dict(zip(classes, class_ids)) | 7c2b47249f61f446327c0a798c1a129c62fde6b3 | 709,362 |
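A minimal check of map_class_to_id, assuming the function above:
assert map_class_to_id(['boat', 'plane']) == {'boat': 1, 'plane': 2}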
import os
import subprocess
def check_qe_completed(folder,prefix,output_file,calc_type='pw'):
"""
Check if qe calculation has correctly completed.
- folder: where the calculation has been run.
- prefix: qe prefix
- output_file: name of output file
- calc_type: either 'pw' or 'ph' or 'gkkp'
"""
status = True
# If save folder does not exist, return False (= NOT completed) immediately
if calc_type=='pw' and not os.path.isdir('%s/%s.save'%(folder,prefix)):
status = False
return status
elif calc_type=='ph' and not os.path.isdir('%s/_ph0'%folder):
status = False
return status
elif calc_type=='gkkp' and not os.path.isdir('%s/elph_dir'%folder):
status = False
return status
if calc_type != 'pw' and calc_type != 'ph' and calc_type != 'gkkp':
raise ValueError("calc_type not recognised: it has to be either 'pw' or 'ph' or 'gkkp'.")
# Next, check if output is correctly completed
try:
check = subprocess.check_output("grep JOB %s/%s*"%(folder,output_file), shell=True, stderr=subprocess.STDOUT)
check = check.decode('utf-8')
check = check.strip().split()[-1]
except subprocess.CalledProcessError as e:
check = ""
if check != "DONE.": status = False
return status | 778375406379996d86d0e033da8531566f8fa7dd | 709,363 |
import os
def _is_file_not_empty(file_path):
"""Return True when buildinfo file is not empty"""
    # NOTE: we can assume that when the file exists, all
    # content has been downloaded to the directory.
return os.path.getsize(file_path) > 0 | 08ef68719eaf57adbb946412dc259ea3d42117d1 | 709,364 |
import os
def check_file_exists(filename):
"""Try to open the file `filename` and return True if it's valid """
return os.path.exists(filename) | 93edc12c8d87863b560637f0bff73f0545f38270 | 709,365 |
def get_output_detections_image_file_path(input_file_path, suffix="--detections"):
"""Get the appropriate output image path for a given image input.
Effectively appends "--detections" to the original image file and
places it within the same directory.
Parameters
-----------
input_file_path: str
Path to input image.
suffix: str
Suffix appended to the file.
Default: "--detections"
Returns
-------
str
Full path for detections output image.
"""
input_file_path = input_file_path.replace('--original.', '.')
input_file_paths = input_file_path.split('.')
input_file_paths[-2] = input_file_paths[-2]+suffix
return '.'.join(input_file_paths) | b8d060dff6800750c418c70c61bd4d8e0b7bb416 | 709,366 |
from typing import Callable
def partial(fn: Callable, *args, **kwargs) -> Callable:
"""Takes a function and fewer than normal arguments, and returns a function
That will consume the remaining arguments and call the function"""
def partial_fn(*rem_args, **rem_kwargs):
return fn(*args, *rem_args, **kwargs, **rem_kwargs)
return partial_fn | 80f0df16915593fa0c5212e7560626db78147da6 | 709,368 |
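A small usage sketch for the partial helper above; add and add_five are illustrative names:
def add(a, b, c):
    return a + b + c
add_five = partial(add, 2, 3)  # binds a=2, b=3
assert add_five(4) == 9        # calls add(2, 3, 4)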
import re
def parse_lipid(name):
"""
parse_lipid
description:
parses a lipid name into lipid class and fatty acid composition, returning a
dictionary with the information. Handles total fatty acid composition, as well
as individual composition, examples:
PC(38:3) --> class: PC, n_carbon: 38, n_unsat: 3
PC(18:1/20:2) --> class: PC, n_carbon: 38, n_unsat: 3,
fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 20, n_unsat: 2))
Also, handles special fatty acid notations (modifiers) used for ceramides and
plasmalogen lipids, examples:
Cer(d36:2) --> class: Cer, n_carbon: 36, n_unsat: 2, fa_mod: d
        Cer(d18:1/18:1) --> class: Cer, n_carbon: 36, n_unsat: 2, fa_mod: d,
fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 18, n_unsat: 1))
PE(p40:4) --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p
PE(p20:2/20:2) --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p,
fa_comp: ((n_carbon: 20, n_unsat: 2), (n_carbon: 20, n_unsat: 2))
lipid name must conform to the general format:
<lipid_class>([modifier]<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>]])
parameters:
name (str) -- lipid name to parse
returns:
        (dict or None) -- parsed lipid information (always contains 'lipid_class', 'n_carbon', and 'n_unsat'
                          keys) or None if it cannot be parsed as a lipid
"""
parsed = {}
# compile regex pattern
l_pat = re.compile(
r"^(?P<cls>[A-Za-z123]+)\((?P<mod>[pdoe]*)(?P<fc1>[0-9]+):(?P<fu1>[0-9]+)/*((?P<fc2>[0-9]+):(?P<fu2>[0-9]+))*/*((?P<fc3>[0-9]+):(?P<fu3>[0-9]+))*\)")
# parse the name using regex
l_res = l_pat.match(name)
if l_res:
# lipid class (required)
if l_res.group('cls'):
parsed["lipid_class"] = l_res.group('cls')
else:
# msg = "parse_lipid: failed to parse lipid class for: {}".format(name)
# raise ValueError(msg)
return None
# value error due to failure to parse fatty acid composition
# def raise_fatty_acid_value_error():
# msg = "parse_lipid: failed to parse fatty acid composition for: {}".format(name)
# raise ValueError(msg)
# fc1 and fu1 are always required
if not l_res.group('fc1') or not l_res.group('fu1'):
# raise_fatty_acid_value_error()
return None
# check if a second fatty acid composition is supplied, e.g. (18:1/16:0)
# if so, need to compute total fatty acid composition and add individual
# fatty acids to a list
if l_res.group('fc2'):
if not l_res.group('fu2'):
# raise_fatty_acid_value_error()
return None
# add info from the first two fatty acid compositions
fc1, fu1 = int(l_res.group('fc1')), int(l_res.group('fu1'))
fc2, fu2 = int(l_res.group('fc2')), int(l_res.group('fu2'))
parsed["fa_comp"] = [
{"n_carbon": fc1, "n_unsat": fu1},
{"n_carbon": fc2, "n_unsat": fu2}
]
# check for 3rd FA composition
fc3, fu3 = 0, 0
if l_res.group('fc3'):
if not l_res.group('fu3'):
# raise_fatty_acid_value_error()
return None
fc3, fu3 = int(l_res.group('fc3')), int(l_res.group('fu3'))
parsed["fa_comp"].append({"n_carbon": fc3, "n_unsat": fu3})
# compute total fatty acid composition
parsed["n_carbon"] = fc1 + fc2 + fc3
parsed["n_unsat"] = fu1 + fu2 + fc3
else:
# fc1 and fu1 are the total fatty acid composition
parsed["n_carbon"] = int(l_res.group('fc1'))
parsed["n_unsat"] = int(l_res.group('fu1'))
# add fatty acid modifier if present
if l_res.group('mod'):
parsed["fa_mod"] = l_res.group('mod')
else:
# could not parse name as a lipid
parsed = None
return parsed | 31a26cf57edfd08c6025c07982b7d6805704088e | 709,369 |
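An illustrative call to parse_lipid for a two-chain lipid name; the expected keys follow from the code above:
result = parse_lipid('PC(18:1/20:2)')
# result == {'lipid_class': 'PC',
#            'fa_comp': [{'n_carbon': 18, 'n_unsat': 1}, {'n_carbon': 20, 'n_unsat': 2}],
#            'n_carbon': 38, 'n_unsat': 3}
assert result['n_carbon'] == 38 and result['n_unsat'] == 3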
import re
import os
def rootUrlJoin(base, extend):
"""
Adds a path element to the path within a ROOT url
"""
if base:
match = re.match("^root://([^/]+)/(.+)", base)
if match:
host = match.group(1)
path = match.group(2)
newpath = os.path.join(path, extend)
newurl = "root://%s/%s" % (host, newpath)
return newurl
return None | 613db9d8ae230bfba064e16cf4fe1712b061be91 | 709,370 |
def either(a, b):
"""
:param a: Uncertain value (might be None).
:param b: Default value.
:return: Either the uncertain value if it is not None or the default value.
"""
return b if a is None else a | 3fd2f99fa0851dae6d1b5f11b09182dbd29bb8c1 | 709,371 |
def get_app_label_and_model_name(path):
"""Gets app_label and model_name from the path given.
    :param str path: Dotted path to the model (without ".model"), as stored
        in the Django `ContentType` model.
:return tuple: app_label, model_name
"""
parts = path.split('.')
return (''.join(parts[:-1]), parts[-1]) | 998e8d81f59491a51f3ae463c76c8627ed63b435 | 709,372 |
import math
def vec_len(x):
""" Length of the 2D vector"""
length = math.sqrt(x[0]**2 + x[1]**2)
return length | a357d31df808720eb2c4dfc12f4d6194ef904f67 | 709,373 |
def part1_count_increases(measurements):
"""Count increases of a measure with the next."""
windows = zip(measurements[1:], measurements[:-1])
increases = filter(lambda w: w[0] > w[1], windows)
return len(list(increases)) | 59311b940ff7fe72cd6fe9cd4d0705918e796e69 | 709,374 |
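A quick check of part1_count_increases on a short series with four pairwise increases:
assert part1_count_increases([199, 200, 208, 210, 200, 207]) == 4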
import numpy as np
def remove_empties(seq):
""" Remove items of length 0
>>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
[1, 2, 4, 5]
>>> remove_empties([('empty', np.nan)])
[nan]
>>> remove_empties([])
[]
"""
if not seq:
return seq
seq2 = [x for x in seq
if not (isinstance(x, tuple) and x and x[0] == 'empty')]
if seq2:
return seq2
else:
return [seq[0][1]] | 500cbbd942682bfde1b9c1babe9a2190413b07fd | 709,375 |
def read_code_blocks_from_md(md_path):
"""
Read ```python annotated code blocks from a markdown file.
Args:
        md_path (str): Path to the markdown file
Returns:
py_blocks ([str]): The blocks of python code.
"""
with open(md_path, "r") as f:
full_md = f.read()
md_py_splits = full_md.split("```python")[1:]
py_blocks = [split.split("```")[0] for split in md_py_splits]
return py_blocks | ca920f74e9326cf5f3635fbb6ebe125b6d97a349 | 709,376 |
import math
def get_localization_scores(predicted_start: int, predicted_end: int, true_start: int, true_end: int):
"""
exp(-abs(t_pred_start-t_start)/(t_end-t_start))
exp(-abs(t_pred_end-t_end)/(t_end-t_start))
:param predicted_start:
:param predicted_end:
:param true_start:
:param true_end:
"""
if true_end - true_start <= 0:
return 0, 0
base = math.exp(1 / (true_start - true_end))
return base ** abs(predicted_start - true_start), base ** abs(predicted_end - true_end) | dfcef55e0594507b48aa83027c5b55a2a6530717 | 709,377 |
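A worked example of get_localization_scores; with a window of width 10 and both endpoints off by 2, each score is exp(-2/10):
import math
s_start, s_end = get_localization_scores(12, 22, 10, 20)
assert abs(s_start - math.exp(-0.2)) < 1e-12
assert abs(s_end - math.exp(-0.2)) < 1e-12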
def json_compatible_key(key: str) -> str:
"""As defined in :pep:`566#json-compatible-metadata`"""
return key.lower().replace("-", "_") | b914ba17b3da5df84d72497048565a118fc4fb05 | 709,378 |
def _scale_func(k):
"""
Return a lambda function that scales its input by k
Parameters
----------
k : float
The scaling factor of the returned lambda function
Returns
-------
Lambda function
"""
return lambda y_values_input: k * y_values_input | 65fd06bfb1a278b106eecc4974bc9317b1dea67f | 709,379 |
import copy
def simplify_graph(G):
"""remove the scores, so the cycle_exits() function can work"""
graph = copy.deepcopy(G)
simplified = dict((k, graph[k][0]) for k in graph)
# add dummy edges,so the cycle_exists() function works
for source in simplified.keys():
for target in simplified[source]:
if target not in simplified:
simplified[target] = []
return simplified | fc9b052c83ce500d20842367b3b6f011268a5a7d | 709,380 |
def export_python_function(earth_model):
"""
Exports model as a pure python function, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
:return: A function that accepts an iterator over examples, and returns an iterator over transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_factory(earth_model.coef_[0, i]))
i += 1
def func(example_iterator):
return [sum(accessor(row) for accessor in accessors) for row in example_iterator]
return func | 593d8cf9f1156359f2276f0481e02a2d00d8ffde | 709,381 |
def get_disable_migration_module():
""" get disable migration """
class DisableMigration:
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
return DisableMigration() | d44a26c5e597f23dbc2434488baf54ebccc5010c | 709,382 |
def split_rows(sentences, column_names):
"""
Creates a list of sentence where each sentence is a list of lines
Each line is a dictionary of columns
    :param sentences: sentences in CoNLL-style format, one line per token
    :param column_names: column names to use as the keys of each line's dict
    :return: list of sentences, each a list of line dicts
"""
new_sentences = []
root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']
start = [dict(zip(column_names, root_values))]
for sentence in sentences:
rows = sentence.split('\n')
        sentence = [dict(zip(column_names, row.split())) for row in rows if row and row[0] != '#']
sentence = start + sentence
new_sentences.append(sentence)
return new_sentences | 444733a9c169bedae8dc0045cd696cafed7085e2 | 709,383 |
from datetime import datetime
def _metadata(case_study):
"""Collect metadata in a dictionnary."""
return {
'creation_date': datetime.strftime(datetime.now(), '%c'),
'imagery': case_study.imagery,
'latitude': case_study.lat,
'longitude': case_study.lon,
'area_of_interest': case_study.aoi_latlon.wkt,
'crs': str(case_study.crs),
'country': case_study.country
} | eb16892135326662029fe568922f2871f016090e | 709,384 |
import re
def fix_reference_name(name, blacklist=None):
"""Return a syntax-valid Python reference name from an arbitrary name"""
name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
if not re.match(r'[a-zA-Z]', name[0]):
name = name[1:]
continue
name = str(name)
if not name:
name = "data"
if blacklist is not None and name in blacklist:
get_new_name = lambda index: name+('_%03d' % index)
index = 0
while get_new_name(index) in blacklist:
index += 1
name = get_new_name(index)
return name | 2f1a291fc7ac9816bc2620fceeeaf90a1bb3fd4a | 709,385 |
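A few illustrative calls to fix_reference_name, assuming the function above:
assert fix_reference_name('2 my-data!') == 'mydata'  # strips symbols and the leading digit
assert fix_reference_name('???') == 'data'           # falls back to the default name
assert fix_reference_name('x', blacklist=['x', 'x_000']) == 'x_001'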
def get_3d_object_section(target_object):
"""Returns 3D section includes given object like stl.
"""
target_object = target_object.flatten()
x_min = min(target_object[0::3])
x_max = max(target_object[0::3])
y_min = min(target_object[1::3])
y_max = max(target_object[1::3])
z_min = min(target_object[2::3])
z_max = max(target_object[2::3])
return [x_min, x_max, y_min, y_max, z_min, z_max] | e11d62ad06ada005d16803b2f440ac700e272599 | 709,386 |
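A small check of get_3d_object_section on two (x, y, z) points; numpy is assumed available, as is typical for STL data:
import numpy as np
tri = np.array([[0.0, 1.0, 2.0], [3.0, -1.0, 0.5]])
assert get_3d_object_section(tri) == [0.0, 3.0, -1.0, 1.0, 0.5, 2.0]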
import requests
import json
def _get_page_num_detail():
"""
    东方财富网 (East Money) - Data Center - Featured Data - Institutional Research - Institutional Research Details
    http://data.eastmoney.com/jgdy/xx.html
    :return: int, the total number of pages of institutional research details
"""
url = "http://data.eastmoney.com/DataCenter_V3/jgdy/xx.ashx"
params = {
"pagesize": "5000",
"page": "1",
"js": "var SZGpIhFb",
"param": "",
"sortRule": "-1",
"sortType": "0",
"rt": "52581407",
}
res = requests.get(url, params=params)
data_json = json.loads(res.text[res.text.find("={")+1:])
return data_json["pages"] | 84c32485637cb481f1ebe6fe05609e5b545daece | 709,387 |
def _get_partition_info(freq_unit):
"""
    Get the TDW cycle unit and partition format for the given platform frequency unit
    :param freq_unit: frequency unit of the schedule
    :return: TDW cycle unit, partition value format
"""
if freq_unit == "m":
        # minute-level task
cycle_unit = "I"
partition_value = ""
elif freq_unit == "H":
        # hourly task
cycle_unit = "H"
partition_value = "YYYYMMDDHH"
elif freq_unit == "d":
        # daily task
cycle_unit = "D"
partition_value = "YYYYMMDD"
elif freq_unit == "w":
        # weekly task
cycle_unit = "W"
partition_value = "YYYYMMDD"
elif freq_unit == "M":
        # monthly task
cycle_unit = "M"
partition_value = "YYYYMM"
elif freq_unit == "O":
        # one-off task
cycle_unit = "O"
partition_value = ""
else:
        # other task types
cycle_unit = "R"
partition_value = ""
return cycle_unit, partition_value | 1f7df3364a21018daa8d3a61507ee59c467c8ffc | 709,388 |
import logging
def stream_logger():
""" sets up the logger for the Simpyl object to log to the output
"""
logger = logging.Logger('stream_handler')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logger.addHandler(handler)
return logger | 45f5af00a0006cc8155bb4a134cce531e51e646a | 709,390 |
def get_breakeven_prob(predicted, threshold = 0):
"""
    Calculate the probability of a stock being above a certain threshold, which can be defined as a value (final stock price) or a return rate (percentage change).
"""
predicted0 = predicted.iloc[0,0]
predicted = predicted.iloc[-1]
predList = list(predicted)
over = [(i*100)/predicted0 for i in predList if ((i-predicted0)*100)/predicted0 >= threshold]
less = [(i*100)/predicted0 for i in predList if ((i-predicted0)*100)/predicted0 < threshold]
return (len(over)/(len(over) + len(less))) | a1cededbe7a0fbe7ffe19e9b873f55c8ce369590 | 709,391 |
import pathlib
def present_from(ref: pathlib.Path, obs: pathlib.Path) -> pathlib.Path:
"""Build a somehow least surprising difference folder from ref and obs."""
ref_code = ref.parts[-1]
if obs.is_file():
return pathlib.Path(*obs.parts[:-1], f'diff-of-{obs.parts[-1]}')
present = pathlib.Path(*obs.parts[:-1], f'diff-of-{ref_code}_{obs.parts[-1]}')
present.mkdir(parents=True, exist_ok=True)
return present | 59ae1eefaeacc9ddfac773c0c88974b98757d4a2 | 709,392 |
def comp_mass(self):
"""Compute the mass of the Frame
Parameters
----------
self : Frame
A Frame object
Returns
-------
Mfra: float
Mass of the Frame [kg]
"""
Vfra = self.comp_volume()
# Mass computation
return Vfra * self.mat_type.struct.rho | b78ef02f045c1f624b3277ec3e358921b3ea5c02 | 709,393 |
def _create_unicode(code: str) -> str:
"""
    Prepend the escaping unicode sequence to the color code
    :param code: the code, preferably an ASCII escape color code
:return:
"""
return u'\u001b[{}m'.format(code) | 523973766d4f18daca8870e641ac77967b715532 | 709,394 |
def get_all(isamAppliance, check_mode=False, force=False, ignore_error=False):
"""
Retrieving the current runtime template files directory contents
"""
return isamAppliance.invoke_get("Retrieving the current runtime template files directory contents",
"/mga/template_files?recursive=yes", ignore_error=ignore_error) | 9ff291b63471b57b110885c35939c8afe3d2f0d8 | 709,395 |
import os
def _fix_importname(mname):
"""
    :param mname: module name to sanitize into an import-safe identifier
"""
mname = os.path.normpath(mname)
mname = mname.replace(".", "")
mname = mname.replace("-", "")
mname = mname.replace("_", "")
mname = mname.replace(os.path.sep, "")
mname = mname.replace(os.path.pathsep, "")
return mname | 22f8ab56800a593502822a612c3f642e8cec22ea | 709,396 |
import os
import glob
def get_font_paths(fonts_dir):
"""
    Load font paths recursively from a folder
    :param fonts_dir: folder containing fonts in ttf, otf or ttc format
    :return: paths of all fonts
"""
print('Load fonts from %s' % os.path.abspath(fonts_dir))
fonts = glob.glob(fonts_dir + '/**/*', recursive=True)
fonts = list(filter(lambda x: os.path.isfile(x), fonts))
print("Total fonts num: %d" % len(fonts))
if len(fonts) == 0:
print("Not found fonts in fonts_dir")
exit(-1)
return fonts | bf6368f90023fd59d64d358e6dac919627feb9ab | 709,398 |
def secondsToHMS(intervalInSeconds):
"""converts time in seconds to a string representing time in hours, minutes, and seconds
:param intervalInSeconds: a time measured in seconds
:returns: time in HH:MM:SS format
"""
interval = [0, 0, intervalInSeconds]
interval[0] = (interval[2] / 3600) - ((interval[2] % 3600) / 3600)
interval[1] = ((interval[2] % 3600) / 60) - ((interval[2] % 3600) % 60) / 60
interval[2] = interval[2] % 60
intervalString = '{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(interval[0],
interval[1], interval[2])
return intervalString | b38d4b886eaabd1361c162b6b7f55e11493dfb60 | 709,399 |
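A quick sanity check of secondsToHMS:
assert secondsToHMS(3661) == '01:01:01'  # one hour, one minute, one second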
def find_layer(model, type, order=0):
"""
Given a model, find the Nth layer of the specified type.
:param model: the model that will be searched
:param type: the lowercase type, as it is automatically saved by keras in the layer's name (e.g. conv2d, dense)
:param order: 0 by default (the first matching layer will be returned)
    :return: The matching layer or None if it was not found.
"""
num_found = 0
for layer in model.layers:
if type + '_' in layer.get_config()['name']:
if order == num_found:
return layer
num_found += 1
return None | 6d4e08c181900774b9e5666a11df9767f68a10ca | 709,400 |
def _find_weektime(datetime, time_type='min'):
"""
Finds the minutes/seconds aways from midnight between Sunday and Monday.
Parameters
----------
datetime : datetime
The date and time that needs to be converted.
time_type : 'min' or 'sec'
States whether the time difference should be specified in seconds or minutes.
"""
if time_type == 'sec':
return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second
elif time_type == 'min':
return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute
else:
raise ValueError("Invalid time type specified.") | 2ed28166d239dabdc9f8811812e472810b10c7d7 | 709,401 |
def field_as_table_row(field):
"""Prints a newforms field as a table row.
This function actually does very little, simply passing the supplied
form field instance in a simple context used by the _field_as_table_row.html
template (which is actually doing all of the work).
See soc/templates/soc/templatetags/_field_as_table_row.html for the CSS
styles used by this template tag.
Usage:
{% load forms_helpers %}
...
<table>
{% field_as_table_row form.fieldname %}
...
</table>
Args:
field: a Django newforms field instance
Returns:
a simple context containing the supplied newforms field instance:
{ 'field': field }
"""
return {'field': field} | 74d120e2a46ae8465832d98ddf02848b5b2cc936 | 709,402 |
def get_samples(select_samples: list, avail_samples: list) -> list:
"""Get while checking the validity of the requested samples
:param select_samples: The selected samples
:param avail_samples: The list of all available samples based on the range
:return: The selected samples, verified
"""
# Sample number has to be positive
if True in [_ < 0 for _ in select_samples]:
raise ValueError(
"Number of samples with -ns has to be strictly positive!")
# Sample number has to be within the available sample
elif False in [_ in avail_samples for _ in select_samples]:
raise ValueError(
"Some or all selected samples are not available in the design")
return select_samples | e1c0c98697d2c504d315064cbdfbad379165d317 | 709,403 |
import collections
def _find_stop_area_mode(query_result, ref):
""" Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order.
:param ref: Name of the reference column.
    :returns: Two lists; one to be used with `bulk_update_mappings`
and the other strings for invalid areas.
"""
# Group by stop area and reference
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
# Check each area and find mode matching reference
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas | e4677638b272e67d2ae21ee97f71f1f1700fd072 | 709,404 |
def _255_to_tanh(x):
"""
range [0, 255] to range [-1, 1]
    :param x: value(s) in the range [0, 255]
    :return: value(s) rescaled to the range [-1, 1]
"""
return (x - 127.5) / 127.5 | a60a67ee489093292fc58136a8f01387482fb162 | 709,405 |
import datetime
def Write(Variable, f):
"""Function to Convert None Strings to Strings and Format to write to file with ,"""
if isinstance(Variable, str) == False:
if isinstance(Variable, datetime.datetime) == True:
return f.write(f"{Variable.strftime('%Y-%m-%d')},")
else:
Variable = round(Variable, 2)
return f.write(f"{str(Variable)},")
elif isinstance(Variable, str) == True:
return f.write(f"{(Variable)},") | 9963c4117c7cc3f19d91331ed6c36e5733cffb56 | 709,406 |
def root():
"""Root endpoint that only checks if the server is running."""
return 'Server is running...' | ea9ecd1c736e9379795f361462ed54f464a4008b | 709,407 |
def clone_model(model, **new_values):
"""Clones the entity, adding or overriding constructor attributes.
The cloned entity will have exactly the same property values as the
original entity, except where overridden. By default, it will have no
parent entity or key name, unless supplied.
Args:
model: datastore_services.Model. Model to clone.
**new_values: dict(str: *). Keyword arguments to override when
invoking the cloned entity's constructor.
Returns:
datastore_services.Model. A cloned, and possibly modified, copy of self.
Subclasses of BaseModel will return a clone with the same type.
"""
# Reference implementation: https://stackoverflow.com/a/2712401/4859885.
cls = model.__class__
model_id = new_values.pop('id', model.id)
props = {k: v.__get__(model, cls) for k, v in cls._properties.items()} # pylint: disable=protected-access
props.update(new_values)
return cls(id=model_id, **props) | ed668632c8917ad685b86fb5c71146be7c9b3b96 | 709,408 |
def finnegans_wake_unicode_chars():
"""Data fixture that returns a string of all unicode characters in Finnegan's Wake."""
return '¤·àáãéìóôþŒŠŸˆ–—‘’‚“”‡…‹' | 78205c9181545544a61ef1eab6c2f51d212dac13 | 709,409 |
from typing import Any
def accept_data(x: Any) -> Any:
"""Accept any types of data and return it as convenient type.
Args:
x: Any type of data.
Returns:
Any: Accepted data.
"""
if isinstance(x, str):
return x
elif isinstance(x, list):
return x
elif isinstance(x, dict):
return x
elif isinstance(x, tuple):
return x
elif isinstance(x, set):
return x
elif isinstance(x, float):
return x
elif isinstance(x, int):
return x
elif isinstance(x, bool):
return x
elif isinstance(x, type(None)):
return x
else:
return x | 9862995eafb7015fc446466e2dbb7774be39f54b | 709,410 |
def merge_dict_list(merged, x):
""" merge x into merged recursively.
x is either a dict or a list
"""
if type(x) is list:
return merged + x
for key in x.keys():
if key not in merged.keys():
merged[key] = x[key]
elif x[key] is not None:
merged[key] = merge_dict_list(merged[key], x[key])
return merged | 00685be39a0b1447c81ecd8de777ebab38aa9bfe | 709,411 |
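A minimal illustration of merge_dict_list's recursive merge, assuming the function above:
merged = merge_dict_list({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
assert merged == {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}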
def gomc_sim_completed_properly(job, control_filename_str):
"""General check to see if the gomc simulation was completed properly."""
job_run_properly_bool = False
output_log_file = "out_{}.dat".format(control_filename_str)
if job.isfile(output_log_file):
# with open(f"workspace/{job.id}/{output_log_file}", "r") as fp:
with open(f"{output_log_file}", "r") as fp:
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "Move" in line:
split_move_line = line.split()
if (
split_move_line[0] == "Move"
and split_move_line[1] == "Type"
and split_move_line[2] == "Mol."
and split_move_line[3] == "Kind"
):
job_run_properly_bool = True
else:
job_run_properly_bool = False
return job_run_properly_bool | 20635ba94b5176298216ad5807e6428a5fb957c2 | 709,412 |
def changenonetoNone(s):
"""Convert str 'None' to Nonetype
"""
if s=='None':
return None
else:
return s | 9f6af1580d8b47d2a7852e433f7ba8bbd5c7044d | 709,413 |
def identify_word_classes(tokens, word_classes):
"""
Match word classes to the token list
:param list tokens: List of tokens
:param dict word_classes: Dictionary of word lists to find and tag with the
respective dictionary key
    :return: Set of matched word class keys
    :rtype: set
"""
if word_classes is None:
word_classes = []
classes = set()
for key in word_classes:
for token in tokens:
if token.lower() in word_classes[key]:
classes.add(key)
return classes | ca7aa602d19ac196321af19c42a60df415c7d115 | 709,414 |
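An illustrative call to identify_word_classes; tokens are lower-cased before matching, and a set is returned:
tokens = ['The', 'RED', 'fox']
word_classes = {'color': ['red', 'blue'], 'animal': ['fox', 'dog']}
assert identify_word_classes(tokens, word_classes) == {'color', 'animal'}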
def is_interested_source_code_file(afile):
"""
    Return True if `afile` is a source code file that we are interested in.
"""
tokens = afile.split(".")
if len(tokens) > 1 and tokens[-1] in ("c", "cpp", "pl", "tmpl", "py", "s", "S"):
# we care about C/C++/perl/template/python/assembly source code files
return True
return False | 9bd77dc3b530262cc2bf8a32c0d050ea30077030 | 709,415 |
def recursively_extract(node, exfun, maxdepth=2):
"""
Transform a html ul/ol tree into a python list tree.
Converts a html node containing ordered and unordered lists and list items
into an object of lists with tree-like structure. Leaves are retrieved by
applying `exfun` function to the html nodes not containing any ul/ol list.
Args:
node: BeautifulSoup HTML node to traverse
exfun: function to apply to every string node found
maxdepth: maximal depth of lists to go in the node
Returns:
A tree-like python object composed of lists.
Examples:
>>> node_content = \
'''
<ol>
<li>Hase</li>
<li>Nase<ol><li>Eins</li><li>Zwei</li></ol></li>
</ol>'''
>>> node = BeautifulSoup(node_content, "lxml")
>>> recursively_extract(node, lambda x: x)
[<li>Hase</li>, [<li>Eins</li>, <li>Zwei</li>]]
>>> recursively_extract(node, lambda x: x.get_text())
['Hase', ['Eins', 'Zwei']]
"""
if node.name in ['ol', 'ul']:
lilist = node
else:
lilist = node.ol or node.ul
if lilist and maxdepth:
# apply 'recursively_extract' to every 'li' node found under this node
return [recursively_extract(li, exfun, maxdepth=(maxdepth - 1))
for li in lilist.find_all('li', recursive=False)]
# if this node doesn't contain 'ol' or 'ul' node, return the transformed
# leaf (using the 'exfun' function)
return exfun(node) | cc5732a786579172dda31958ad2bd468a4feef81 | 709,416 |
def get_body(m):
"""extract the plain text body. return the body"""
if m.is_multipart():
body = m.get_body(preferencelist=('plain',)).get_payload(decode=True)
else:
body = m.get_payload(decode=True)
if isinstance(body, bytes):
return body.decode()
else:
return body | 7980c1471a0a09c793cb8124066a97caac21ae0d | 709,417 |
def density(mass, volume):
"""
Calculate density.
"""
    return mass / volume | 53b1f76ba66695a9cd72be9186bcc374ee11f53b | 709,418 |
import os
import warnings
def get_apikey() -> str:
"""
Read and return the value of the environment variable ``LS_API_KEY``.
:return: The string value of the environment variable or an empty string
if no such variable could be found.
"""
api_key = os.environ.get("LS_API_KEY")
if api_key is None:
warnings.warn("No token found in environment variable LS_API_KEY.")
return api_key or "" | b88a6c0ac8e11add97abd1d7415126f75f50696d | 709,419 |
def num_jewels(J: str, S: str) -> int:
"""
Time complexity: O(n + m)
Space complexity: O(n)
"""
jewels = set(J)
return sum(stone in jewels for stone in S) | f1a9632a791e3ef94699b566da61e27d9dc46b07 | 709,420 |
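The classic jewels-and-stones check for num_jewels:
assert num_jewels('aA', 'aAAbbbb') == 3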
import os
def get_town_table(screenshot_dir):
"""Generate python code for town table
Its format is
table[town_name] = (nearby town1, nearby town2...nearby town5)
    The length of the tuple may differ depending on the town.
Arguments:
        screenshot_dir (str): Directory which has town_name directories
            and labels.
Return:
python code style string (str)
"""
result = "TOWNS_TABLE = {}\n"
for di in sorted(os.listdir(screenshot_dir)):
dir_path = screenshot_dir + "/" + di
if not os.path.isdir(dir_path):
continue
for f in os.listdir(dir_path):
if f.lower().endswith(".txt"):
result += "TOWNS_TABLE[("
lines = open(dir_path + "/" + f).read().splitlines()
for i in range(3, len(lines), 3):
result += "'%s', " % lines[i]
result = result[:-2] + ")]\\"
result += "\n= '%s'\n" % di
break
return result | b6e1c9591cc0531fe9a28b7ce5fec5e5cc231849 | 709,421 |
import glob
def _get_vmedia_device():
"""Finds the device filename of the virtual media device using sysfs.
:returns: a string containing the filename of the virtual media device
"""
sysfs_device_models = glob.glob("/sys/class/block/*/device/model")
vmedia_device_model = "virtual media"
for model_file in sysfs_device_models:
try:
with open(model_file) as model_file_fobj:
if vmedia_device_model in model_file_fobj.read().lower():
vmedia_device = model_file.split('/')[4]
return vmedia_device
except Exception:
pass | e8f8e83b7bf0c73d10d8893a5b4b49670edba7ac | 709,423 |
def has_field_warning(meta, field_id):
"""Warn if dataset has existing field with same id."""
if meta.has_field(field_id):
print(
"WARN: Field '%s' is already present in dataset, not overwriting."
% field_id
)
print("WARN: Use '--replace' flag to overwrite existing field.")
return 1
return 0 | 1cc5016f8ffcce698bcb53dcf6f307b760d7df55 | 709,424 |
def fc_layer(x):
"""Basic Fully Connected (FC) layer with an activation function."""
return x | f26865e13065187363746b8bfe7d95ac221bf236 | 709,425 |
def get_tagset(sentences, with_prefix):
""" Returns the set of entity types appearing in the list of sentences.
If with_prefix is True, it returns both the B- and I- versions for each
entity found. If False, it merges them (i.e., removes the prefix and only
returns the entity type).
"""
iobs = [iob for sent in sentences for (x,iob) in sent]
tagset = set(iobs)
if not with_prefix:
tagset = set([t[2:] for t in list(tagset) if t != 'O'])
return tagset | c0b00f7c5546bfc7fe10b2d4b35998b5dedeba21 | 709,426 |
def normpath(s: str) -> str:
"""Normalize path. Just for compatibility with normal python3."""
return s | 30c528b11f75f52275b753c789e2e3d5bf71641c | 709,428 |
def _proxies_dict(proxy):
"""Makes a proxy dict appropriate to pass to requests."""
if not proxy:
return None
return {'http': proxy, 'https': proxy} | ce51015dc652c494dc89bb11e21f18803ba34c85 | 709,429 |
def gen_run_entry_str(query_id, doc_id, rank, score, run_id):
"""A simple function to generate one run entry.
:param query_id: query id
:param doc_id: document id
:param rank: entry rank
:param score: entry score
:param run_id: run id
"""
return f'{query_id} Q0 {doc_id} {rank} {score} {run_id}' | 657c59fea34e4aed2159337360c973dc99b53082 | 709,430 |
def StretchContrast(pixlist, minmin=0, maxmax=0xff):
""" Stretch the current image row to the maximum dynamic range with
minmin mapped to black(0x00) and maxmax mapped to white(0xff) and
all other pixel values stretched accordingly."""
if minmin < 0: minmin = 0 # pixel minimum is 0
if maxmax > 0xff: maxmax = 0xff # pixel maximum is 255
if maxmax < minmin: maxmax = minmin # range sanity
min, max = maxmax, minmin
for pix in pixlist:
if pix < min and pix >= minmin:
min = pix
if pix > max and pix <= maxmax:
max = pix
if min > max: min = max
if min == max:
f = 1.0
else:
f = 255.0 / (max - min)
n = 0
newpixlist= []
for pix in pixlist:
if pix < minmin: pix = minmin
if pix > maxmax: pix = maxmax
pix = int((pix - min) * f)
newpixlist.append (pix)
return newpixlist | 5f511b4a8bd053d503618767fee06597f1688619 | 709,431 |
def get_database_uri(application):
""" Returns database URI. Prefer SQLALCHEMY_DATABASE_URI over components."""
if application.config.get('SQLALCHEMY_DATABASE_URI'):
return application.config['SQLALCHEMY_DATABASE_URI']
return '{driver}://{username}:{password}@{host}:{port}/{name}'\
.format(driver=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_DRIVER'],
username=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_USERNAME'),
password=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_PASSWORD'),
host=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_HOST'],
port=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_PORT'],
name=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_NAME']) | 6b04a9518798aa3392cdf41667e5edf1fdaa5125 | 709,432 |
def is_variant(title) -> bool:
"""
Check if an issue is variant cover.
"""
return "variant" in title.lower() | 5e0bab3030c069d7726bbc8c9909f561ed139cb8 | 709,433 |
from typing import Tuple
def _lex_single_line_comment(header: str) -> Tuple[str, str]:
"""
>>> _lex_single_line_comment("a=10")
('', 'a=10')
>>> _lex_single_line_comment("//comment\\nb=20")
('', 'b=20')
"""
if header[:2] != "//":
return "", header
line_end_pos = header.find("\n")
return "", header[line_end_pos + 1 :] | 4d562557db11c7279042e439a56cc7864fa259ef | 709,434 |
def default_marker_size(fmt):
""" Find a default matplotlib marker size such that different marker types
look roughly the same size.
"""
temp = fmt.replace('.-', '')
if '.' in temp:
ms = 10
elif 'D' in temp:
ms = 7
elif set(temp).intersection('<>^vd'):
ms = 9
else:
ms = 8
return ms | feebe9bdda47a2e041636f15c9b9595e5cd6b2cc | 709,435 |
def vote_smart_candidate_rating_filter(rating):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param rating:
:return:
"""
rating_filtered = {
'ratingId': rating.ratingId,
'rating': rating.rating,
'timeSpan': rating.timespan, # Seems to be typo with lower case "s"
'ratingName': rating.ratingName,
'ratingText': rating.ratingText,
'sigId': rating.sigId,
}
return rating_filtered | f4fec92e46f58444abb8dab56f28acc7e670aab0 | 709,436 |
def get_syntax(view):
""" get_syntax(view : sublime.View) -> str
>>> get_syntax(view)
'newLISP'
>>> get_syntax(view)
'Lisp'
    Returns current file syntax/language
"""
syntax = view.settings().get('syntax')
syntax = syntax.split('/')[-1].replace('.tmLanguage', '')
return syntax | a5be75f51de105af63ce53df7c3b7094537d28f3 | 709,437 |
import argparse
def commandline(args):
"""
Settings for the commandline arguments.
Returns the parsed arguments.
"""
parser = argparse.ArgumentParser(description='Checks the timestamps for files in a directory.')
parser.add_argument("-p", "--path", required=True,
help="Path to offline backup list file or directory")
parser.add_argument("-w", "--warning",
help="Threshold for warnings in days. Default: 2 Days")
parser.add_argument("-c", "--critical",
help="Threshold for criticals in days. Default: 5 Days")
parser.add_argument("-f", "--format",
help="Format of the date in the file. Default: Y-m-d")
parser.add_argument("-r", "--regex",
help="Regular Expression to extract date from file. Default: [0-9]{4}-[0-9]{2}-[0-9]{2}")
parser.add_argument("-v", "--verbose",
help="Increase output verbosity",
action="store_true")
parser.set_defaults(verbose=False,
critical=5,
warning=2)
return parser.parse_args(args) | f3c1726e0dfde2bce6cd3e62a2300abbace7900e | 709,438 |
from typing import List
from typing import Dict
def seq_hist(seq_lens: List[int]) -> Dict[int, int]:
"""Returns a dict of sequence_length/count key/val pairs.
For each entry in the list of sequence lengths, tabulates
the frequency of appearance in the list and returns the
data as a dict. Useful for histogram operations on sequence
length.
"""
seq_count = {}
for slen in seq_lens:
if slen in seq_count:
seq_count[slen] += 1
else:
seq_count[slen] = 1
return seq_count | 5778b7566d1b64e8db0e2dce6bbf53e06cdb196d | 709,439 |
import os
def make_ms_url( syndicate_host, syndicate_port, no_tls, urlpath="" ):
"""
Make a URL to the MS.
Return the URL.
"""
scheme = "https://"
    default_port = 443
    if no_tls:
        default_port = 80
        scheme = "http://"
if syndicate_port != default_port:
return scheme + os.path.join( syndicate_host.strip("/") + ":%s" % syndicate_port, urlpath )
else:
return scheme + os.path.join( syndicate_host.strip("/"), urlpath ) | 4aec60c48285a8e8f8d58b18ea29928e338fa1bc | 709,440 |
def get_right_list_elements(result):
"""Some of the results are empty - therefore, the try-except.
Others are lists with more than one element and only specific
elements are relevant.
Args:
result (dict of lists): result of the xpath elements.
Returns:
dict of strs
"""
for key in ["title", "ort", "merkmale", "weitere_eigenschaften", "beschreibung"]:
try:
result[key] = result[key][0]
except:
pass
for key in ["preis", "anzahl_raeume", "wohnflaeche", "grundstuecksflaeche"]:
try:
result[key] = result[key][1]
except:
pass
return result | b81e80363f82dfe43878b3d8cb319f7129ebfc50 | 709,441 |
def is_repo_in_config(config, repo, rev, hook_id):
"""Get if a repository is defined in a pre-commit configuration.
Parameters
----------
config : dict
Pre-commit configuration dictionary.
repo : str
Repository to search.
rev : str
Repository tag revision.
    hook_id : str
        Hook identifier.
Returns
-------
dict : Information about if the repository and the hook have been found.
"""
response = {"repo_found": False, "hook_found": False, "same_rev": False}
for repo_ in config["repos"]:
if repo_["repo"] == repo:
response["repo_found"] = True
response["hook_found"] = hook_id in [hook["id"] for hook in repo_["hooks"]]
response["same_rev"] = repo_["rev"] == rev
break
return response | 855315c50f4bfe53a4f9b7a5d392bb539e364617 | 709,442 |
def split_dataframe(df, size=10*1024*1024):
"""Splits huge dataframes(CSVs) into smaller segments of given size in bytes"""
# size of each row
row_size = df.memory_usage().sum() / len(df)
# maximum number of rows in each segment
row_limit = int(size // row_size)
# number of segments
seg_num = (len(df)+row_limit-1)//row_limit
# split df into segments
segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]
return segments | 46f34d388e6f596bfcf803b4569eb3015344bafb | 709,443 |
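A hedged sketch of split_dataframe on a small frame (pandas assumed installed; the exact segment count depends on pandas' memory accounting, but the rows are always preserved):
import pandas as pd
df = pd.DataFrame({'a': range(1000)})
parts = split_dataframe(df, size=4000)  # target roughly 4 kB per segment
assert sum(len(p) for p in parts) == len(df)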
def qx_to_npx(df):
""" Return df with qx converted to npx.
"""
df = 1 - df
out = df.cumprod().shift()
for i in df.index:
out.loc[i, i] = 1
return out | 683a26f57dfb7ae1762df84f74186f0b88cb4688 | 709,444 |
def calculate_line_number(text):
"""Calculate line numbers in the text"""
return len([line for line in text.split("\n") if line.strip() != ""]) | f35533945203ec2f47a89e7072ddd9b172f5554b | 709,446 |
def convert_sentences(sentences, tokenizer):
"""
Truncate each sentence to 512 bpes in order to fit on BERT and convert it to bpes.
:param tokenizer: The BERT tokenizer we used in order convert each sentence to ids.
:param sentences: The tokenized sentences of the summary we are processing.
:return: The ids of the summary sentences.
"""
sentences_ids = []
for i, sent in enumerate(sentences):
if len(sent) > 512:
            sentences[i] = sentences[i][:511] + ['[SEP]']
sentences_ids.append(tokenizer.convert_tokens_to_ids(sentences[i]))
return sentences_ids | 48cde2cba0af288bff9f49cb2ffc66dd22cfd952 | 709,447 |
def verify_spec(spec_utid, proxy_utid):
"""
For a specific unit test id (utid) compares the spec with the proxy
"""
results=''
for key in spec_utid:
results += '%s: spec=%s, proxy=%s (%s) *** ' % (key,spec_utid[key],proxy_utid[key],(spec_utid.get(key)==proxy_utid.get(key)))
return results | b9854e23f0d88ed4f9abcc0c16236a2d543b9eb0 | 709,448 |
def lammps_created_gsd(job):
"""Check if the mdtraj has converted the production to a gsd trajectory for the job."""
return job.isfile("trajectory-npt.gsd") | a66c899a20e9602098150f46067d5505572232c2 | 709,449 |
def deslugify_province(prov):
"""
Province slug to name, i.e. dashes to spaces and title case.
KZN is a special case.
"""
if prov == 'kwazulu-natal':
return 'KwaZulu-Natal'
return prov.replace('-', ' ').title() | 8e88ea7325c3b911495780b4437bc02784fbad82 | 709,450 |
import re
def parse_vectors(vectors):
""" Basic cleanup of vector or vectors
Strip out V from V#s. Similar to parse tables, this by no means guarantees
a valid entry, just helps with some standard input formats
Parameters
----------
vectors : list of str or str
A string or list of strings of vector names to be parsed
Returns
-------
list of str
vectors with unnecessary characters removed
"""
def parse_vector(vector):
"""Strip string to numeric elements only"""
if isinstance(vector, int): # Already parsed earlier
return vector
return int(re.sub(r'\D', '', vector))
if isinstance(vectors, str):
return [parse_vector(vectors)]
return [parse_vector(v) for v in vectors] | d2161e45bae51db21d7668ea6008ddb9ada16c4e | 709,451 |
def serialize_skycoord(o):
"""
Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.
Args:
o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
"""
representation = o.representation.get_name()
frame = o.frame.name
r = o.represent_as('spherical')
d = dict(
_type='astropy.coordinates.SkyCoord',
frame=frame,
representation=representation,
lon=r.lon,
lat=r.lat)
if len(o.distance.unit.to_string()):
d['distance'] = r.distance
return d | 52830d9243cac36573c358f1579987eb43435892 | 709,452 |
def redis_sentinel(create_sentinel, sentinel, loop):
"""Returns Redis Sentinel client instance."""
redis_sentinel = loop.run_until_complete(
create_sentinel([sentinel.tcp_address], timeout=2, loop=loop))
assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG'
return redis_sentinel | 3b779c9ef73e3bc5949afadbace34a9dcca1273a | 709,453 |
def _diff_tail(msg):
"""`msg` is an arbitrary length difference "path", which could
be coming from any part of the mapping hierarchy and ending in any kind of
selector tree. The last item is always the change message: add, replace,
delete <blah>. The next to last should always be a selector key of some kind.
Back up from there to find the first mapping tuple.
"""
tail = []
for part in msg[::-1]:
if isinstance(part, tuple) and len(part) == 2 and isinstance(part[0], str) and part[0].endswith("map"):
tail.append(part[1])
break
else:
tail.append(part)
return tuple(reversed(tail)) | 224a4ca5f73b1f147c27599b62f0540480e40a0d | 709,454 |
import torch
def choice(x, a):
"""Generate a random sample from an array of given size."""
if torch.is_tensor(x):
return x[torch.randint(len(x), (a,))]
return x | af21321bcd12fe5f1a5eb59b8f0db14096899b5d | 709,455 |
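A small torch-based check of choice; sampling is with replacement via torch.randint:
import torch
x = torch.arange(10)
sample = choice(x, 3)
assert sample.shape == (3,)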
def process_repl_args(args):
""" Process PANDA replay-related arguments.
"""
assert False, 'Not implemented yet.'
cmd = []
cmd.extend(['-display', 'none'])
return cmd
# p_test "${panda_rr}-rr-snp" f "trace memory snapshot"
# p_test "${panda_rr}-rr-nondet.log" f "trace nondet log"
# -pandalog ${opts[-plog]} -replay $panda_rr | 660495454f3b04f76d9aa0447262cb3a8c06b543 | 709,456 |
import pickle
def load_pickle(filename: str):
"""
Load a file from disk.
Parameters
----------
filename: str
Name of the file that is loaded.
Returns
-------
"""
return pickle.load(open(filename, 'rb')) | cae6710ba18664f244c55525c14a6bda0bea314d | 709,458 |
import os
def find_pssm_missing_proteins(fasta_dict, pssm_dir):
"""find_pssm_missing_proteins function finds the missing pssm files of the proteins in fasta file.
Args:
fasta_dict (dict): This is a dict of fasta file. The keys of fasta_dict are protein ids and
values are protein sequences.
pssm_dir (str): It is full path to the directory that contains pssm files.
Returns:
list: The list of proteins that does not have pssm file in pssm_dir
"""
set_missing_prots = set()
set_prots_pssm_exists = set()
for file in os.listdir(pssm_dir):
protein_id = file.split(".")[0]
set_prots_pssm_exists.add(protein_id)
for protein_id in set_prots_pssm_exists:
file = protein_id + ".pssm"
flag = False
sequence = ""
with open(pssm_dir+"/"+file, "r") as fp:
for line in fp:
list_line = line.strip().split()
if len(list_line) > 0:
if list_line[0] == '1':
flag = True
if len(list_line) == 0:
flag = False
if flag:
sequence += list_line[1]
if protein_id in fasta_dict:
if sequence != fasta_dict[protein_id]:
set_missing_prots.add(protein_id)
set_missing_prots = set_missing_prots.union(set(fasta_dict.keys()) - set_prots_pssm_exists)
return list(set_missing_prots) | d3ab3011216329ba7dc9a6d7449d930ea3e536c7 | 709,459 |