content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
---|---|---|
def generate_legacy_dir(ctx, config, manifest, layers):
"""Generate a intermediate legacy directory from the image represented by the given layers and config to /image_runfiles.
Args:
ctx: the execution context
config: the image config file
manifest: the image manifest file
layers: the list of layer tarballs
Returns:
The filepaths generated and runfiles to be made available.
config: the generated config file.
layers: the generated layer tarball files.
temp_files: all the files generated to be made available at runtime.
"""
# Construct image runfiles for input to pusher.
image_files = [] + layers
if config:
image_files += [config]
if manifest:
image_files += [manifest]
path = "image_runfiles/"
layer_files = []
# Symlink layers to ./image_runfiles/<i>.tar.gz
for i in range(len(layers)):
layer_symlink = ctx.actions.declare_file(path + str(i) + ".tar.gz")
layer_files.append(layer_symlink)
ctx.actions.run_shell(
outputs = [layer_symlink],
inputs = [layers[i]],
command = "ln {src} {dst}".format(
src = layers[i].path,
dst = layer_symlink.path,
),
)
# Symlink config to ./image_runfiles/config.json
config_symlink = ctx.actions.declare_file(path + "config.json")
ctx.actions.run_shell(
outputs = [config_symlink],
inputs = [config],
command = "ln {src} {dst}".format(
src = config.path,
dst = config_symlink.path,
),
)
return {
"config": config_symlink,
"layers": layer_files,
"temp_files": [config_symlink] + layer_files,
} | 6001820e63ac3586625f7ca29311d717cc1e4c07 | 2,954 |
def workflow_key(workflow):
"""Return text search key for workflow"""
# I wish tags were in the manifest :(
elements = [workflow['name']]
elements.extend(workflow['tags'])
elements.extend(workflow['categories'])
elements.append(workflow['author'])
return ' '.join(elements) | 57347705b605e68a286dd953de5bb157ac50628e | 2,955 |
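A quick usage sketch of workflow_key (the workflow dict below is hypothetical):

>>> wf = {'name': 'Backup', 'tags': ['files', 'sync'],
...       'categories': ['utility'], 'author': 'alice'}
>>> workflow_key(wf)
'Backup files sync utility alice'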
def _extend(obj, *args):
    """
    adapted from underscore-py
    Extend a given object with all the properties in
    passed-in object(s); keys whose value is None are removed.
    """
    for src in args:
        obj.update(src)
        for k, v in src.items():
            if v is None:
                del obj[k]
    return obj | 9fe1bffcd05ac44a3587b53a71f592c462975482 | 2,956 |
import locale
import re
def parse_price(price):
    """
    Convert a string price (e.g. '$1,299') to an integer by stripping all non-digit characters.
    """
    if not price:
        return 0
    # Strip thousands separators and any remaining non-digit characters (e.g. currency symbols).
    digits = re.sub('[^0-9]', '', price.replace(',', ''))
    if not digits:
        return 0
    return locale.atoi(digits) | bb90aa90b38e66adc73220665bb5e6458bfe5374 | 2,958 |
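A quick usage sketch:

>>> parse_price('$1,299')
1299
>>> parse_price('')
0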
import math
def get_weight(stats):
"""
Return a data point weight for the result.
"""
if stats is None or 'ci_99_a' not in stats or 'ci_99_b' not in stats:
return None
try:
a = stats['ci_99_a']
b = stats['ci_99_b']
if math.isinf(a) or math.isinf(b):
# Infinite interval is due to too few samples --- consider
# weight as missing
return None
return 2 / abs(b - a)
except ZeroDivisionError:
return None | 7e44032bc9e51e5fe7522c3f51ead5e733d4107a | 2,960 |
def findmax(engine, user, measure, depth):
    """Return the top (neighbor, measure) pairs among a user's neighbors, sorted by measure, up to the given depth"""
    neighbors = engine.neighbors(user)
    d = {v: measure(user, v) for v in neighbors}
    ranked = sorted(neighbors, key=lambda v: d[v], reverse=True)
    return [(v, d[v]) for v in ranked[:depth]] | ecf6d72f8c689f1b7af78a714e55d8fbfe57f2ad | 2,961 |
def zscore(dat, mean, sigma):
"""Calculates zscore of a data point in (or outside of) a dataset
zscore: how many sigmas away is a value from the mean of a dataset?
Parameters
----------
dat: float
Data point
mean: float
Mean of dataset
    sigma: float
Sigma of dataset
"""
zsc = (dat-mean)/sigma
return zsc | b11216e50632e2024af0a389184d5e1dba7ed4fd | 2,963 |
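A quick usage sketch:

>>> zscore(5.0, 3.0, 2.0)
1.0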
def unused(attr):
    """
    Check whether an attribute is not set (has no value in it).
    """
    return attr is None | febc225f3924fdb9de6cfbf7eba871cce5b6e374 | 2,965 |
def get_evaluate_SLA(SLA_terms, topology, evaluate_individual):
"""Generate a function to evaluate if the flow reliability and latency requirements are met
Args:
SLA_terms {SLA} -- an SLA object containing latency and bandwidth requirements
topology {Topology} -- the reference topology object for the flow
        evaluate_individual {function} -- a cost function, which returns the metrics for a given
            individual (a DEAP individual, i.e. a list)
Returns:
evaluate_SLA {Function}: a function returning True if the requirements are met, False otherwise
"""
def evaluate_SLA(individual):
evaluation = evaluate_individual(individual)
if evaluation[3] > SLA_terms.latency or evaluation[1] > 1:
return False
return True
return evaluate_SLA | 81fdaa07e3fc21066ab734bef0cc71457d40fb5b | 2,966 |
def latest_consent(user, research_study_id):
"""Lookup latest valid consent for user
:param user: subject of query
:param research_study_id: limit query to respective value
If latest consent for user is 'suspended' or 'deleted', this function
will return None. See ``consent_withdrawal_dates()`` for that need.
:returns: the most recent consent based on given criteria, or None
if no match is located
"""
# consents are ordered desc(acceptance_date)
for consent in user.valid_consents:
if consent.research_study_id != research_study_id:
continue
if consent.status == 'consented':
return consent
return None | 2295b592a0c1fdaf3b1ed21e065f39e73a4bb622 | 2,967 |
from typing import Tuple
def find_next_tag(template: str, pointer: int, left_delimiter: str) -> Tuple[str, int]:
"""Find the next tag, and the literal between current pointer and that tag"""
split_index = template.find(left_delimiter, pointer)
if split_index == -1:
return (template[pointer:], len(template))
return (template[pointer:split_index], split_index) | 82d091ef6738ffbe93e8ea8a0096161fc359e9cb | 2,968 |
def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
    e : np.array(complex)
        off diagonal elements of Hamiltonian
    d : np.array(complex)
        diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w | 9b78d86592622100322d7a4ec031c1bd531ca51a | 2,970 |
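A quick usage sketch applying a small 3x3 tridiagonal Hamiltonian to a basis vector:

>>> import numpy as np
>>> d = np.array([1.0, 1.0, 1.0], dtype=complex)  # diagonal
>>> e = np.array([0.5, 0.5], dtype=complex)       # off-diagonal
>>> psi = np.array([1.0, 0.0, 0.0], dtype=complex)
>>> [complex(v) for v in hamiltonian_c(3, psi, e, d)]
[(1+0j), (0.5+0j), 0j]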
import pickle
def load_pyger_pickle(filename):
""" Load pyger data from pickle file back into object compatible with pyger plotting methods
:param filename: File name of pickled output from calc_constraints()
This is only meant to be used to read in the initial constraints object produced by
    calc_constraints(), not the cooldown data produced by calc_constraints2(). The data produced
by calc_constraints2() should be able to be read in with a simple pickle.load() function.
"""
class saved_pyger_data(object):
def __init__(self, pickled_constraint):
for key in pickled_constraint:
self.__dict__.update({key:pickled_constraint[key]})
    with open(filename, 'rb') as f:
        rawdata = pickle.load(f)
pyger_compatible_data = {}
for name in list(rawdata.keys()):
constraint = saved_pyger_data(rawdata[name])
pyger_compatible_data.update({name:constraint})
return pyger_compatible_data | 23f4d4f2e3cae514ed65d62035277417c9b246a8 | 2,971 |
def grow_population(initial, days_to_grow):
"""
Track the fish population growth from an initial population, growing over days_to_grow number of days.
To make this efficient two optimizations have been made:
    1. Instead of tracking individual fish (which double roughly every 8 days, which would result in O(10^9)
       fish over 256 days), we instead compute the sum of fish with the same due date and use the due date
       as the offset into the current population list. For example, if 5 fish have a timer of 1 and 2 fish
       have a timer of 4 the population would be tracked as: [0, 5, 0, 0, 2, 0, 0, 0, 0]
    2. Modulo arithmetic is used instead of fully iterating through the entire list to decrement the due
       date of each fish every day. Using modulo arithmetic provides a projection into the fish data that
       looks like it's changing each day without needing O(n) operations; instead we can update the list
       in constant time regardless of the number of different ages for fish.
"""
current = list(initial)
if days_to_grow == 0:
return current
for day in range(0, days_to_grow):
due_index = day % 9
due_count = current[due_index]
current[(day+7)%9] += due_count
current[(day+9)%9] += due_count
current[due_index] = max(0, current[due_index] - due_count)
return current | 88b8283e5c1e6de19acb76278ef16d9d6b94de00 | 2,974 |
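A quick usage sketch, using the well-known Advent of Code 2021 "Lanternfish" example
(initial fish timers 3, 4, 3, 1, 2, bucketed by timer value; sum() gives the total population):

>>> initial = [0, 1, 1, 2, 1, 0, 0, 0, 0]
>>> sum(grow_population(initial, 18))
26
>>> sum(grow_population(initial, 80))
5934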
import six
def _ensure_list(alist): # {{{
"""
Ensure that variables used as a list are actually lists.
"""
# Authors
# -------
# Phillip J. Wolfram, Xylar Asay-Davis
if isinstance(alist, six.string_types):
# print 'Warning, converting %s to a list'%(alist)
alist = [alist]
return alist | bd8115dad627f4553ded17757bfb838cfdb0200b | 2,975 |
import numpy
import math
def two_angle_circular_correlation_coef(angles1, angles2, mean1, mean2):
"""
Circular correlation measure. SenGupta 2001
"""
centered_a = angles1-mean1
centered_b = angles2-mean2
sin_centered_a = numpy.sin(centered_a)
sin_centered_b = numpy.sin(centered_b)
sin2_a = sin_centered_a*sin_centered_a
sin2_b = sin_centered_b*sin_centered_b
return numpy.dot(sin_centered_a, sin_centered_b) / math.sqrt(numpy.dot(sin2_a, sin2_b)) | 6a95f8726f45105c68b9c0b4f8f13191a88734e2 | 2,976 |
from typing import Union
import yaml
def format_data(data: Union[dict, list]) -> str:
"""
:param data: input data
:return: pretty formatted yaml representation of a dictionary
"""
return yaml.dump(data, sort_keys=False, default_flow_style=False) | b4e79a8957995fb8e2eaa549a6a208a48574a598 | 2,977 |
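A quick usage sketch (note that sort_keys=False requires PyYAML >= 5.1):

>>> format_data({'name': 'demo', 'items': [1, 2]})
'name: demo\nitems:\n- 1\n- 2\n'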
def showgraphwidth(context, mapping):
"""Integer. The width of the graph drawn by 'log --graph' or zero."""
# just hosts documentation; should be overridden by template mapping
return 0 | 6e2fad8c80264a1030e5a113d66233c3adc28af8 | 2,980 |
def diff_last_filter(trail, key=lambda x: x['pid']):
    """ Keep the trail only if its last two entries differ on the given key; return None otherwise
    """
    return trail if key(trail[-1]) != key(trail[-2]) else None | 82e67a98a1b09e11f2f1ebd76f470969b2dd1a51 | 2,981 |
import math
def motion(x, u, dt):
    """
    motion model
    State x = [x, y, yaw, velocity, yaw_rate]; input u = [velocity, yaw_rate].
    """
    x[2] += u[1] * dt                   # update heading first
    x[0] += u[0] * math.cos(x[2]) * dt  # advance x using the new heading
    x[1] += u[0] * math.sin(x[2]) * dt  # advance y using the new heading
    x[3] = u[0]                         # store current velocity
    x[4] = u[1]                         # store current yaw rate
    return x | e33adae2a6c5934dc7e0662570c42292eacbfd89 | 2,983 |
import os
from datetime import datetime
def parse_metadata_from_sensorcommunity_csv_filename(filename):
"""Parse sensor id, sensor type and date from a raw luftdaten.info AQ .csv
filename.
Parameters:
filename (path): the file to parse. Format of the file is expected to be
the one used by the luftdaten.info project and saved by
sensor.community, for example as in the one below:
https://archive.sensor.community/2020-01-13/
2020-01-12_sds011_sensor_35233.csv
Return:
tuple: (sensor_id, sensor_type, date) if possible,
(None, None, None) otherwise
"""
tokens = os.path.basename(os.path.splitext(filename)[0]).split("_")
if len(tokens) == 4 and tokens[2] == "sensor":
try:
date = datetime.strptime(tokens[0], "%Y-%m-%d")
except ValueError:
date = None
sensor_type = tokens[1]
sensor_id = int(tokens[3])
return (sensor_id, sensor_type, date)
# failure
return (None, None, None) | 3b16a945930311fdbb0094956c8a58fa5f4b5a70 | 2,985 |
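A quick usage sketch with the filename format from the docstring:

>>> parse_metadata_from_sensorcommunity_csv_filename("2020-01-12_sds011_sensor_35233.csv")
(35233, 'sds011', datetime.datetime(2020, 1, 12, 0, 0))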
def merge_two_sorted_array(l1, l2):
"""
Time Complexity: O(n+m)
Space Complexity: O(n+m)
:param l1: List[int]
:param l2: List[int]
:return: List[int]
"""
if not l1:
return l2
if not l2:
return l1
merge_list = []
i1 = 0
i2 = 0
l1_len = len(l1) - 1
l2_len = len(l2) - 1
while i1 <= l1_len and i2 <= l2_len:
if l1[i1] < l2[i2]:
merge_list.append(l1[i1])
i1 += 1
else:
merge_list.append(l2[i2])
i2 += 1
while i1 <= l1_len:
merge_list.append(l1[i1])
i1 += 1
while i2 <= l2_len:
merge_list.append(l2[i2])
i2 += 1
return merge_list | 2671d21707056741bbdc4e3590135e7e1be4c7e9 | 2,987 |
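A quick usage sketch:

>>> merge_two_sorted_array([1, 3, 5], [2, 4, 6])
[1, 2, 3, 4, 5, 6]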
def classpartial(*args, **kwargs):
    """Bind arguments to a class's __init__."""
    cls, args = args[0], args[1:]
    class Partial(cls):
        __doc__ = cls.__doc__
        def __new__(cls_):
            # Instantiate the original class with the bound arguments.
            return cls(*args, **kwargs)
    Partial.__name__ = cls.__name__
    return Partial | 7cdc96e314a2ce3c658ecb886922df4d7bda5b99 | 2,988 |
def check_similarity(var1, var2, error):
    """
    Check the similarity between two numbers, considering an error margin.
    Parameters:
    -----------
    var1: float
    var2: float
    error: float
    Returns:
    -----------
    similarity: boolean
    """
    return (var2 - error) <= var1 <= (var2 + error) | 305fd08cf4d8b1718d8560315ebf7bd03a4c7e2a | 2,989 |
def getCasing(word):
""" Returns the casing of a word"""
if len(word) == 0:
return 'other'
elif word.isdigit(): #Is a digit
return 'numeric'
elif word.islower(): #All lower case
return 'allLower'
elif word.isupper(): #All upper case
return 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
return 'initialUpper'
return 'other' | 2af70926c0cbbde6310abb573ccc3ee8260b86bd | 2,990 |
def normalize_angle(deg):
    """
    Take an angle in degrees and return it as a value between 0 and 360
    :param deg: float or int
    :return: float or int, value between 0 and 360
    """
    angle = deg
    while angle >= 360:
        angle -= 360
    while angle < 0:
        angle += 360
    return angle | cd4788819bbc8fce17ca7c7b1b320499a3893dee | 2,991 |
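A quick check of the wrap-around behavior:

>>> normalize_angle(370)
10
>>> normalize_angle(-90)
270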
def makeFields(prefix, n):
    """Generate a list of field names with this prefix up to n"""
    return [prefix + str(i) for i in range(1, n + 1)] | 435571557ef556b99c4729500f372cc5c9180052 | 2,992 |
def process_input_dir(input_dir):
"""
Find all image file paths in subdirs, convert to str and extract labels from subdir names
    :param input_dir: Path object for parent directory, e.g. train
:returns: list of file paths as str, list of image labels as str
"""
file_paths = list(input_dir.rglob('*.png'))
file_path_strings = [str(path) for path in file_paths]
label_strings = [path.parent.name for path in file_paths]
return file_path_strings, label_strings | 569d4539368888c91a12538156c611d311da03b6 | 2,993 |
def fak(n):
    """ Compute the factorial of the integer n. """
    erg = 1
    for i in range(2, n+1):
        erg *= i
    return erg | 9df6f4fa912a25535369f4deb0a06baef8e6bdcc | 2,994 |
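A quick usage sketch:

>>> fak(5)
120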
import re
def create_sequences_sonnets(sonnets):
"""
This creates sequences as done in Homework 6, by mapping each word
to an integer in order to create a series of sequences. This function
specifically makes entire sonnets into individual sequences
and returns the list of processed sonnets back to be used in the basic
HMM notebook for generation.
"""
sequences = []
obs_counter = 0
obs_map = {}
for sonnet in sonnets:
sequence = []
for i, line in enumerate(sonnet):
split = line.split()
for word in split:
word = re.sub(r'[^\w]', '', word).lower()
if word not in obs_map:
# Add unique words to the observations map.
obs_map[word] = obs_counter
obs_counter += 1
# Add the encoded word.
sequence.append(obs_map[word])
# Add the encoded sequence.
sequences.append(sequence)
return obs_map, sequences | 56087140fe5ed8934b64a18567b4e9023ddc6f59 | 2,995 |
import requests
def get_longitude_latitude(city_info, station):
    """
    Query the longitude and latitude of a metro station via the AMap (Gaode) geocoding API;
    the key below must be replaced with your own, obtained from the AMap site:
    https://lbs.amap.com/api/webservice/guide/api/georegeo
    :param city_info: the city's metro system, e.g. 广州市地铁 (Guangzhou Metro)
    :param station: the metro station name, e.g. 珠江新城站 (Zhujiang New Town Station)
    :return: longitude and latitude
    """
    addr = city_info + station
    print('* Looking up: ' + addr)
    parameters = {'address': addr, 'key': '98a3444618af14c0f20c601f5a442000'}
    base = 'https://restapi.amap.com/v3/geocode/geo'
    response = requests.get(base, parameters, timeout=10)  # 10 s timeout; may be slower through a global proxy
    if response.status_code == 200:
        answer = response.json()
        x, y = answer['geocodes'][0]['location'].split(',')
        coor = (float(x), float(y))
        print('* Coordinates of ' + station + ':', coor)
        return coor
    else:
        return (None, None) | 9b0132702e14af9dec1ce65724139af0188b14a0 | 2,996 |
def set_difference(tree, context, attribs):
"""A meta-feature that will produce the set difference of two boolean features
(will have keys set to 1 only for those features that occur in the first set but not in the
second).
@rtype: dict
    @return: dictionary whose keys occur with the first feature but not the second, with \
        values equal to 1
"""
ret = {}
for key, val in context['feats'][attribs[0]].items():
if key not in context['feats'][attribs[1]]:
ret[key] = val
return ret | 7887f619e601624843c6507e7b93442020ecf1ea | 2,997 |
def recursively_replace(original, replacements, include_original_keys=False):
"""Clones an iterable and recursively replaces specific values."""
# If this function would be called recursively, the parameters 'replacements' and 'include_original_keys' would have to be
# passed each time. Therefore, a helper function with a reduced parameter list is used for the recursion, which nevertheless
# can access the said parameters.
def _recursion_helper(obj):
#Determine if the object should be replaced. If it is not hashable, the search will throw a TypeError.
try:
if obj in replacements:
return replacements[obj]
except TypeError:
pass
# An iterable is recursively processed depending on its class.
if hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
if isinstance(obj, dict):
contents = {}
for key, val in obj.items():
new_key = _recursion_helper(key) if include_original_keys else key
new_val = _recursion_helper(val)
contents[new_key] = new_val
else:
contents = []
for element in obj:
new_element = _recursion_helper(element)
contents.append(new_element)
# Use the same class as the original.
return obj.__class__(contents)
# If it is not replaced and it is not an iterable, return it.
return obj
return _recursion_helper(original) | aee393b09c74eb6cb1417d017d7004ac69bb3543 | 2,998 |
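A quick usage sketch:

>>> recursively_replace([1, 2, {'a': 2}], {2: 'two'})
[1, 'two', {'a': 'two'}]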
def load_subspace_vectors(embd, subspace_words):
    """Loads all word vectors for the particular subspace in the list of words as a matrix
    Arguments
        embd : Dictionary of word-to-embedding for all words
        subspace_words : List of words representing a particular subspace
    Returns
        subspace_embd_mat : Matrix of word vectors stored row-wise
    """
    subspace_embd_mat = []
    for word in subspace_words:
        if word in embd:
            subspace_embd_mat.append(embd[word])
    return subspace_embd_mat | 5eb1db8be8801cf6b1fe294a6f2c93570e9a9fe1 | 3,000 |
import codecs
import binascii
def decode_hex(data):
"""Decodes a hex encoded string into raw bytes."""
try:
return codecs.decode(data, 'hex_codec')
except binascii.Error:
raise TypeError() | 115e89d6f80a6fc535f44d92f610a6312edf6daf | 3,001 |
import os
def filepath(folder, *args, ext='pkl'):
"""Returns the full path of the file with the calculated results
for the given dataset, descriptor, descriptor of the given dataset
Parameters
----------
folder : string
Full path of the folder where results are saved.
args : list or tuple
Instances of `TextureDataset`, `HEP`, `KNeighborsClassifier`, etc.
ext : string
File extension (default pkl).
Returns
-------
fullpath : string
The complete path of the file where features corresponding to the
given dataset and descriptor (and estimator) are stored.
"""
lst = []
for obj in args:
if hasattr(obj, 'acronym'):
item = obj.acronym
else:
item = obj.__name__
lst.append(item)
lst[-1] = lst[-1] + '.' + ext
fullpath = os.path.join(folder, '--'.join(lst))
return fullpath | b558559a7b92db6943b6dd04670d9dc4097b5675 | 3,003 |
import os
def normalizeFilename(filename):
"""Take a given filename and return the normalized version of it.
Where ~/ is expanded to the full OS specific home directory and all
relative path elements are resolved.
"""
result = os.path.expanduser(filename)
result = os.path.abspath(result)
return result | a83e1ece98d23708eb6ae8a2acbe4f8495f9e2b8 | 3,006 |
def get_tn(tp, fp, fn, _all):
"""
Args:
tp (Set[T]):
fp (Set[T]):
fn (Set[T]):
_all (Iterable[T]):
Returns:
Set[T]
"""
return set(_all) - tp - fp - fn | a9afa3a2f07c8b63a6d6911b9a54cf9f9df08600 | 3,007 |
def select_tests(blocks, match_string_list, do_test):
    """Remove or keep tests from list in WarpX-tests.ini according to do_test variable"""
    if do_test not in [True, False]:
        raise ValueError("do_test must be True or False")
    if not do_test:
        for match_string in match_string_list:
            print('Selecting tests without ' + match_string)
            blocks = [block for block in blocks if match_string not in block]
    else:
        for match_string in match_string_list:
            print('Selecting tests with ' + match_string)
            blocks = [block for block in blocks if match_string in block]
    return blocks | f77a0b9e91ec34b85479a442008241c7da386beb | 3,008 |
def get_namespace_from_node(node):
"""Get the namespace from the given node
Args:
node (str): name of the node
Returns:
namespace (str)
"""
parts = node.rsplit("|", 1)[-1].rsplit(":", 1)
return parts[0] if len(parts) > 1 else u":" | a2305719c0e72614f75309f1412ce71c9264b5df | 3,010 |
def make_feature(func, *argfuncs):
"""Return a customized feature function that adapts to different input representations.
Args:
func: feature function (callable)
argfuncs: argument adaptor functions (callable, take `ctx` as input)
"""
assert callable(func)
for argfunc in argfuncs:
assert callable(argfunc)
def _feature(ctx):
return func(*[argfunc(ctx) for argfunc in argfuncs])
return _feature | 26064ee0873d63edc877afdcb03a39e40453a831 | 3,012 |
from datetime import datetime
def get_time(sec_scale):
    """Time since the Unix epoch, in seconds or milliseconds depending on sec_scale.
    """
    if sec_scale == 'sec':
        scale = 0
    elif sec_scale == 'msec':
        scale = 3
    else:
        raise ValueError("sec_scale must be 'sec' or 'msec', got %r" % sec_scale)
    secs = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
    return int(secs * pow(10, scale)) | c233133d61c6347a27186ef3baf0ae2bc79cf8f2 | 3,013 |
def city_country(city, country, population=''):
"""Generate a neatly formatted city/country name."""
full_name = city + ', ' + country
if population:
return full_name.title() + ' - population ' + str(population)
else:
return full_name.title() | 23be8d5b39380fd177240e479cf77ac7eb6c7459 | 3,015 |
def generate_headermap(line,startswith="Chr", sep="\t"):
"""
>>> line = "Chr\\tStart\\tEnd\\tRef\\tAlt\\tFunc.refGene\\tGene.refGene\\tGeneDetail.refGene\\tExonicFunc.refGene\\tAAChange.refGene\\tsnp138\\tsnp138NonFlagged\\tesp6500siv2_ea\\tcosmic70\\tclinvar_20150629\\tOtherinfo"
>>> generate_headermap(line)
{'Chr': 0, 'Start': 1, 'End': 2, 'Ref': 3, 'Alt': 4, 'Func.refGene': 5, 'Gene.refGene': 6, 'GeneDetail.refGene': 7, 'ExonicFunc.refGene': 8, 'AAChange.refGene': 9, 'snp138': 10, 'snp138NonFlagged': 11, 'esp6500siv2_ea': 12, 'cosmic70': 13, 'clinvar_20150629': 14, 'Otherinfo': 15}
"""
if not line.startswith(startswith):
raise Exception("Header line should start with \"{0}\"".format(startswith))
else:
if line.startswith("#"):
line = line[1:]
return dict([(v, i) for i,v in enumerate(line.rstrip().split(sep))]) | 16bbbc07fa13ff9bc8ec7af1aafc4ed65b20ec4c | 3,016 |
import math
def log_density_igaussian(z, z_var):
"""Calculate log density of zero-mean isotropic gaussian distribution given z and z_var."""
assert z.ndimension() == 2
assert z_var > 0
z_dim = z.size(1)
return -(z_dim/2)*math.log(2*math.pi*z_var) + z.pow(2).sum(1).div(-2*z_var) | a412b9e25aecfc2baed2d783a2d7cd281fadc9fb | 3,017 |
def _get_crop_frame(image, max_wiggle, tx, ty):
"""
Based on on the max_wiggle, determines a cropping frame.
"""
pic_width, pic_height = image.size
wiggle_room_x = max_wiggle * .5 * pic_width
wiggle_room_y = max_wiggle * .5 * pic_height
cropped_width = pic_width - wiggle_room_x
cropped_height = pic_height - wiggle_room_y
left = int(tx * wiggle_room_x)
top = int(ty * wiggle_room_y)
right = left + cropped_width
bottom = top + cropped_height
return left, top, right, bottom | 18442a97544d6c4bc4116dc43811c9fcd0d203c6 | 3,019 |
def l2sq(x):
"""Sum the matrix elements squared
"""
return (x**2).sum() | c02ea548128dde02e4c3e70f9280f1ded539cee9 | 3,020 |
def comp_number_phase_eq(self):
"""Compute the equivalent number of phase
Parameters
----------
self : LamSquirrelCage
A LamSquirrelCage object
Returns
-------
qb: float
Zs/p
"""
return self.slot.Zs / float(self.winding.p) | f4679cf92dffff138a5a96787244a984a11896f9 | 3,021 |
def process_ps_stdout(stdout):
""" Process the stdout of the ps command """
return [i.split()[0] for i in filter(lambda x: x, stdout.decode("utf-8").split("\n")[1:])] | c086cc88c51484abe4308b3ac450faaba978656e | 3,023 |
from typing import Optional
from typing import Iterable
def binidx(num: int, width: Optional[int] = None) -> Iterable[int]:
""" Returns the indices of bits with the value `1`.
Parameters
----------
num : int
The number representing the binary state.
    width : int, optional
        Minimum number of digits used. Defaults to no padding.
Returns
-------
binidx : list
"""
    fill = width or 0
    return sorted(i for i, char in enumerate(f"{num:0{fill}b}"[::-1]) if char == "1") | 70d1895cf0141950d8e2f5efe6bfbf7bd8dbc30b | 3,024 |
import pytest
# tmpdir_factory is a pytest builtin, so this helper is assumed to be a fixture.
@pytest.fixture
def file_root_dir(tmpdir_factory):
"""Prepares the testing dirs for file tests"""
root_dir = tmpdir_factory.mktemp('complex_file_dir')
for file_path in ['file1.yml',
'arg/name/file2',
'defaults/arg/name/file.yml',
'defaults/arg/name/file2',
'vars/arg/name/file1.yml',
'vars/arg/name/file3.yml',
'vars/arg/name/nested/file4.yml']:
root_dir.join(file_path).ensure()
return root_dir | 834e0d850e7a7dd59d792e98ed25b909d5a20567 | 3,026 |
def _get_object_description(target):
"""Return a string describing the *target*"""
if isinstance(target, list):
data = "<list, length {}>".format(len(target))
elif isinstance(target, dict):
data = "<dict, length {}>".format(len(target))
else:
data = target
return data | 57ad3803a702a1199639b8fe950ef14b8278bec1 | 3,027 |
def computeStatistic( benchmarks, field, func ):
"""
Return the result of func applied to the values of field in benchmarks.
Arguments:
benchmarks: The list of benchmarks to gather data from.
field: The field to gather from the benchmarks.
func: The function to apply to the data, must accept a list and return a single value.
"""
results = []
for benchmark in benchmarks:
results.append( benchmark[ field ] )
return func( results ) | 7eced912d319a3261170f8274c4562db5e28c34c | 3,028 |
import cmath
def powerFactor(n):
"""Function to compute power factor given a complex power value
Will this work if we're exporting power? I think so...
"""
# Real divided by apparent
pf = n.real.__abs__() / n.__abs__()
# Determine lagging vs leading (negative).
# NOTE: cmath.phase returns counter-clockwise angle on interval [-pi, pi],
# so checking sign should be reliable for determining lead vs. lag
p = cmath.phase(n)
if p < 0:
return (pf, 'lead')
else:
return (pf, 'lag') | 1a507818f9c9906d27a1374cc9b757766b3038c1 | 3,029 |
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True | 8c586375f018be36c0e7688549a551d17d4e2bc8 | 3,030 |
def flat_list_of_lists(l):
"""flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]"""
return [item for sublist in l for item in sublist] | c121dff7d7d9a4da55dfb8aa1337ceeea191fc30 | 3,031 |
def multiVecMat( vector, matrix ):
    """
    Multiply the matrix by the vector from the right.
    Parameters:
    ----------
    vector: list
        The vector.
    matrix: list
        The matrix to be multiplied. Its row dimension must match the
        dimension of the vector.
    Returns:
        list
        An array with one entry per matrix row.
    """
    # Create a result array with one entry per matrix row
    result = [0] * len( matrix )
    # Walk the matrix row by row
    for r, row in enumerate( matrix ):
        # Stop if the dimensions do not match
        if len(row) != len(vector):
            return None
        # Walk every element in the row
        for i, elem in enumerate( row ):
            # Add to the result at the current row's index the product
            # of the current row element and the corresponding
            # element of the vector.
            result[r] += elem * vector[i]
    return result | 8a10241173ab981d6007d8ff939199f9e86806e5 | 3,033 |
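A quick usage sketch with a non-square matrix (three rows, two columns):

>>> multiVecMat([1, 2], [[1, 2], [3, 4], [5, 6]])
[5, 11, 17]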
def wait_until_complete(jobs):
"""wait jobs finish"""
return [j.get() for j in jobs] | 530c3af30ca40025891980191c1f121d8f026a53 | 3,034 |
import requests
import shutil
def download(url, filename, proxies=None):
    """
    Download the URL into the destination file.
    :param url: URL to download
    :param filename: destination file
    :param proxies: optional proxies dict passed to requests
    """
    error = ''
    try:
        req = requests.get(url, proxies=proxies, stream=True)
        with open(filename, "wb") as f:
            shutil.copyfileobj(req.raw, f)
    except FileNotFoundError:
        error = f"Error while downloading {url} - I/O Problem with {filename} : FileNotFound -> check path"
    except Exception as ex:
        error = f"Error while downloading {url}. {str(ex)}"
    return len(error) == 0, error, filename | da097b46aef574623ac975aa8d5e9506ff191d53 | 3,035 |
def etaCalc(T, Tr = 296.15, S = 110.4, nr = 1.83245*10**-5):
"""
Calculates dynamic gas viscosity in kg*m-1*s-1
Parameters
----------
T : float
Temperature (K)
Tr : float
Reference Temperature (K)
S : float
Sutherland constant (K)
nr : float
Reference dynamic viscosity
Returns
-------
eta : float
Dynamic gas viscosity in kg*m-1*s-1
"""
eta = nr * ( (Tr + S) / (T+S) )*(T/Tr)**(3/2)
return eta | 3f8182ea29fd558e86280477f2e435247d09798e | 3,037 |
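A quick sanity check: at the reference temperature both Sutherland correction factors are 1, so the reference viscosity is returned unchanged.

>>> import math
>>> math.isclose(etaCalc(296.15), 1.83245e-05)
True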
def sharpe_ratio(R_p, sigma_p, R_f=0.04):
    """
    :param R_p: annualized return of the strategy
    :param R_f: risk-free rate (default 0.04)
    :param sigma_p: volatility of the strategy's returns
    :return: sharpe_ratio
    """
    return 1.0 * (R_p - R_f) / sigma_p | d197df7aa3b92f3a32cc8f11eb675012ffe8af57 | 3,039 |
def _inline_svg(svg: str) -> str:
"""Encode SVG to be used inline as part of a data URI.
Replacements are not complete, but sufficient for this case.
See https://codepen.io/tigt/post/optimizing-svgs-in-data-uris
for details.
"""
replaced = (
svg
.replace('\n', '%0A')
.replace('#', '%23')
.replace('<', '%3C')
.replace('>', '%3E')
.replace('"', '\'')
)
return 'data:image/svg+xml,' + replaced | 4e3c25f5d91dd7691f42f9b9ace4d64a297eb32f | 3,040 |
import click
def implemented_verified_documented(function):
""" Common story options """
options = [
click.option(
'--implemented', is_flag=True,
help='Implemented stories only.'),
click.option(
'--unimplemented', is_flag=True,
help='Unimplemented stories only.'),
click.option(
'--verified', is_flag=True,
help='Stories verified by tests.'),
click.option(
'--unverified', is_flag=True,
help='Stories not verified by tests.'),
click.option(
'--documented', is_flag=True,
help='Documented stories only.'),
click.option(
'--undocumented', is_flag=True,
help='Undocumented stories only.'),
click.option(
'--covered', is_flag=True,
help='Covered stories only.'),
click.option(
'--uncovered', is_flag=True,
help='Uncovered stories only.'),
]
for option in reversed(options):
function = option(function)
return function | 8c1dd5aaa0b962d96e9e90336183a29e2cf360db | 3,041 |
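A sketch of how the stacked options might be applied to a click command (the command name and body below are hypothetical):

@click.command()
@implemented_verified_documented
def stories(implemented, unimplemented, verified, unverified,
            documented, undocumented, covered, uncovered):
    """ List stories matching the selected filters """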
from typing import Dict
import pytest
# Parametrization inferred from the request.param checks in the body.
@pytest.fixture(params=["ls5", "ls7", "ls8"])
def example_metadata(
request,
l1_ls5_tarball_md_expected: Dict,
l1_ls7_tarball_md_expected: Dict,
l1_ls8_folder_md_expected: Dict,
):
"""
Test against arbitrary valid eo3 documents.
"""
which = request.param
if which == "ls5":
return l1_ls5_tarball_md_expected
elif which == "ls7":
return l1_ls7_tarball_md_expected
elif which == "ls8":
return l1_ls8_folder_md_expected
raise AssertionError | fd67c395aa7d773bc5757ca5649fed60b023e14f | 3,042 |
def query_rockets():
"""
request all rockets
"""
query = '''
{
rockets {
id
}
}
'''
return query | 8bf6c912a21bc0250c9a74f7fc26347b50ba1fa8 | 3,044 |
import os
import shutil
def copyfile(path, dest_dir, workspace = ""):
    """
    Copy a file into dest_dir and return its new location.
    path       the full filepath to a file
    dest_dir   destination for the copy
    Returns the full filepath of the new destination, with the workspace
    removed to give a workspace-relative filepath (None for an invalid path).
    """
if os.path.isfile(path):
head, tail = os.path.split(path)
destination = os.path.join(workspace, dest_dir, tail)
if not os.path.isfile(destination):
shutil.copy(path, destination)
print("Added {0}".format(destination))
else:
print("Found {0}".format(destination))
return destination.replace(workspace + "\\", "")
else:
print("{0} is an invalid filepath!".format(path))
return None | 6c7791aa5d3ba01fdc3f5e41c118b61d78af26ac | 3,045 |
def modify_column_cell_content(content, value_to_colors):
"""
Function to include colors in the cells containing values.
Also removes the index that was used for bookkeeping.
"""
idx, value = content
if type(value) == int or type(value) == float:
color = value_to_colors[content]
        return ' '.join([r'\cellcolor{{{}}}'.format(color), str(value)])
else:
return value | efbac52eb49efa2054b7b346def96b8e7608bae7 | 3,047 |
import curses
def _make_selection(stdscr, classes, message='(select one)'):
"""
This function was originally branched from https://stackoverflow.com/a/45577262/5009004
:return: option, classes index
:rtype: (str, int)
"""
attributes = {}
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
attributes['normal'] = curses.color_pair(1)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
attributes['highlighted'] = curses.color_pair(2)
c = 0 # last character read
option = 0 # the current option that is marked
while c != 10: # Enter in ascii
stdscr.erase()
stdscr.addstr(f"{message}\n", curses.A_UNDERLINE)
for i in range(len(classes)):
if i == option:
attr = attributes['highlighted']
else:
attr = attributes['normal']
try:
stdscr.addstr(f"{i + 1}. ")
stdscr.addstr(classes[i] + '\n', attr)
except curses.error as e:
print(f"Curses error {classes[i]} {attr}")
print(e)
return None, None
c = stdscr.getch()
if c == curses.KEY_UP and option > 0:
option -= 1
elif c == curses.KEY_DOWN and option < len(classes) - 1:
option += 1
# stdscr.addstr("You chose {0}".format(classes[option]))
# stdscr.getch()
return classes[option], option | 7b2d22e70c84138d4bcfae1d1bc5d6a11d4ce806 | 3,048 |
import json
def get_config(config_path):
""" Open a Tiler config and return it as a dictonary """
with open(config_path) as config_json:
config_dict = json.load(config_json)
return config_dict | 72a2133b44ffc553ad72d6c9515f1f218de6a08c | 3,049 |
import binascii
def fmt_hex(bytes):
"""Format the bytes as a hex string, return upper-case version.
"""
# This is a separate function so as to not make the mistake of
# using the '%X' format string with an ints, which will not
# guarantee an even-length string.
#
# binascii works on all versions of Python, the hex encoding does not.
hex = binascii.hexlify(bytes)
hex = hex.decode() # Returns bytes, which makes no sense to me
return hex.upper() | d25379ec333a653549c329932e304e61c57f173d | 3,050 |
import argparse
import sys
def collect_input_arguments():
"""
Collecting input arguments at the command line for use later.
"""
parser = argparse.ArgumentParser(prog= 'Alignseq', description='Align Codon Sequences', usage='%(prog)s [options]', epilog="And that's how you make an Alignment!")
parser.add_argument('-inf', metavar='Infile', action='store', help='A input file of codons')
parser.add_argument('-outf', metavar='Outfile', action='store', help='An Output file (desired path) of codon Alignment')
parser.add_argument('-prog', metavar='Program', action='store', help='Desired program to Align Sequences', default='mafft')
parser.add_argument('-outtranslated', metavar='Outfile for Translated Data', action='store', help='An Output file (desired path) for translated data')
parser.add_argument('-outtransaligned', metavar='Outfile for Translated and Aligned Data', action='store', help='An Output file (desired path) for translated and aligned data')
parser.add_argument('-outformat', metavar='Output Format', action='store', help='An Output Format', default = "fasta")
# Will print the help menu if no arguments are passed to alignseq.py.
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
# Returns arguments for use in classes.
return parser.parse_args() | 8e07a337a311605f32714c13e7b552bb48347198 | 3,052 |
def isA(token, tt=None, tv=None):
"""
function to check if a token meets certain criteria
"""
# Row and column info may be useful? for error messages
    try:
        tokTT, tokTV, _row, _col = token
    except (TypeError, ValueError):
        return False
if tt is None and tv is None:
return True
elif tv is None:
return tt == tokTT
elif tt is None:
return tv == tokTV
else:
return tv == tokTV and tt == tokTT | d16eb9c963addcdc5eb416dc627c18ee98ddd28c | 3,053 |
import json
import six
def _HandleJsonList(response, service, method, errors):
"""Extracts data from one *List response page as JSON and stores in dicts.
Args:
response: str, The *List response in JSON
service: The service which responded to *List request
method: str, Method used to list resources. One of 'List' or
'AggregatedList'.
errors: list, Errors from response will be appended to this list.
Returns:
Pair of:
- List of items returned in response as dicts
- Next page token (if present, otherwise None).
"""
items = []
response = json.loads(response)
# If the request is a list call, then yield the items directly.
if method == 'List':
items = response.get('items', [])
# If the request is an aggregatedList call, then do all the
# magic necessary to get the actual resources because the
# aggregatedList responses are very complicated data
# structures...
elif method == 'AggregatedList':
items_field_name = service.GetMethodConfig(
'AggregatedList').relative_path.split('/')[-1]
for scope_result in six.itervalues(response['items']):
# If the given scope is unreachable, record the warning
# message in the errors list.
warning = scope_result.get('warning', None)
if warning and warning['code'] == 'UNREACHABLE':
errors.append((None, warning['message']))
items.extend(scope_result.get(items_field_name, []))
return items, response.get('nextPageToken', None) | db87c9ed87df1268e1187f74c193b5f96f9e10f7 | 3,054 |
def clip(x, min_, max_):
"""Clip value `x` by [min_, max_]."""
return min_ if x < min_ else (max_ if x > max_ else x) | 3ad7625fa3dc5a0c06bb86dc16698f6129ee9034 | 3,055 |
def Usable(entity_type,entity_ids_arr):
"""Only for Linux modules"""
filNam = entity_ids_arr[0]
return filNam.endswith(".ko.xz") | d64aebf033fad9d81350b9221368c2208d9a003f | 3,057 |
from datetime import datetime
def time_span(ts):
    """Return a human-readable time difference."""
    delta = datetime.now() - ts.replace(tzinfo=None)
    if delta.days >= 365:
        return '%d years ago' % (delta.days / 365)
    elif delta.days >= 30:
        return '%d months ago' % (delta.days / 30)
    elif delta.days > 0:
        return '%d days ago' % delta.days
    elif delta.seconds < 60:
        return "%d seconds ago" % delta.seconds
    elif delta.seconds < 60 * 60:
        return "%d minutes ago" % (delta.seconds / 60)
    else:
        return "%d hours ago" % (delta.seconds / 60 / 60) | b93100a0ac3d7b7f45ea7f26b03a0f0149cce1a3 | 3,058 |
def escape_string(value):
"""escape_string escapes *value* but not surround it with quotes.
"""
value = value.replace('\\', '\\\\')
value = value.replace('\0', '\\0')
value = value.replace('\n', '\\n')
value = value.replace('\r', '\\r')
value = value.replace('\032', '\\Z')
value = value.replace("'", "\\'")
value = value.replace('"', '\\"')
return value | 1373ea81d22d246c0c0429d6588995e719bd61fb | 3,061 |
import os
import sys
def current_script_path() -> str:
"""
Return path to where the currently executing script is located
"""
return os.path.abspath(os.path.dirname(sys.argv[0])) | 09463ed76e9b688e39812203af27263b025f36e7 | 3,062 |
import unicodedata
def normalize_full_width(text):
"""
a function to normalize full width characters
"""
return unicodedata.normalize('NFKC', text) | f8b443089e7083e11f6539f4103ce05f616170c4 | 3,064 |
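A quick usage sketch (the argument contains full-width characters):

>>> normalize_full_width('ＡＢＣ１２３')
'ABC123'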
import random
def make_definitions(acronym, words_by_letter, limit=1):
"""Find definitions an acronym given groupings of words by letters"""
definitions = []
for _ in range(limit):
definition = []
for letter in acronym.lower():
opts = words_by_letter.get(letter.lower(), [])
definition.append(random.choice(opts).title() if opts else "?")
definitions.append(" ".join(definition))
return definitions | bc0af7b4e81a443c0afe62c2d77ace15bd1ab306 | 3,065 |
def coordinateToIndex(coordinate):
"""Return a raw index (e.g [4, 4]) from board coordinate (e.g. e4)"""
return [abs(int(coordinate[1]) - 8), ("a", "b", "c", "d", "e", "f", "g", "h").index(coordinate[0])] | d3dcf6d01c4bec2058cffef88867d45ba51ea560 | 3,069 |
import os
def full_file_names(file_dir):
"""
List all full file names(with extension) in target directory.
:param file_dir:
target directory.
:return:
a list containing full file names.
"""
for _, _, files in os.walk(file_dir):
return files | e70947c2ce1ff3eab5f7dd1074ba378498aedbf7 | 3,070 |
def get_parent(inst, rel_type='cloudify.relationships.contained_in'):
"""
Gets the parent of an instance
:param `cloudify.context.NodeInstanceContext` inst: Cloudify instance
:param string rel_type: Relationship type
:returns: Parent context
:rtype: :class:`cloudify.context.RelationshipSubjectContext` or None
"""
for rel in inst.relationships:
if rel_type in rel.type_hierarchy:
return rel.target
return None | 06bc76ec55735a47a3cf26df2daa4346290671ee | 3,071 |
def get_prime(num_dict):
    """Collect every prime in the dictionary (keys whose value is truthy)."""
    prime_dict = {}
    for key, value in num_dict.items():
        if value:
            prime_dict[key] = key
    return prime_dict | 49c62ae43bfe5af15f191cd8d831e82ae56c766d | 3,073 |
def get_shared_keys(param_list):
"""
For the given list of parameter dictionaries, return a list of the dictionary
keys that appear in every parameter dictionary
>>> get_shared_keys([{'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':3}, {'a':0, 'b':'beta'}])
['a', 'b']
>>> get_shared_keys([{'a':0, 'd':3}, {'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':2}])
['a']
"""
if not param_list:
return
keys = set(param_list[0].keys())
for i in range(1, len(param_list)):
keys = keys.intersection(param_list[i].keys())
keys = list(keys)
keys.sort()
return keys | 0f6aa0df4d61ba166ac7d660be80a98fdbc29080 | 3,074 |
def extract_begin_end(data):
""" Finds nif:beginIndex and nif:endIndex values.
:param data: Data sent by the client.
:return: Begin index and end index, -1 if error.
"""
try:
begin = data.split("nif:beginIndex")[1].split("\"")[1]
end = data.split("nif:endIndex")[1].split("\"")[1]
return int(begin), int(end)
except IndexError:
return -1, -1 | d5f5ce211f645f10d6a0aed1c6446963f0c3fe3e | 3,076 |
def prepare_file_hierarchy(path):
"""
Create a temporary folder structure like the following:
test_find_dotenv0/
└── child1
├── child2
│ └── child3
│ └── child4
└── .env
Then try to automatically `find_dotenv` starting in `child4`
"""
curr_dir = path
dirs = []
for f in ['child1', 'child2', 'child3', 'child4']:
curr_dir /= f
dirs.append(curr_dir)
curr_dir.mkdir()
return (dirs[0], dirs[-1]) | 25b66a7bc728f8f4b90cd9d8e678c914d2d60be9 | 3,077 |
def cmd2dict(cmd):
"""Returns a dictionary of what to replace each value by."""
pixel_count = cmd[cmd.shape[0] - 1, cmd.shape[1] - 1]
scaling_dict = dict()
for i in range(0, cmd.shape[0]):
scaling_dict[cmd[i, 0]] = round(
((cmd[i, 1] - cmd[0, 1]) / (pixel_count - cmd[0, 1])) * 255
)
return scaling_dict | 17f28fdcc5497c7d8d6aa55bbc61460e988586eb | 3,078 |
def _read_id_not_in_dict(read_ids, read_dict):
    """Return True if any read_id in the list is missing from the read_dict keys, otherwise False"""
    for read_id in read_ids:
        if read_id not in read_dict:
            return True
    return False | 3a0e0926ed33f65cc67139311af1c860f3e371ae | 3,079 |
def odds_or_evens(my_bool, nums):
"""Returns all of the odd or
even numbers from a list"""
return_list = []
for num in nums:
if my_bool:
if num % 2 == 0:
return_list.append(num)
else:
if num % 2 != 0:
return_list.append(num)
return return_list | 02b3b12acbaae10b2b0e05eec059f6571c576e80 | 3,080 |
import copy
def words_to_indexes(tree):
"""Return a new tree based on the original tree, such that the leaf values
are replaced by their indexs."""
out = copy.deepcopy(tree)
leaves = out.leaves()
for index in range(0, len(leaves)):
path = out.leaf_treeposition(index)
out[path] = index + 1
return out | 99e4ad2aa1d318af21d934aee2128b8d7b51a99f | 3,082 |
import re
def standardizeName(name):
    """
    Remove stuff not used by bngl
    """
    sbml2BnglTranslationDict = {
        "^": "",
        "'": "",
        "*": "m",
        " ": "_",
        "#": "sh",
        ":": "_",
        "α": "a",
        "β": "b",
        "γ": "g",
        "\u00a0": "",  # assumed to be a non-breaking space in the original source
        "+": "pl",
        "/": "_",
        "-": "_",
        ".": "_",
        "?": "unkn",
        ",": "_",
        "(": "",
        ")": "",
        "[": "",
        "]": "",
        # "(": "__",
        # ")": "__",
        # "[": "__",
        # "]": "__",
        ">": "_",
        "<": "_",
    }
    for element in sbml2BnglTranslationDict:
        name = name.replace(element, sbml2BnglTranslationDict[element])
    name = re.sub(r"[\W]", "", name)
    return name | 33caf35feb0c9dcc042add501a4470b1ccbd3b1c | 3,085 |
def number_from_string(s):
"""
Parse and return number from string.
Return float only if number is not an int. Assume number can be parsed from
string.
"""
try:
return int(s)
except ValueError:
return float(s) | 50cc7defe7c60b536d184aaf91c2831ab63043e1 | 3,086 |
def acceptable(*args, acceptables):
"""
If the characters in StringVars passed as arguments are in acceptables return True, else returns False
"""
for arg in args:
for char in arg:
if char.lower() not in acceptables:
return False
return True | 607cc752fb61e8a9348bfdd889afcbb8a8ee5189 | 3,087 |
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
Assumes you are returning in the order: [red, green, blue]
"""
# rgb of each pixel
pixel_r = 0
pixel_g = 0
pixel_b = 0
# how many pixels in the list[pixels]
n = 0
for pixel in pixels:
n += 1
pixel_r += pixel.red
pixel_g += pixel.green
pixel_b += pixel.blue
pixel_avg = [pixel_r//n, pixel_g//n, pixel_b//n]
return pixel_avg | 9cd694505f8d445732bc178b5d645ff273b298d1 | 3,088 |
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i | b28daa2845618df5030a79129bb7cec1167b149a | 3,089 |
def _get_marker_indices(marker, line):
""" method to find the start and end parameter markers
on a template file line. Used by write_to_template()
"""
indices = [i for i, ltr in enumerate(line) if ltr == marker]
start = indices[0:-1:2]
end = [i + 1 for i in indices[1::2]]
assert len(start) == len(end)
return start, end | 4e68f6629fd94920ddc6290c75d92e8de7b467bb | 3,090 |
import os
def get_number_of_images(dir):
"""
Returns number of files in given directory
Input:
dir - full path of directory
Output:
number of files in directory
"""
return len([name for name in os.listdir(dir) if os.path.isfile(os.path.join(dir, name))]) | a964764466aea735558a8ccc832bd0a00616883e | 3,091 |
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function.
.. versionadded:: 3.0
"""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__) | c1c31c45a059c4ee56b39322e966d30b742ef86e | 3,092 |
import os
def get_torch_core_binaries(module):
"""Return required files from the torch folders.
Notes:
So far only tested for Windows. Requirements for other platforms
are unknown.
"""
binaries = []
torch_dir = module.getCompileTimeDirectory()
extras = os.path.join(torch_dir, "lib")
if os.path.isdir(extras):
for f in os.listdir(extras):
# apart from shared libs, also the C header files are required!
if f.endswith((".dll", ".so", ".h")) or ".so." in f:
item = os.path.join(extras, f)
if os.path.isfile(item):
binaries.append((item, "."))
# this folder exists in the Linux version
extras = os.path.join(torch_dir, "bin")
if os.path.isdir(extras):
for f in os.listdir(extras):
item = os.path.join(extras, f)
if os.path.isfile(item):
binaries.append((item, "."))
# this folder exists in the Linux version
extras = os.path.join(torch_dir, "include")
if os.path.isdir(extras):
for root, _, files in os.walk(extras):
for f in files:
item = os.path.join(root, f)
if os.path.isfile(item):
binaries.append((item, "."))
return binaries | df1aa86f75fa444707ed3499b30f2806389d914c | 3,096 |