content | sha1 | id
---|---|---|
import torch
def get_adjacent_th(spec: torch.Tensor, filter_length: int = 5) -> torch.Tensor:
"""Zero-pad and unfold stft, i.e.,
add zeros to the beginning so that, using the multi-frame signal model,
there will be as many output frames as input frames.
Args:
spec (torch.Tensor): input spectrum (B, F, T, 2)
filter_length (int): length for frame extension
Returns:
ret (torch.Tensor): output spectrum (B, F, T, filter_length, 2)
""" # noqa: D400
return (
torch.nn.functional.pad(spec, pad=[0, 0, filter_length - 1, 0])
.unfold(dimension=-2, size=filter_length, step=1)
.transpose(-2, -1)
.contiguous()
) | 4009b41fd4e729e16c749f4893f61b61ca922215 | 709,234 |
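A quick shape check of get_adjacent_th (a minimal sketch; the tensor sizes below are arbitrary):

import torch
spec = torch.randn(4, 257, 100, 2)               # (B, F, T, 2)
frames = get_adjacent_th(spec, filter_length=5)
print(frames.shape)                              # torch.Size([4, 257, 100, 5, 2])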
def K2(eps):
""" Radar dielectric factor |K|**2
Parameters
----------
eps : complex
nd array of complex relative dielectric constants
Returns
-------
nd - float
Radar dielectric factor |K|**2 real
"""
K_complex = (eps-1.0)/(eps+2.0)
return (K_complex*K_complex.conj()).real | 8754bee38a46de14d205764c4843cad7c4d5d88f | 709,235 |
import torch
def projection_from_Rt(rmat, tvec):
"""
Compute the projection matrix from Rotation and translation.
"""
assert len(rmat.shape) >= 2 and rmat.shape[-2:] == (3, 3), rmat.shape
assert len(tvec.shape) >= 2 and tvec.shape[-2:] == (3, 1), tvec.shape
return torch.cat([rmat, tvec], dim=-1) | 90039ba7002be31d347b7793d542b1ff37abae3e | 709,236 |
import re
def find_version():
"""Extract the version number from the CLI source file."""
with open('pyweek.py') as f:
for l in f:
mo = re.match(r'__version__ = *(.*)\s*', l)
if mo:
return eval(mo.group(1))
else:
raise Exception("No version information found.") | 128f2399a37b27412d2fdf6cf0901c1486709a09 | 709,238 |
def _transform_playlist(playlist):
"""Transform result into a format that more
closely matches our unified API.
"""
transformed_playlist = dict([
('source_type', 'spotify'),
('source_id', playlist['id']),
('name', playlist['name']),
('tracks', playlist['tracks']['total']),
])
return transformed_playlist | 62c19c132cbb9438c7a4b993e1d79111b79b86fd | 709,239 |
def numpy_to_python_type(value):
"""
Convert to Python type from numpy with .item().
"""
try:
return value.item()
except AttributeError:
return value | f1d3a8ad77932342c182d7be76037fee3c869afe | 709,240 |
def threshold_abs(image, threshold):
"""Return thresholded image from an absolute cutoff."""
return image > threshold | 5032f632371af37e81c3ebcc587475422d5ff2bf | 709,241 |
def hexlen(x):
"""
Returns the string length of 'x' in hex format (including the '0x' prefix).
"""
return len(hex(x)) | 404ec4c3656bb35b87df6ae147db93922f2da059 | 709,242 |
def list_manipulation(lst, command, location, value=None):
"""Mutate lst to add/remove from beginning or end.
- lst: list of values
- command: command, either "remove" or "add"
- location: location to remove/add, either "beginning" or "end"
- value: when adding, value to add
remove: remove item at beginning or end, and return item removed
>>> lst = [1, 2, 3]
>>> list_manipulation(lst, 'remove', 'end')
3
>>> list_manipulation(lst, 'remove', 'beginning')
1
>>> lst
[2]
add: add item at beginning/end, and return list
>>> lst = [1, 2, 3]
>>> list_manipulation(lst, 'add', 'beginning', 20)
[20, 1, 2, 3]
>>> list_manipulation(lst, 'add', 'end', 30)
[20, 1, 2, 3, 30]
>>> lst
[20, 1, 2, 3, 30]
Invalid commands or locations should return None:
>>> list_manipulation(lst, 'foo', 'end') is None
True
>>> list_manipulation(lst, 'add', 'dunno') is None
True
"""
if command == "remove":
if location == "end":
return lst.pop()
elif location == "beginning":
return lst.pop(0)
elif command == "add":
if location == "beginning":
lst.insert(0,value)
return lst
elif location == "end":
lst.append(value)
return lst | c847257ea5508f60b84282c3ac8237b43cd3825a | 709,243 |
def get_image_info(doc):
"""Create dictionary with key->id, values->image information
"""
id_img = dict()
#add image information
for img_infor in doc['images']:
filename = img_infor['file_name']
width = img_infor['width']
height = img_infor['height']
id_img[img_infor['id']] = [filename, width, height]
return id_img | b8c91e67572e5863f773db579ce26fa86530f32e | 709,245 |
import re
def get_svg_size(filename):
"""return width and height of a svg"""
with open(filename) as f:
lines = f.read().split('\n')
width, height = None, None
for l in lines:
res = re.findall(r'<svg.*width="(\d+)pt".*height="(\d+)pt"', l)
if len(res) > 0:
# need to scale up, maybe due to omni-graffle
scale = 2
width = round(scale*float(res[0][0]))
height = round(scale*float(res[0][1]))
res = re.findall(r'width="([.\d]+)', l)
if len(res) > 0:
width = round(float(res[0]))
res = re.findall(r'height="([.\d]+)', l)
if len(res) > 0:
height = round(float(res[0]))
if width is not None and height is not None:
return width, height
assert False, 'cannot find height and width for ' + filename | 7732df636657950b050be409ef2439c975d6940d | 709,246 |
import collections
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts | 561dfe8c18810ce40ce4c0ff391d6838816de116 | 709,248 |
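For illustration, a minimal call to _get_ngrams on a hand-made token list:

counts = _get_ngrams(["the", "cat", "sat"], max_order=2)
# Counter({('the',): 1, ('cat',): 1, ('sat',): 1, ('the', 'cat'): 1, ('cat', 'sat'): 1})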
import re
def find_version(infile):
"""
Given an open file (or some other iterator of lines) holding a
configure.ac file, find the current version line.
"""
for line in infile:
m = re.search(r'AC_INIT\(\[tor\],\s*\[([^\]]*)\]\)', line)
if m:
return m.group(1)
return None | 35ac18757ee1156f046bbd9ffa68ed4898bc317a | 709,250 |
import math
def linear_warmup_decay(warmup_steps, total_steps, cosine=True, linear=False):
"""
Linear warmup for warmup_steps, optionally with cosine annealing or
linear decay to 0 at total_steps
"""
# check if both decays are not True at the same time
assert not (linear and cosine)
def fn(step):
if step < warmup_steps:
return float(step) / float(max(1, warmup_steps))
if not (cosine or linear):
# no decay
return 1.0
progress = float(step - warmup_steps) / float(
max(1, total_steps - warmup_steps)
)
if cosine:
# cosine decay
return 0.5 * (1.0 + math.cos(math.pi * progress))
# linear decay
return 1.0 - progress
return fn | 9326622a07be677cb82744a30850674ca3c5f789 | 709,251 |
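A hedged usage sketch: the returned fn is a per-step learning-rate multiplier, e.g. for PyTorch's LambdaLR scheduler (the model and optimizer here are placeholders):

import torch
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=linear_warmup_decay(warmup_steps=100, total_steps=1000))
for step in range(1000):
    optimizer.step()
    scheduler.step()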
def get_geneids_of_user_entity_ids(cursor, unification_table, user_entity_ids):
"""
Get the Entrez Gene IDs of targets using their BIANA user entity ids
"""
query_geneid = ("""SELECT G.value, G.type
FROM externalEntityGeneID G, {} U
WHERE U.externalEntityID = G.externalEntityID AND U.userEntityID = %s
""".format(unification_table))
print('\nRETRIEVING GENE IDS ASSOCIATED TO USER ENTITY IDS...\n')
ueid_to_geneid_to_types = {}
for ueid in user_entity_ids:
cursor.execute(query_geneid, (ueid,))
for row in cursor:
geneid, geneid_type = row
#print(ueid, geneid, geneid_type)
ueid_to_geneid_to_types.setdefault(ueid, {})
ueid_to_geneid_to_types[ueid].setdefault(str(geneid), set()).add(geneid_type.lower())
print('NUMBER OF USER ENTITIES ASSOCIATED WITH GENE IDS: {}'.format(len(ueid_to_geneid_to_types)))
return ueid_to_geneid_to_types | bf192c192352da64716ecab6b4523b50fea5cd0f | 709,252 |
import os
def get_path(root, path):
"""
Shortcut for ``os.path.join(os.path.dirname(root), path)``.
:param root: root path
:param path: path to file or folder
:returns: path to file or folder relative to root
"""
return os.path.join(os.path.dirname(root), path) | 73974d6d54210615b51d3765d1e5dd0d715080f1 | 709,253 |
def int_array_to_hex(iv_array):
"""
Converts an integer array to a hex string.
"""
iv_hex = ''
for b in iv_array:
iv_hex += '{:02x}'.format(b)
return iv_hex | f3332b7672a266ad9cae9fc52bc8e1152bcee58b | 709,254 |
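Assuming every value fits in a byte (0..255), the standard library gives the same result:

assert int_array_to_hex([0, 255, 16]) == bytes([0, 255, 16]).hex()  # '00ff10'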
import functools
def to_decorator(wrapped_func):
"""
Encapsulates the decorator logic for most common use cases.
Expects a wrapped function with compatible type signature to:
wrapped_func(func, args, kwargs, *outer_args, **outer_kwargs)
Example:
@to_decorator
def foo(func, args, kwargs):
print(func)
return func(*args, **kwargs)
@foo()
def bar():
print(42)
"""
@functools.wraps(wrapped_func)
def arg_wrapper(*outer_args, **outer_kwargs):
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return wrapped_func(func,
args,
kwargs,
*outer_args,
**outer_kwargs)
return wrapped
return decorator
return arg_wrapper | d7c9d0e759e59c26b7c5f7b098e15b78314c8860 | 709,255 |
def scale(a: tuple, scalar: float) -> tuple:
"""Scales the point."""
return a[0] * scalar, a[1] * scalar | 9638b8cfbd792c2deb35da304c5c375e0402404e | 709,256 |
def timedelta_to_seconds(ts):
""" Convert the TimedeltaIndex of a pandas.Series into a numpy
array of seconds. """
seconds = ts.index.values.astype(float)
seconds -= seconds[-1]
seconds /= 1e9
return seconds | 4565d7a691e8ac004d9d529568db0d032a56d088 | 709,260 |
def parse_gage(s):
"""Parse a streamgage key-value pair.
Parse a streamgage key-value pair, separated by '='; that's the reverse of ShellArgs.
On the command line (argparse) a declaration will typically look like::
foo=hello or foo="hello world"
:param s: str
:rtype: tuple(key, value)
"""
# Adapted from: https://gist.github.com/fralau/061a4f6c13251367ef1d9a9a99fb3e8d
items = s.split('=')
key = items[0].strip() # we remove blanks around keys, as is logical
value = ''
if len(items) > 1:
# rejoin the rest:
value = '='.join(items[1:])
return key, value | 299b47f3a4757c924620bdc05e74f195a4cb7967 | 709,261 |
def calc_internal_hours(entries):
"""
Calculates internal utilizable hours from an array of entry dictionaries
"""
internal_hours = 0.0
for entry in entries:
if entry['project_name'][:22] == "TTS Acq / Internal Acq" and not entry['billable']:
internal_hours = internal_hours + float(entry['hours_spent'])
return internal_hours | 0962ee49f60ac296668294e6d2f075ce981cbc55 | 709,262 |
def format_str_strip(form_data, key):
"""
"""
if key not in form_data:
return ''
return form_data[key].strip() | 44c5aaf8c5e11bfee05971d2961e5dcaf4cd8d9f | 709,263 |
def callable_or_raise(obj):
"""Check that an object is callable, else raise a :exc:`ValueError`.
"""
if not callable(obj):
raise ValueError('Object {0!r} is not callable.'.format(obj))
return obj | cb6dd8c03ea41bb94a8357553b3f3998ffcc0d65 | 709,264 |
def hello(name=None):
"""Assuming that name is a String and it checks for user typos to return a name with a first capital letter (Xxxx).
Args:
name (str): A persons name.
Returns:
str: "Hello, Name!" to a given name, or says Hello, World! if name is not given (or passed as an empty String).
"""
return "Hello, World!" if name is None or not name else "Hello, {}!".format(name.title()) | f1aafbebd49507fd5417d8752f98ae7d0af8ec33 | 709,266 |
from functools import reduce
def min_column_widths(rows):
"""Computes the minimum column width for the table of strings.
>>> min_column_widths([["some", "fields"], ["other", "line"]])
[5, 6]
"""
def lengths(row): return map(len, row)
def maximums(row1, row2) : return map(max, row1, row2)
return list(reduce(maximums, map(lengths, rows))) | 36722e4250dde561836c1ea3042b796ed7650986 | 709,267 |
import json
def parse_json(json_path):
"""
Parse training params json file to python dictionary
:param json_path: path to training params json file
:return: python dict
"""
with open(json_path) as f:
d = json.load(f)
return d | c34b241813996a8245ea8c334de72f0fbffe8a31 | 709,268 |
def sort(array: list[int]) -> list[int]:
"""Counting sort implementation.
"""
result: list[int] = [0, ] * len(array)
low: int = min(array)
high: int = max(array)
count_array: list[int] = [0 for i in range(low, high + 1)]
for i in array:
count_array[i - low] += 1
for j in range(1, len(count_array)):
count_array[j] += count_array[j - 1]
for k in reversed(array):
result[count_array[k - low] - 1] = k
count_array[k - low] -= 1
return result | 86864db6e012d5e6afcded3365d6f2ca35a5b94b | 709,269 |
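A quick sanity check of the counting sort (the input values are arbitrary):

assert sort([4, 2, 2, 8, 3, 3, 1]) == [1, 2, 2, 3, 3, 4, 8]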
def create_dic(udic):
"""
Create a glue dictionary from a universal dictionary
"""
return udic | aa854bb8f4d23da7e37aa74727446d7436524fe2 | 709,270 |
def expand_not(tweets):
"""
DESCRIPTION:
In informal speech, which is widely used in social media, it is common to use contractions of words
(e.g., don't instead of do not).
This may result in misinterpreting the meaning of a phrase especially in the case of negations.
This function expands these contractions and other similar ones (e.g it's --> it is etc...).
INPUT:
tweets: Series of a set of tweets as a python strings
OUTPUT:
Series of filtered tweets
"""
tweets = tweets.str.replace('n\'t', ' not', case=False, regex=True)
tweets = tweets.str.replace('i\'m', 'i am', case=False, regex=True)
tweets = tweets.str.replace('\'re', ' are', case=False, regex=True)
tweets = tweets.str.replace('it\'s', 'it is', case=False, regex=True)
tweets = tweets.str.replace('that\'s', 'that is', case=False, regex=True)
tweets = tweets.str.replace('\'ll', ' will', case=False, regex=True)
tweets = tweets.str.replace('\'l', ' will', case=False, regex=True)
tweets = tweets.str.replace('\'ve', ' have', case=False, regex=True)
tweets = tweets.str.replace('\'d', ' would', case=False, regex=True)
tweets = tweets.str.replace('he\'s', 'he is', case=False, regex=True)
tweets = tweets.str.replace('what\'s', 'what is', case=False, regex=True)
tweets = tweets.str.replace('who\'s', 'who is', case=False, regex=True)
tweets = tweets.str.replace('\'s', '', case=False, regex=True)
for punct in ['!', '?', '.']:
regex = "(\\"+punct+"( *)){2,}"
tweets = tweets.str.replace(regex, punct+' <repeat> ', case=False, regex=True)
return tweets | 66f4ed5c7321fe7bf5ea0d350980394a235d99e6 | 709,271 |
def get_branch_index(BRANCHES, branch_name):
"""
Get the place of the branch name in the array of BRANCHES so will know into which next branch to merge - the next one in array.
"""
for i, branch in enumerate(BRANCHES):
if branch_name == branch:
return i | c983bab67b3aa0cd1468c39f19732395c7e376f9 | 709,272 |
from bs4 import BeautifulSoup
def prettify_save(soup_objects_list, output_file_name):
"""
Saves the results of get_soup() function to a text file.
Parameters:
-----------
soup_object_list:
list of BeautifulSoup objects to be saved to the text file
output_file_name:
entered as string with quotations and with extension .txt , used to name the output text file
This function can work independent of the rest of the library.
Note:
Unique to Windows, open() needs argument: encoding = 'utf8' for it to work.
"""
prettified_soup = [BeautifulSoup.prettify(k) for k in soup_objects_list]
custom_word_added = [m + 'BREAKHERE' for m in prettified_soup]
one_string = "".join(custom_word_added)
# unique to Windows, open() needs argument: encoding = "utf8"
with open(output_file_name, 'w', encoding='utf8') as file:
file.write(one_string)
return None | 3de5b7df49837c24e89d2ded286c0098069945fd | 709,273 |
def _to_original(sequence, result):
""" Cast result into the same type
>>> _to_original([], ())
[]
>>> _to_original((), [])
()
"""
if isinstance(sequence, tuple):
return tuple(result)
if isinstance(sequence, list):
return list(result)
return result | 7b9d8d1d2b119d61b43dde253d8d3c48bd0e45b8 | 709,274 |
def get_B_R(Rdot):
"""Get B_R from Q, Qdot"""
return Rdot | 696932b9bf423289bdcf91287b0d789007322852 | 709,275 |
def ensureList(obj):
""" ensures that object is list """
if isinstance(obj, list):
return obj  # returns original list
elif hasattr(obj, '__iter__'):  # check if obj is iterable
return list(obj) # converts to list
else:
return [obj] | f845658fda36a583ac54caed1e6493d331c910fa | 709,276 |
def bytes_to_unicode_records(byte_string, delimiter, encoding):
""" Convert a byte string to a tuple containing an array of unicode
records and any remainder to be used as a prefix next time. """
string = byte_string.decode(encoding)
records = string.split(delimiter)
return (records[:-1], records[-1].encode(encoding)) | ccc3591551a6b316843cc8eafb33e45627eac752 | 709,278 |
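Sketch of stream-style use: the returned remainder is prepended to the next chunk (the chunks here are made up):

records, rest = bytes_to_unicode_records(b"a,b,c", ",", "utf-8")
# records == ['a', 'b'], rest == b'c'  (incomplete trailing record)
records, rest = bytes_to_unicode_records(rest + b",d,", ",", "utf-8")
# records == ['c', 'd'], rest == b''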
import copy
def get_state_transitions(actions):
"""
get the next state
@param actions:
@return: tuple (current_state, action, nextstate)
"""
state_transition_pairs = []
for action in actions:
current_state = action[0]
id = action[1][0]
next_path = action[1][1]
next_state = copy.deepcopy(current_state)
if 'NoTrans' not in id:
# change the state
next_state[id] = next_path
state_transition_pairs.append((current_state, action[1], next_state))
return state_transition_pairs | bbed37ed6469f5635fbc65fa07195114b4bb3dac | 709,281 |
import re
import importlib
def import_config_module( cfg_file ):
""" Returns valid imported config module.
"""
cfg_file = re.sub( r'\.py$', '', cfg_file )
cfg_file = re.sub( r'-', '_', cfg_file )
mod_name = 'config.' + cfg_file
cfg_mod = importlib.import_module( mod_name )
if not hasattr( cfg_mod, 'pre_start_config' ):
raise ImportError( 'Config file must define \'pre_start_config\' method' )
if not hasattr( cfg_mod, 'post_start_config' ):
raise ImportError( 'Config file must define \'post_start_config\' method' )
return cfg_mod | 4cb25a56df0f26f0f3c4917aad2ca4cd40e4797f | 709,282 |
import re
def tpc(fastas, **kw):
"""
Function to generate tpc encoding for protein sequences
:param fastas:
:param kw:
:return:
"""
AA = kw['order'] if kw['order'] is not None else 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
triPeptides = [aa1 + aa2 + aa3 for aa1 in AA for aa2 in AA for aa3 in AA]
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = [name]
tmpCode = [0] * 8000
for j in range(len(sequence) - 3 + 1):
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] = \
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings | b8017356980b266d78d85a867aee97c0d79ec5e5 | 709,283 |
def remove_key(d, key):
"""Safely remove the `key` from the dictionary.
Safely remove the `key` from the dictionary `d` by first
making a copy of dictionary. Return the new dictionary together
with the value stored for the `key`.
Parameters
----------
d : dict
The dictionary from which to remove the `key`.
key :
The key to remove
Returns
-------
v :
The value for the key
r : dict
The dictionary with the key removed.
"""
r = dict(d)
v = r[key]
del r[key]
return v, r | 5695b18675b52f4ca8bc3cba1ed0104425e7a04f | 709,285 |
import os
import torch
def load_model(filename, folder=None):
"""
Load a model from a file.
:param filename: name of the file to load the model from
:param folder: name of the subdirectory folder. If given, the model will be loaded from the subdirectory.
:return: model from the file
"""
if folder is not None:
path = os.path.join("./models", folder, filename)
else:
path = os.path.join("./models", filename)
model = torch.load(path, map_location='cuda:0')
return model | b4319796de4b05bf83d657c29d31124016dd9070 | 709,286 |
def get_version():
"""Returns single integer number with the serialization version"""
return 2 | f25ad858441fcbb3b5353202a53f6ebaa8874e4d | 709,287 |
def is_reviewer(user):
"""Return True if this user is a financial aid reviewer"""
# no need to cache here, all the DB lookups used during has_perm
# are already cached
return user.has_perm("finaid.review_financial_aid") | e3c599f78eb51c33ab48e3760c0f2965ba305916 | 709,288 |
def isnonempty(value):
"""
Return whether the value is not empty
Examples::
>>> isnonempty('a')
True
>>> isnonempty('')
False
:param value: string to validate whether value is not empty
"""
return value != '' | 0250cb455d8f77027d5cde9101a24683950bbdb2 | 709,289 |
import tempfile
import os
def _get_thintar_prefix(tarname):
"""
Make sure thintar temporary name is concurrent and secure.
:param tarname: name of the chosen tarball
:return: prefixed tarname
"""
tfd, tmp_tarname = tempfile.mkstemp(
dir=os.path.dirname(tarname),
prefix=".thin-",
suffix=os.path.splitext(tarname)[1],
)
os.close(tfd)
return tmp_tarname | e893309972742ffb52fd13911b5805e51b2baadc | 709,291 |
def company(anon, obj, field, val):
"""
Generates a random company name
"""
return anon.faker.company(field=field) | 95580147817a37542f75e2c728941a159cd30bd3 | 709,292 |
def write_obs(mdict, obslist, flag=0):
"""
"""
# Print epoch
epoch = mdict['epoch']
res = epoch.strftime("> %Y %m %d %H %M %S.") + '{0:06d}0'.format(int(epoch.microsecond))
# Epoch flag
res += " {0:2d}".format(flag)
# Num sats
res += " {0:2d}".format(len(mdict)-1)
res += '\n'
# For each satellite, print obs
for sat in mdict:
if sat == 'epoch':
continue
res += sat
obstypes = obslist[sat[0]]
for o in obstypes:
try:
meas = mdict[sat][o]
except KeyError:
meas = 0.0
# BeiDou satellites can have long ranges if GEO satellites are used
if meas > 40e6:
meas = 0.0
res += '{0:14.3f}00'.format(meas)
res += '\n'
return res | 5a91b02fce07f455f4442fe6fbf76d3609f5a74e | 709,294 |
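A minimal sketch of the expected input shape (the satellite ID and observation code are invented for illustration):

from datetime import datetime
mdict = {"epoch": datetime(2021, 1, 1, 0, 0, 0), "G01": {"C1C": 21000000.0}}
print(write_obs(mdict, {"G": ["C1C"]}))
# > 2021 01 01 00 00 00.0000000  0  1
# G01  21000000.00000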
from collections import Counter
def unk_emb_stats(sentences, emb):
"""Compute some statistics about unknown tokens in sentences
such as "how many sentences contain an unknown token?".
emb can be gensim KeyedVectors or any other object implementing
__contains__
"""
stats = {
"sents": 0,
"tokens": 0,
"unk_tokens": 0,
"unk_types": 0,
"unk_tokens_lower": 0,
"unk_types_lower": 0,
"sents_with_unk_token": 0,
"sents_with_unk_token_lower": 0}
all_types = set()
for sent in sentences:
stats["sents"] += 1
any_unk_token = False
any_unk_token_lower = False
types = Counter(sent)
for ty, freq in types.items():
all_types.add(ty)
stats["tokens"] += freq
unk = ty not in emb
if unk:
any_unk_token = True
stats["unk_types"] += 1
stats["unk_tokens"] += freq
if unk and ty.lower() not in emb:
any_unk_token_lower = True
stats["unk_types_lower"] += 1
stats["unk_tokens_lower"] += freq
if any_unk_token:
stats["sents_with_unk_token"] += 1
if any_unk_token_lower:
stats["sents_with_unk_token_lower"] += 1
stats["types"] = len(all_types)
return stats | 221b88e2124f3b8da2976a337476a11a7276a470 | 709,295 |
import os
def basename(path: str) -> str:
"""Returns the basename removing path and extension."""
return os.path.splitext(os.path.basename(path))[0] | 63e8e0220d1c2a9fc5b30d4bff2b609517d8cd18 | 709,296 |
def jump(inst_ptr, program, direction):
"""Jump the instruction pointer in the program until matching bracket"""
count = direction
while count != 0:
inst_ptr += direction
char = program[inst_ptr]
if char == '[':
count += 1
elif char == ']':
count -= 1
else:
pass
return inst_ptr | 76c6c4dcf4dbc452e9f2b252522871fcca95c75d | 709,297 |
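For example, finding the bracket that matches the opening '[' in a small Brainfuck-style program (direction=+1 scans forward):

program = "[->+<]"
assert jump(0, program, 1) == 5  # index of the matching ']'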
import colorsys
def summaryhsl(all_summaries, summary):
"""
Choose a color for the given system summary to distinguish it from other types of systems.
Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable.
"""
lowest_att = min(att for att, ms in all_summaries)
highest_att = max(att for att, ms in all_summaries)
att_range = highest_att - lowest_att + 1
attractors, monotonic_species = summary
lowest_ms = min(ms for att, ms in all_summaries if att == attractors)
highest_ms = max(ms for att, ms in all_summaries if att == attractors)
ms_range = highest_ms - lowest_ms + 1
bin_width = 1 / (ms_range + 1) / att_range
hue = ((highest_att - attractors) / att_range) + (highest_ms - monotonic_species) * bin_width
variability_squeeze = (2 if att_range > 1 else 1) * (2 if ms_range > 1 else 1)
return hue, 1, colorsys.ONE_THIRD, bin_width / variability_squeeze | 1e874aaa359a5d8bb566809fc2be212df2890885 | 709,298 |
def remove_duplicates_from_list(params_list):
"""
Common function to remove duplicates from a list
Author: Chaitanya-vella.kumar@broadcom.com
:param params_list:
:return:
"""
if params_list:
return list(dict.fromkeys(params_list))
return list() | 885b2e048ec672bd2d24fabe25066bc2df3ea8a8 | 709,299 |
import math
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects
:param aLocation1: starting location
:param aLocation2: ending location
:return:
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
dlong_c = dlong*math.cos(math.radians(aLocation1.lat))
return math.sqrt((dlat * dlat) + (dlong_c * dlong_c)) * 1.113195e5 | 5f1428c099f79ba8b41177f87e6a3bffed13e00b | 709,300 |
def GetCLInfo(cl_info_str):
"""Gets CL's repo_name and revision."""
return cl_info_str.split('/') | d077216b2804c249a7d0ffdbff7f992dde106501 | 709,301 |
import zipfile
import os
def zip_recursive(destination, source_dir, rootfiles):
"""
Recursively zips source_dir into destination.
rootfiles should contain a list of files in the top level directory that
are to be included. Any top level files not in rootfiles will be omitted
from the zip file.
"""
zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(source_dir):
inRoot = False
if root == source_dir:
inRoot = True
if inRoot:
# iterate over a copy: removing from dirs while iterating it would skip entries
for d in dirs[:]:
try:
rootfiles.index(d)
except ValueError:
dirs.remove(d)
for f in files[:]:
if inRoot:
try:
rootfiles.index(f)
except ValueError:
continue
fullpath = os.path.join(root, f)
zipped.write(fullpath)
zipped.close()
return destination | 267dedb78495e02bbeb2ffaddcfe2278ab72be67 | 709,302 |
import typing
def discord_api_call(method: str, params: typing.Dict, func, data, token: str) -> typing.Any:
""" Calls Discord API. """
# This code is from my other repo -> https://github.com/kirillzhosul/python-discord-token-grabber
# Calling.
return func(
f"https://discord.com/api/{method}",
params=params,
headers={
"Authorization": f"{token}",
"Content-Type": "application/json"
},
data=data
) | 84ea201c88dd4260bbc80dbd45654c01cb5a36ee | 709,303 |
def get_slice_test(eval_kwargs, test_kwargs, test_dataloader, robustness_testing_datasets):
"""
Args:
test_dataloader:
test_kwargs:
eval_kwargs (dict):
test_dataloader (Dataloader):
robustness_testing_datasets (dict):
Returns:
"""
slice_test = None
if 'slice' in robustness_testing_datasets:
slice_kwargs = {'dataset': robustness_testing_datasets['slice']}
if 'sampler' in test_kwargs:
slice_kwargs['sampler'] = test_kwargs['sampler']
slice_kwargs.update(eval_kwargs)
slice_test = test_dataloader(**slice_kwargs)
return slice_test | b995ff26fd743f106115c5d5958dd0654e0d4645 | 709,304 |
def transform_config(cfg, split_1='search:', split_2='known_papers:'):
"""Ugly function to make cfg.yml less ugly."""
before_search, after_search = cfg.split(split_1, 1)
search_default, papers_default = after_search.split(split_2, 1)
search, paper_comment = '', ''
for line in search_default.splitlines():
line = line.strip()
if line:
if line.startswith('-'):
search += ' '
elif line.startswith('# List of paper ids'):
paper_comment = line
continue
search += ' ' + line + '\n'
ok = papers_default
if '-' in papers_default:
ok = ' ['
for line in papers_default.splitlines():
line = line.strip()
if '-' in line:
ok += line.split('- ')[1] + ', '
ok = ok[:-2] + ']'
return f"{before_search}{split_1}\n{search}{paper_comment}\n{split_2}{ok}" | 78d079b6b06c8426be2b65307782129c414a42c4 | 709,305 |
import torch
def color2position(C, min=None, max=None):
"""
Converts the input color tensor into a points set
Parameters
----------
C : Tensor
the input color tensor
min : float (optional)
the minimum value for the points set. If None it will be set to -1 (default is None)
max : float (optional)
the maximum value for the points set. If None it will be set to +1 (default is None)
Returns
-------
Tensor
the points set tensor
"""
if min is None:
min = -1
if max is None:
max = 1
return torch.add(torch.mul(C, max-min), min) | 809d8cfd6f24e6abb6d65d5b576cc0b0ccbc3fdf | 709,306 |
def parse_dotted_path(path):
"""
Extracts attribute name from dotted path.
"""
try:
objects, attr = path.rsplit('.', 1)
except ValueError:
objects = None
attr = path
return objects, attr | 4685fad6461286b957a8d0056df2146fdd0f2e55 | 709,307 |
def custom_formatter(code, msg):
""" 自定义结果格式化函数
:param code: 响应码
:param msg: 响应消息
"""
return {
"code": code,
"msg": "hello",
"sss": "tt",
} | 59a7e3f9f03f9afc42b8faec6ebe23f5373d0bf0 | 709,308 |
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
"""
Allocates equal bitrate to all the tiles
"""
vid_bitrate = []
for i in range(len(chunk_frames)):
chunk = chunk_frames[i]
chunk_bitrate = [[-1 for x in range(ncol_tiles)] for y in range(nrow_tiles)]
chunk_weight = [[1. for x in range(ncol_tiles)] for y in range(nrow_tiles)]
total_weight = sum(sum(x) for x in chunk_weight)
for x in range(nrow_tiles):
for y in range(ncol_tiles):
chunk_bitrate[x][y] = chunk_weight[x][y]*pref_bitrate/total_weight
vid_bitrate.append(chunk_bitrate)
return vid_bitrate | 1883f480852d49e63c0408c9ef0daeba9e50db6b | 709,309 |
def simple_url_formatter(endpoint, url):
"""
A simple URL formatter to use when no application context
is available.
:param str endpoint: the endpoint to use.
:param str url: the URL to format
"""
return u"/{}".format(url) | 74f3e68fe10f7cc6bf8bfe81a7349a995bb79fa3 | 709,310 |
from typing import List
def generate_service(
name: str,
image: str,
ports: List[str] = [],
volumes: List[str] = [],
dependsOn: List[str] = [],
) -> str:
"""
Creates a string with docker compose service specification.
Arguments are a list of values that need to be added to each section
named after the parameter. i.e. the volume arguments are for the
volumes section of the service config.
"""
indent = ' '
service = "{s}{name}:\n{s}{s}image: {image}\n".format(
s=indent,
name=name,
image=image,
)
if ports:
service += "{s}ports:\n".format(s=indent*2)
for port in ports:
service += '{s}- "{port}"\n'.format(s=indent*3, port=port)
if volumes:
service += "{s}volumes:\n".format(s=indent*2)
for vol in volumes:
service += '{s}- {vol}\n'.format(s=indent*3, vol=vol)
if dependsOn:
service += "{s}depends_on:\n".format(s=indent*2)
for item in dependsOn:
service += '{s}- "{dep}"\n'.format(s=indent*3, dep=item)
return service | 581e37e69d73ab5b6c0ac533bd91e7b5cb5187d9 | 709,311 |
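A sketch of the output for a hypothetical service (the name and image are made up; the exact indentation follows the indent constant above):

print(generate_service("db", "postgres:13",
                       ports=["5432:5432"],
                       volumes=["pgdata:/var/lib/postgresql/data"]))
# db:
#   image: postgres:13
#   ports:
#     - "5432:5432"
#   volumes:
#     - pgdata:/var/lib/postgresql/data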
def convert_to_squad(story_summary_content, question_content, set_type):
"""
:param story_summary_content:
:param question_content:
:param set_type:
:return: formatted SQUAD data
At initial version, we are just focusing on the context and question, nothing more,
therefore we are ignoring the answer part as of now
"""
squad_formatted_content = dict()
squad_formatted_content['version'] = 'narrativeqa_squad_format'
data = []
content = story_summary_content
if set_type != 'all':
content = story_summary_content[story_summary_content['set'] == set_type]
for datum in content.itertuples(index=False):
#print(datum.summary)
data_ELEMENT = dict()
data_ELEMENT['title'] = 'dummyTitle'
paragraphs = []
paragraphs_ELEMENT = dict()
superdocument = datum.summary
paragraphs_ELEMENT['context'] = superdocument
qas = []
sub_datum = question_content[question_content['document_id'] == datum.document_id]
for q_datum in sub_datum.itertuples():
# print(indx)
#print(q_datum)
qas_ELEMENT = dict()
ANSWERS_ELEMENT = dict()
qas_ELEMENT_ANSWERS = []
qas_ELEMENT['id'] = q_datum.document_id + '-' + str(q_datum.Index)
qas_ELEMENT['question'] = q_datum.question
ANSWERS_ELEMENT['answer_start'] = -1
ANSWERS_ELEMENT['text'] = 'dummyAnswer'
qas_ELEMENT_ANSWERS.append(ANSWERS_ELEMENT)
qas_ELEMENT['answers'] = qas_ELEMENT_ANSWERS
qas.append(qas_ELEMENT)
paragraphs_ELEMENT['qas'] = qas
paragraphs.append(paragraphs_ELEMENT)
data_ELEMENT['paragraphs'] = paragraphs
data.append(data_ELEMENT)
squad_formatted_content['data'] = data
return squad_formatted_content | 5b884ef521af4d5835fef25f01cb1f11d68cfafb | 709,314 |
from typing import Any
def inject_python_resources() -> dict[str, Any]:
"""
Inject common resources to be used in Jinja templates.
"""
return dict(
isinstance=isinstance,
zip=zip,
enumerate=enumerate,
len=len,
str=str,
bool=bool,
int=int,
float=float,
) | 98fb7fbf39f20b9972ef5c0d35ae12b2864580b2 | 709,315 |
import json
def possibilities(q=0, *num):
"""
:param q: Number of squares to consider
:param num: In how many squares the sum of the number of bombs is 1
:return:
pos -> Possible distributions of the bombs
tot -> Number of squares in which there is only one bomb
i -> Start of the count of squares where the bomb sum is 1
"""
lbn = []
lp = []
num = str(num).replace('(', '[').replace(')', ']')
num = json.loads(num)
for c4 in range(0, len(num)):
num[c4] += ['']
for c1 in range(0, 2 ** q):
pos = []
bn = str(bin(c1)).replace('0b', '')  # bn = int(bn, base=2) -> reverses the process
bn = bn.rjust(q, '0')
pos += bn
ts = 0
for c2 in range(0, len(num)):
i = num[c2][0]
tot = num[c2][1] # print(bn, tot, pos)
s = 0
for c3 in range(i, tot + i):
if pos[c3] == '1':
s += 1
if num[c2][3] != '':
# print(num[c2], pos[num[c2][3]])
if pos[num[c2][3]] == '1':
s += 1
if s == num[c2][2]:
ts += 1
# print(bn, s)
if ts == len(num):
lbn += [bn]
for c5 in range(0, q):
lp += [0]
for item in lbn:
for c6 in range(0, q):
if item[c6] == '1':
lp[c6] += 1
return lp | 94c126a1bacf5bb242ad2935f949ab146f847001 | 709,316 |
def generate_free_rooms(room_times: dict) -> dict:
"""
Generates data structure for getting free rooms for each time.
"""
# create data format
free_rooms = {'M': {},
'Tu': {},
'W': {},
'Th': {},
'F': {}
}
# add empty lists for each time
for dotw in free_rooms:
for i in range(0, 144):
free_rooms[dotw][i] = []
# iterate through all the rooms. days, and times
for room in room_times:
for day in room_times[room]:
for time in room_times[room][day]:
# add the room to the corresponding time
free_rooms[day][time].append(room)
return free_rooms | e60df355acd84e60c08ba34a45a2131d8d4519b4 | 709,317 |
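A minimal sketch of the expected input/output shape (the room name and time slots are invented):

room_times = {"101": {"M": [8, 9], "Tu": [8]}}
free = generate_free_rooms(room_times)
print(free["M"][8])   # ['101']
print(free["M"][10])  # []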
def is_amazon(source_code):
"""
Method checks whether a given book is a physical book or a ebook giveaway for a linked Amazon account.
:param source_code:
:return:
"""
for line in source_code:
if "Your Amazon Account" in line:
return True
return False | 31c50622b4bb97a05d8cabb94c58f6e0a8f58971 | 709,318 |
import os
import sys
import subprocess
def transdecodeToPeptide(sample_name, output_dir, rerun_rules, sample_dir,
mets_or_mags = "mets", transdecoder_orf_size = 100,
nt_ext = ".fasta", pep_ext = ".faa", run_transdecoder = False):
"""
Use TransDecoder to convert input nucleotide metatranscriptomic sequences to peptide sequences.
"""
if (not run_transdecoder):
return 0
print("Running TransDecoder for sample " + str(sample_name) + "...", flush = True)
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
if (os.path.isfile(os.path.join(output_dir, mets_or_mags,
sample_name + pep_ext))) & (not rerun_rules):
print("TransDecoder file already detected for sample " +
str(sample_name) + "; will not re-run step.", flush = True)
return 0
elif (os.path.isfile(os.path.join(sample_dir, sample_name + pep_ext))) & (not rerun_rules):
print("Protein files detected for sample in sample directory; " +
"will not TransDecode.", flush = True)
os.system("cp " + os.path.join(sample_dir, sample_name + pep_ext) + " " +
os.path.join(output_dir, mets_or_mags, sample_name + pep_ext))
return 0
TD_log = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".log"), "w+")
TD_err = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".err"), "w+")
if (not os.path.isfile(os.path.join(sample_dir, sample_name + nt_ext))):
print("File: " + os.path.join(sample_dir, sample_name + nt_ext) + " was called by TransDecoder and "
"does not exist. Check for typos.")
sys.exit(1)
rc1 = subprocess.Popen(["TransDecoder.LongOrfs", "-t", os.path.join(sample_dir, sample_name + nt_ext),
"-m", str(transdecoder_orf_size)], stdout = TD_log, stderr = TD_err).wait()
TD_log.close()
TD_err.close()
TD_log = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".log"), "w+")
TD_err = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".err"), "w+")
rc2 = subprocess.Popen(["TransDecoder.Predict", "-t", os.path.join(sample_dir, sample_name + nt_ext),
"--no_refine_starts"], stdout = TD_log, stderr = TD_err).wait()
#rc2 = p2.returncode
TD_log.close()
TD_err.close()
if (rc1 + rc2) != 0:
print("TransDecoder did not complete successfully for sample " +
str(sample_name) + ". Check <output_dir>/log/ folder for details.")
sys.exit(1)
merged_name = sample_name + nt_ext
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags))
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
os.replace(merged_name + ".transdecoder.pep", os.path.join(output_dir, mets_or_mags,
sample_name + pep_ext))
os.replace(merged_name + ".transdecoder.cds", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.cds"))
os.replace(merged_name + ".transdecoder.gff3", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.gff3"))
os.replace(merged_name + ".transdecoder.bed", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.bed"))
#shutil.rmtree
os.system("rm -rf " + merged_name + "*.transdecoder_dir*")
return rc1 + rc2 | b22f520808104e4fc471c4af5a2288a5f23b84ae | 709,319 |
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_query_tor_network]
base_url = https://onionoo.torproject.org/details
#The Flag can be 'Running','Exit' for more information on flag settings - https://metrics.torproject.org/onionoo.html
flag = Exit
# The data fields should be comma separated and no space should be given in between each fields
data_fields = exit_addresses,or_addresses,host_name"""
return config_data | 239436c9b2141e17f6158aab20d7951d79359fcd | 709,320 |
import argparse
def parse_args():
"""
Parse command line arguments.
Parameters:
None
Returns:
parser arguments
"""
parser = argparse.ArgumentParser(description='LeNet model')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('--dataset',
dest='dataset',
help='Choice of dataset to train model',
choices=['mnist', 'cifar10'],
default=None)
optional.add_argument('--print_model',
dest='print_model',
help='Print LeNet model',
action='store_true')
optional.add_argument('--train_model',
dest='train_model',
help='Train LeNet on MNIST',
action='store_true')
optional.add_argument('-s', '--save_weights',
dest='save_weights',
help='Save the trained weights',
default=None)
optional.add_argument('-w', '--weights',
dest='weights',
help='Path to weights (hdf5) file',
default=None)
optional.add_argument('-e', '--epochs',
dest='epochs',
help='Number of epochs for training',
type=int,
default=20)
optional.add_argument('--data_augmentation',
dest='data_augmentation',
help='Use data augmentations for input',
action='store_true')
optional.add_argument('--viz_training',
dest='viz_training',
help='Visualize the training curve',
action='store_true')
parser._action_groups.append(optional)
return parser.parse_args() | 6a93e1083ebc8fad5f0698b2e0a4eb125af2806f | 709,322 |
def start_call(called_ident, skicall):
"""When a call is initially received this function is called.
Unless you want to divert to another page, this function should return called_ident which
would typically be the ident of a Responder or Template page dealing with the call.
If a ServeFile exception is raised, which contains a pathlib.Path object of a local server
file then that server file will be sent to the client. In this case, the end_call function
will not be called."""
# To serve a directory of static files, you can map a url to a server directory with the
# skicall.map_url_to_server method, which returns pathlib.Path objects, and then
# raise a ServeFile exception, which causes the file to be served. For example:
# servedfile = skicall.map_url_to_server("images", "/home/user/thisproject/imagefiles")
# if servedfile:
# raise ServeFile(servedfile)
# Of particular interest at this point are the attributes:
# skicall.received_cookies is a dictionary of cookie name:values received from the client
# skicall.call_data is a dictionary which you can set with your own data and, as skicall is
# passed on to the submit_data and end_call functions defined below, can be used to pass
# data to these functions.
# Normally you would return called_ident, which is the page being called, or None to cause a
# page not found error, or another ident (project, pagenumber) to divert the call to another page.
return called_ident | 0353d81273ea6638858bf18271f4480895ca1db1 | 709,323 |
def determine_nohit_score(cons, invert):
"""
Determine the value in the matrix assigned to nohit given SeqFindr options
:param cons: whether the Seqfindr run is using mapping consensus data
or not
:param invert: whether the Seqfindr run is inverting (missing hits to
be shown as black bars).
:type cons: None or boolean
:type invert: boolean
:returns: the value defined as no hit in the results matrix
"""
if cons is None:
nohit = 0.5
else:
nohit = 1.0
if invert:
nohit = nohit*-1.0
return nohit | d0539b5ac4dda8b4a15c6800fb4a821cb305b319 | 709,324 |
def extract_remove_outward_edges_filter(exceptions_from_removal):
"""
This creates a closure that goes through the list of tuples to explicitly state which edges are leaving from the first argument of each tuple.
Each tuple that is passed in has two members. The first member is a string representing a single node from which the children will be explicitly stated. The second member is the list of nodes that are in its child set.
If the second member is empty, every outgoing edge of that node is removed.
This covers both barren_nodes and explicit_parent_offspring.
"""
def remove_outward_edges_filter(G):
graph = G.copy()
list_of_parents = [x[0] for x in exceptions_from_removal if len(x[1]) > 0]
list_of_barrens = [x[0] for x in exceptions_from_removal if len(x[1]) == 0]
for barren in list_of_barrens:
graph.remove_edges_from([edge for edge in graph.edges() if edge[0] == barren])
for parent in list_of_parents:
current_edges = graph.out_edges(parent)
valid_edges = [(x[0],y) for x in exceptions_from_removal if x[0] == parent for y in x[1]]
graph.remove_edges_from([edge for edge in current_edges if edge not in valid_edges])
return graph
return remove_outward_edges_filter | 543e5823b8375cbdec200988ea5dd0c4f2d23d05 | 709,325 |
import torch
def ln_addTH(x : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:
"""
out = x + beta[None, :, None]
"""
return x + beta[None, :, None] | 77e556c41a33a8c941826604b4b595ea7d456f9a | 709,326 |
import torch
def support_mask_to_label(support_masks, n_way, k_shot, num_points):
"""
Args:
support_masks: binary (foreground/background) masks with shape (n_way, k_shot, num_points)
"""
support_masks = support_masks.view(n_way, k_shot*num_points)
support_labels = []
for n in range(support_masks.shape[0]):
support_mask = support_masks[n, :] #(k_shot*num_points)
support_label = torch.zeros_like(support_mask)
mask_index = torch.nonzero(support_mask).squeeze(1)
support_label= support_label.scatter_(0, mask_index, n+1)
support_labels.append(support_label)
support_labels = torch.stack(support_labels, dim=0)
support_labels = support_labels.view(n_way, k_shot, num_points)
return support_labels.long() | e6d73dc93e1e0b54d805d9c8b69785168dd2621e | 709,327 |
def list_books(books):
"""Creates a string that, on each line, informs about a book."""
return '\n'.join([f'+ {book.name}: {book.renew_count}: {book.return_date}'
for book in books]) | fce770a39def7f40ed12820a578b4e327df7da43 | 709,328 |
def format_pvalue(p_value, alpha=0.05, include_equal=True):
"""
If p-value is lower than alpha, replace it with "<alpha"; otherwise round it to three decimals
:param p_value: input p-value as a float
:param alpha: significance level
:param include_equal: include equal sign ('=') to pvalue (e.g., '=0.06') or not (e.g., '0.06')
:return: p_val: processed p-value (replaced by "<0.05" or rounded to two decimals) as a str
"""
if p_value < alpha:
p_value = "<" + str(alpha)
else:
if include_equal:
p_value = '=' + str(round(p_value, 3))
else:
p_value = str(round(p_value, 3))
return p_value | aa6506b14b68746f4fa58d951f246321e8b5a627 | 709,329 |
def genmatrix(list, combinfunc, symmetric=False, diagonal=None):
"""
Takes a list and generates a 2D-matrix using the supplied combination
function to calculate the values.
PARAMETERS
list - the list of items
combinfunc - the function that is used to calculate the value in a cell.
It has to cope with two arguments.
symmetric - Whether it will be a symmetric matrix along the diagonal.
For example, if the list contains integers, and the
combination function is abs(x-y), then the matrix will be
symmetric.
Default: False
diagonal - The value to be put into the diagonal. For some functions,
the diagonal will stay constant. An example could be the
function "x-y". Then each diagonal cell will be "0".
If this value is set to None, then the diagonal will be
calculated.
Default: None
"""
matrix = []
row_index = 0
for item in list:
row = []
col_index = 0
for item2 in list:
if diagonal is not None and col_index == row_index:
# if this is a cell on the diagonal
row.append(diagonal)
elif symmetric and col_index < row_index:
# if the matrix is symmetric and we are "in the lower left triangle"
row.append( matrix[col_index][row_index] )
else:
# if this cell is not on the diagonal
row.append(combinfunc(item, item2))
col_index += 1
matrix.append(row)
row_index += 1
return matrix | b7d8ebc916f57621a20c371139162cb0504470cd | 709,330 |
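Example: a symmetric distance matrix over integers with a fixed diagonal:

print(genmatrix([1, 3, 6], lambda x, y: abs(x - y), symmetric=True, diagonal=0))
# [[0, 2, 5], [2, 0, 3], [5, 3, 0]]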
import os
import requests
def get_recommended_meals():
"""[summary]
Returns:
[type]: [description]
"""
url = "https://themealdb.p.rapidapi.com/randomselection.php"
headers = {
"x-rapidapi-host": "themealdb.p.rapidapi.com",
"x-rapidapi-key": os.getenv("RAPIDAPI"),
}
response = requests.request("GET", url, headers=headers).json()
list_of_food = []
list_of_image = []
for food in response["meals"]:
list_of_food.append(food["strMeal"])
for image in response["meals"]:
list_of_image.append(image["strMealThumb"])
return list_of_food, list_of_image | 6d7376e94f4bad9767d81537b8ddb4808d71ca01 | 709,331 |
import os
def find_executable(name):
"""
Find executable by ``name`` by inspecting PATH environment variable, return
``None`` if nothing found.
"""
for dir in os.environ.get('PATH', '').split(os.pathsep):
if not dir:
continue
fn = os.path.abspath(os.path.join(dir, name))
if os.path.exists(fn):
return os.path.abspath(fn) | dd4b10e4b043715d211bb9be2d2c78d0218f6a86 | 709,332 |
def scale17(data, factor):
"""Solution to exercise C-1.17.
Had we implemented the scale function (page 25) as follows, does it work
properly?
def scale(data, factor):
for val in data:
val *= factor
Explain why or why not.
--------------------------------------------------------------------------
Solution:
--------------------------------------------------------------------------
No, it doesn't work. Per the text, page 21:
"It is worth noting that val is treated as a standard identifier. If the
element of the original data happens to be mutable, the val identifier can
be used to invoke its methods. But a reassignment of identifier val to a
new value has no affect on the original data, nor on the next iteration of
the loop."
The code above fails because it tries to assign a new value to the "val"
identifier. This merely breaks the alias without changing the list.
"""
for val in data:
val *= factor
return data | 84ac4012e0c839b78cb8617b6b9b7c2e8c54caa2 | 709,333 |
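For contrast, a version that really mutates the list indexes into it instead of rebinding the loop variable:

def scale_inplace(data, factor):
    """Multiply each element of data in place."""
    for i in range(len(data)):
        data[i] *= factor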
import sqlite3
def initialize_database() -> sqlite3.Connection:
"""Create a sqlite3 database stored in memory with two tables to hold
users, records and history. Returns the connection to the created database."""
with sqlite3.connect("bank_buds.db") as conn:
conn.execute("""CREATE TABLE IF NOT EXISTS user(
customer_id TEXT NOT NULL,
firstName TEXT NOT NULL,
lastName TEXT NOT NULL,
userName TEXT NOT NULL,
userPass TEXT NOT NULL,
balance INTEGER NOT NULL)""")
conn.execute("""CREATE TABLE IF NOT EXISTS user_record(
rec_id TEXT REFERENCES user NOT NULL,
wins INTEGER NOT NULL,
losses INTEGER NOT NULL)""")
conn.execute("""CREATE TABLE IF NOT EXISTS challenge_history(
challenge_id INTEGER NOT NULL,
challenge_starter TEXT REFERENCES user NOT NULL,
challenge_opponent TEXT REFERENCES user NOT NULL,
challenge_winner TEXT REFERENCES user NOT NULL,
challenge_loser TEXT REFERENCES user NOT NULL,
is_active INTEGER NOT NULL,
goal INTEGER NOT NULL)""")
return conn | c3e32534de39a53686672c5c537a2c277fa2d06d | 709,334 |
def status(proc):
"""Check for processes status"""
if proc.is_alive==True:
return 'alive'
elif proc.is_alive==False:
return 'dead'
else:
return proc.is_alive() | e257385f06979643e19fd9facc2118f4ae07c909 | 709,335 |
def gen_blinds(depth, width, height, spacing, angle, curve, movedown):
"""Generate genblinds command for genBSDF."""
nslats = int(round(height / spacing, 0))
slat_cmd = "!genblinds blindmaterial blinds "
slat_cmd += "{} {} {} {} {} {}".format(
depth, width, height, nslats, angle, curve)
slat_cmd += "| xform -rz -90 -rx -90 -t "
slat_cmd += f"{-width/2} {-height/2} {-movedown}\n"
return slat_cmd | 2e8a2751f2bb2be0c2ffdff8218961b0b1c0191b | 709,336 |
def build_headers(access_token, client_id):
"""
:param access_token: Access token granted when the user links their account
:param client_id: This is the api key for your own app
:return: Dict of headers
"""
return {'Content-Type': 'application/json',
'Authorization': f'Bearer {access_token}',
'trakt-api-version': '2',
'trakt-api-key': client_id} | 5cd8ae3e06f67b7a4fdb1644ae82c62cb54479cb | 709,337 |
def lab_equality(lab1, lab2):
"""
Check if two labs are identical
"""
if lab1["ncolumns"] != lab1["ncolumns"] or lab1["nlines"] != lab2["nlines"]:
return False
return all(set(lab1[cell]) == set(lab2[cell]) for cell in lab1.keys() if type(cell) != type("a")) | d5ffca9acfa6bc2cc324f1b6c5ed416541812c13 | 709,339 |
def is_sorted(t):
"""Checks whether a list is sorted.
t: list
returns: boolean
"""
return t == sorted(t) | 442c5a4670c595f3dea45c8aac315eda5dae26d0 | 709,340 |
def values(series):
"""Count the values and sort.
series: pd.Series
returns: series mapping from values to frequencies
"""
return series.value_counts(dropna=False).sort_index() | d4ef6b93b7f2790d8130ac045e9c315b8d57a245 | 709,341 |
def normalization_reg_loss(input):
"""
input: [..., 3]
It computes the length of each vector and uses the L2 loss between the lengths and 1.
"""
lengths = (input ** 2).sum(dim=-1).sqrt()
loss_norm_reg = ((lengths - 1) ** 2).mean()
return loss_norm_reg | 3b9d999c90d8e9b3ce797d286bb2f0b215fa7ee5 | 709,342 |
def _get_window_size(offset, step_size, image_size):
"""
Calculate window width or height.
Usually same as block size, except when at the end of image and only a
fraction of block size remains
:param offset: start column/row
:param step_size: block width/ height
:param image_size: image width/ height
:return: window width/ height
"""
if offset + step_size > image_size:
return image_size - offset
else:
return step_size | 90d65229c54a5878fa9b2af8e30293e743679e42 | 709,343 |
def sturm_liouville_function(x, y, p, p_x, q, f, alpha=0, nonlinear_exp=2):
"""Second order Sturm-Liouville Function defining y'' for Lu=f.
This form is used because it is expected for Scipy's solve_ivp method.
Keyword arguments:
x -- independent variable
y -- dependent variable
p -- p(x) parameter
p_x -- derivative of p_x wrt x
q -- q(x) parameter
f -- forcing function f(x)
alpha -- nonlinear parameter
nonlinear_exp -- exponent of nonlinear term
"""
y_x = y[1]
y_xx = -1*(p_x/p)*y[1] + (q/p)*y[0] + (q/p)*alpha*y[0]**nonlinear_exp - f/p
return [y_x, y_xx] | 5c34cc622075c640fe2dec03b1ae302192d0f779 | 709,344 |
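A hedged sketch of how this signature plugs into scipy's solve_ivp (the constant coefficients p=1, p_x=0, q=1, f=0 are arbitrary choices):

from scipy.integrate import solve_ivp
sol = solve_ivp(sturm_liouville_function, (0.0, 1.0), [1.0, 0.0],
                args=(1.0, 0.0, 1.0, 0.0))  # p, p_x, q, f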
def is_private(key):
"""
Returns whether or not an attribute is private.
A private attribute looks like: __private_attribute__.
:param key: The attribute key
:return: bool
"""
return key.startswith("__") and key.endswith("__") | 498e7522e95317dbb171961f0f5fe8350c29a69d | 709,345 |
def _channel_name(row, prefix="", suffix=""):
"""Formats a usable name for the repeater."""
length = 16 - len(prefix)
name = prefix + " ".join((row["CALL"], row["CITY"]))[:length]
if suffix:
length = 16 - len(suffix)
name = ("{:%d.%d}" % (length, length)).format(name) + suffix
return name | 4452670e28b614249fb184dd78234e52ee241086 | 709,346 |
def d_out_dist_cooler(P_mass, rho_dist_cool, w_drift):
"""
Calculates the diameter of the tube carrying distillate from the distillate cooler to the distillate tank.
Parameters
----------
P_mass : float
The mass flow rate of distillate, [kg/s]
rho_dist_cool : float
The density of the liquid at cooling temperature, [kg/m**3]
w_drift : float
The velocity of the flow in the tube, [m/s]
Returns
-------
d_out_dist_cooler : float
The diameter of the tube carrying distillate from the distillate cooler to the distillate tank, [m]
References
----------
&&&
"""
return (P_mass/(0.785*rho_dist_cool*w_drift))**0.5  # 0.785 ~ pi/4; d = sqrt(Q / (pi/4 * w)) | 8d6dfb85aa954ef88c821d2ee1d0bb787d409e96 | 709,347 |
import socket
def is_port_in_use(port):
"""
test if a port is being used or is free to use.
:param port:
:return:
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0 | 5bbdd7b39c2380d2e07e85f483f3ea5072bb616b | 709,348 |
def mixin_method(ufunc, rhs=None, transpose=True):
"""Decorator to register a mixin class method
Using this decorator ensures that derived classes that are declared
with the `mixin_class` decorator will also have the behaviors that this
class has.
ufunc : numpy.ufunc
A universal function (or NEP18 callable) that is hooked in awkward1,
i.e. it can be the first argument of a behavior
rhs : Set[type] or None
List of right-hand side argument types (leave None if unary function)
The left-hand side is expected to always be ``self`` of the parent class
If the function is not unary or binary, call for help :)
transpose : bool
Automatically create a transpose signature (only makes sense for binary ufuncs)
"""
def register(method):
if not isinstance(rhs, (set, type(None))):
raise ValueError("Expected a set of right-hand-side argument types")
if transpose and rhs is not None:
def transposed(left, right):
return method(right, left)
method._awkward_mixin = (ufunc, rhs, transposed)
else:
method._awkward_mixin = (ufunc, rhs, None)
return method
return register | d1130740628eb947bd786bc3393343b8c283164d | 709,349 |
import os
def get_new_file_number(pat, destdir, startnum=1, endnum=10000):
"""Substitute the integers from startnum to endnum into pat and
return the first one that doesn't exist. The file name that is
searched for is os.path.join(destdir, pat % i)."""
for i in range(startnum, endnum):
temp = pat % i
if not os.path.exists(os.path.join(destdir, temp)):
return i | 33ffed09804692bd1cb3dbe94cbdfc36eed42270 | 709,351 |
import sys
import six
def cast_env(env):
"""Encode all the environment values as the appropriate type for each Python version
This assumes that all the data is or can be represented as UTF8"""
env_type = six.ensure_binary if sys.version_info[0] < 3 else six.ensure_str
return {env_type(key): env_type(value) for key, value in six.iteritems(env)} | 885811983c6ca8732338a68f683e5c0f833820c2 | 709,352 |