content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
---|---|---|
def unique(list_, key=lambda x: x):
"""efficient function to uniquify a list preserving item order"""
seen = set()
result = []
for item in list_:
seenkey = key(item)
if seenkey in seen:
continue
seen.add(seenkey)
result.append(item)
return result | 57c82081d92db74a7cbad15262333053a2acd3a7 | 709,914 |
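A quick usage sketch of `unique` as defined above; the sample values are made up:
print(unique([3, 1, 3, 2, 1]))                            # [3, 1, 2]
print(unique(["Apple", "apple", "PEAR"], key=str.lower))  # ['Apple', 'PEAR']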
def shorten_namespace(elements, nsmap):
"""
    Map a list of XML tag names onto the internal class names (i.e. with shortened namespaces)
    :param elements: list of XML tags (a single tag is also accepted)
    :param nsmap: XML nsmap
    :return: list of mapped names (or a single name if a single tag was passed)
"""
names = []
_islist = True
if not isinstance(elements, (list, frozenset)):
elements = [elements]
_islist = False
for el in elements:
for key, value in nsmap.items():
if value in el:
if key == "cim":
name = el.split(value)[-1]
name = name[1:] if name.startswith("}") else name
elif "{"+value+"}" in el:
name = el.replace("{"+value+"}", key+"_")
else:
name = el.replace(value, key+"_")
names.append(name)
if el.startswith("#"):
names.append(el.split("#")[-1])
if not _islist and len(names) == 1:
names = names[0]
return names | 73dfc4f24a9b0a73cf7b6af7dae47b880faa3e27 | 709,915 |
import math
def func2():
"""
:type: None
:rtype: List[float]
"""
return [math.pi, math.pi / 2, math.pi / 4, math.pi / 8] | 62984ba7d8c1efd55569449adbf507e73888a1b7 | 709,916 |
import random
def weight(collection):
"""Choose an element from a dict based on its weight and return its key.
Parameters:
- collection (dict): dict of elements with weights as values.
Returns:
string: key of the chosen element.
"""
# 1. Get sum of weights
    weight_sum = sum(collection.values())
# 2. Generate random number between 1 and sum of weights
random_value = random.randint(1, weight_sum)
# 3. Iterate through items
for key, value in collection.items():
# 4. Subtract weight of each item from random number
random_value -= value
# 5. Compare with 0, if <= 0, that item has been chosen
if random_value <= 0:
return key
# 6. Else continue subtracting
# Should not reach here.
raise ValueError("Invalid argument value.") | 383ddadd4a47fb9ac7be0292ecc079fcc59c4481 | 709,917 |
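A minimal usage sketch of `weight`; the table below is illustrative only:
loot_table = {"common": 70, "rare": 25, "legendary": 5}
drop = weight(loot_table)     # 'common' roughly 70% of the time
print(drop in loot_table)     # True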
from typing import List
def make_matrix(points: List[float], degree: int) -> List[List[float]]:
"""Return a nested list representation of a matrix consisting of the basis
elements of the polynomial of degree n, evaluated at each of the points.
In other words, each row consists of 1, x, x^2, ..., x^n, where n is the degree,
and x is a value in points.
Preconditions:
- degree < len(points)
>>> make_matrix([1, 2, 3], 2)
[[1, 1, 1], [1, 2, 4], [1, 3, 9]]
"""
matrix = []
for point in points:
row = [point ** index for index in range(degree + 1)]
matrix.append(row)
return matrix | d8fbea3a0f9536cb681b001a852b07ac7b17f6c2 | 709,918 |
def adjust_seconds_fr(samples_per_channel_in_frame,fs,seconds_fr,num_frame):
"""
Get the timestamp for the first sample in this frame.
Parameters
----------
samples_per_channel_in_frame : int
number of sample components per channel.
fs : int or float
sampling frequency.
seconds_fr : int or float
seconds for this frame (from frame header)
num_frame : int
frame number (from frame header).
Returns
-------
    time_first_sample : float
timestamp [s] corresponding to the first sample of this frame.
"""
seconds_per_frame=samples_per_channel_in_frame/float(fs)
time_first_sample=float(seconds_fr)+num_frame*seconds_per_frame
return(time_first_sample) | a19775db3ebcdbe66b50c30bc531e2980ca10082 | 709,919 |
def add_header(unicode_csv_data, new_header):
    """
    Prepend `new_header` (comma-joined) to the rows of `unicode_csv_data` and return an iterator over the result.
    """
final_iterator = [",".join(new_header)]
for row in unicode_csv_data:
final_iterator.append(row)
return iter(final_iterator) | 1fa50492d786aa28fba6062ac472f1c6470a6311 | 709,920 |
def is_success(msg):
    """
    Whether the message reports success
    :param msg: message dict with a 'status' field
    :return: True if msg['status'] == 'success'
    """
return msg['status'] == 'success' | 43ecbf3c7ac8d03ce92ab059e7ec902e51505d0a | 709,921 |
def list_strip_comments(list_item: list, comment_denominator: str = '#') -> list:
"""
Strips all items which are comments from a list.
:param list_item: The list object to be stripped of comments.
    :param comment_denominator: The character with which comment lines start.
:return list: A cleaned list object.
"""
_output = list()
for _item in list_item:
if not _item[0] == comment_denominator:
_output.append(_item)
return _output | e5dd6e0c34a1d91586e12e5c39a3a5413746f731 | 709,922 |
def kebab(string):
"""kebab-case"""
return "-".join(string.split()) | 24bc29e066508f6f916013fa056ff54408dcd46d | 709,923 |
def counter_format(counter):
"""Pretty print a counter so that it appears as: "2:200,3:100,4:20" """
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items())) | 992993a590eabb2966eb9de26625077f2597718c | 709,925 |
def _update_dict_within_dict(items, config):
""" recursively update dict within dict, if any """
for key, value in items:
if isinstance(value, dict):
config[key] = _update_dict_within_dict(
value.items(), config.get(key, {})
)
else:
config[key] = value
return config | 75b840b8091568b80f713b2ca7725b1a1f917d3a | 709,926 |
def select_report_data(conn):
    """ Select report data from the DB """
cur = conn.cursor()
cur.execute("SELECT * FROM report_analyze")
report = cur.fetchall()
cur.close()
return report | 9d0bf6d4f6758c873bd6643673784239f9bf4557 | 709,927 |
import numpy
def func_lorentz_by_h_pv(z, h_pv, flag_z: bool = False, flag_h_pv: bool = False):
    """Lorentz function as a function of h_pv
    """
inv_h_pv = 1./h_pv
inv_h_pv_sq = numpy.square(inv_h_pv)
z_deg = z * 180./numpy.pi
c_a = 2./numpy.pi
a_l = c_a * inv_h_pv
b_l = 4.*inv_h_pv_sq
z_deg_sq = numpy.square(z_deg)
res = numpy.expand_dims(a_l, axis=-1) /(1+ numpy.expand_dims(b_l, axis=-1) * z_deg_sq)
dder = {}
if flag_z:
dder["z"] = -2.*z_deg*numpy.expand_dims(b_l,axis=-1)*res/(1.+numpy.expand_dims(b_l, axis=-1)*z_deg_sq) * 180./numpy.pi
if flag_h_pv:
dder["h_pv"] = (c_a * (numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq) - \
c_a * numpy.expand_dims(h_pv, axis=-1))/numpy.square(numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq)
return res, dder | 802029e167439471e892fbfbfe4d6fdce8cb1a0e | 709,928 |
from pathlib import Path
def all_files(dir, pattern):
"""Recursively finds every file in 'dir' whose name matches 'pattern'."""
    return [f.as_posix() for f in Path(dir).rglob(pattern)] | 45f12cda2e16cb745d99d2c8dfb454b32130e1c8 | 709,929 |
def matches(spc, shape_):
"""
Return True if the shape adheres to the spc (spc has optional color/shape
restrictions)
"""
(c, s) = spc
matches_color = c is None or (shape_.color == c)
matches_shape = s is None or (shape_.name == s)
return matches_color and matches_shape | fa9c90ea2be17b0cff7e4e76e63cf2c6a70cc1ec | 709,930 |
def is_str(element):
"""True if string else False"""
check = isinstance(element, str)
return check | c46b80d109b382de761618c8c9a50d94600af876 | 709,932 |
def deal_text(text: str) -> str:
    """Process the text: indent it and insert a line break after each
    Chinese sentence-ending punctuation mark.
    Args:
        text (str): text to be processed
    Returns:
        str: processed text
    """
text = " "+text
text = text.replace("。","。\n ")
text = text.replace("?","?\n ")
text = text.replace("!","!\n ")
text = text.replace(";",";\n ")
return text | 8f16e7cd2431dfc53503c877f9d4b5429f738323 | 709,933 |
import zipfile
import os
def extract_zip(src, dest):
"""extract a zip file"""
bundle = zipfile.ZipFile(src)
namelist = bundle.namelist()
for name in namelist:
filename = os.path.realpath(os.path.join(dest, name))
if name.endswith('/'):
os.makedirs(filename)
else:
path = os.path.dirname(filename)
if not os.path.isdir(path):
os.makedirs(path)
_dest = open(filename, 'wb')
_dest.write(bundle.read(name))
_dest.close()
bundle.close()
return namelist | 5e8af22a446e52c26b99b71fefdd29d3b10e02ec | 709,934 |
def list2str(lst, indent=0, brackets=True, quotes=True):
"""
    Generate a Python syntax list string with an indentation
    :param lst: list
    :param indent: indentation as integer
:param brackets: surround the list expression by brackets as boolean
:param quotes: surround each item with quotes
:return: string
"""
if quotes:
lst_str = str(lst)
if not brackets:
lst_str = lst_str[1:-1]
else:
lst_str = ', '.join(lst)
if brackets:
lst_str = '[' + lst_str + ']'
lb = ',\n' + indent*' '
return lst_str.replace(', ', lb) | ef441632bf59714d3d44ede5e78835625b41f047 | 709,935 |
import os
def full_path(path):
"""
Get an absolute path.
"""
if path[0] == "/":
return path
return os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", path)
) | 0ea845638d541521277fac3904cb1c1b243e88b1 | 709,936 |
import _queue
def model_from_queue(model):
""" Returns the model dict if model is enqueued, else None."""
return _queue.get(model, None) | 46eea9b8a218181b000308b080a8c9dad7e866b2 | 709,937 |
def get_class_name(obj, instance=True):
"""
Given a class or instance of a class, returns a string representing the
fully specified path of the class.
Parameters
----------
obj : object
An instance of any object
instance: bool
Indicates whether given object is an instance of the class to be named
"""
typ = type(obj) if instance else obj
return "{}.{}".format(typ.__module__, typ.__name__) | 3a7ebd1fb2682ec5dff6d42cd2cccf918d67f9a0 | 709,938 |
def maxindices(l):
"""
    Get indices of all occurrences of the maximal element in a list
:param l:
:return:
"""
max_indices = []
    max_value = l[0]  # assumes a non-empty list
for i, v in enumerate(l):
if v > max_value:
max_value = v
max_indices = [i]
elif v == max_value:
max_indices.append(i)
return max_indices | b2f155fa97455c0327b2717591ebea2176773012 | 709,939 |
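A quick usage sketch of `maxindices`:
print(maxindices([3, 7, 1, 7, 7, 2]))  # [1, 3, 4]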
def multiples(a, b):
"""This function checks if a number is a multiple of another."""
if type(a) != int or type(b) != int:
raise Exception('Values must be integers.')
elif a == 0:
raise Exception('0 is not valid.')
elif a == b:
raise Exception('Numbers should not be the same.')
else:
if b > a:
check = b % a
if not check:
return True
else:
return False
else:
raise Exception("Error! {0} isn't greater than {1}."
.format(b, a)) | 3f8bccd5429b5d307c0a018b7186bd75a76e996a | 709,941 |
def retr_amplslen(peri, radistar, masscomp, massstar):
"""
Calculate the self-lensing amplitude.
Arguments
peri: orbital period [days]
radistar: radius of the star [Solar radius]
masscomp: mass of the companion [Solar mass]
massstar: mass of the star [Solar mass]
Returns
amplslen: the fractional amplitude of the self-lensing
"""
amplslen = 7.15e-5 * radistar**(-2.) * peri**(2. / 3.) * masscomp * (masscomp + massstar)**(1. / 3.) * 1e3 # [ppt]
return amplslen | 32c0618f0e5965357fbcadd090443d0baf0e65bd | 709,942 |
from datetime import datetime
def calculate_current_teach_week(semester_first_week_date='2021-3-08 08:00:00'):
"""
    Calculate the teaching week that the current date falls in.
    Idea: (week-of-year of the current date) - (week-of-year of the semester's first week).
    ----
    param: semester_first_week_date: date of the first week of the semester, e.g. '2021-3-08 08:00:00'
    return: current teaching week
    """
    # Week of the year (as a string) for the given date
    semester_first_week = datetime.strptime(semester_first_week_date, '%Y-%m-%d %H:%M:%S').strftime('%W')
    # Week of the year (as a string) for the current date
    current_year_week = datetime.now().strftime('%W')
    # Teaching week for the current date:
    # the "- 1" inside the parentheses accounts for the weeks before the first teaching week,
    # and the final "+ 1" compensates for %W numbering weeks from 00
current_teach_week = int(current_year_week) - (int(semester_first_week) - 1) + 1
return current_teach_week | 01a8df84b878e192dae1b1d0d38d78fb5c19f93e | 709,943 |
import re
def get_sandbox_table_name(dataset_id, rule_name):
"""
A helper function to create a table in the sandbox dataset
:param dataset_id: the dataset_id to which the rule is applied
:param rule_name: the name of the cleaning rule
:return: the concatenated table name
"""
return '{dataset_id}_{rule_name}'.format(dataset_id=dataset_id,
rule_name=re.sub(
r'\W', '_', rule_name)) | ee07d40f885cb9d6d0d34cc0215620a2572b6b5f | 709,944 |
import copy
def recursive_dict_merge(dict1, dict2):
"""
Merges dictionaries (of dictionaries).
Preference is given to the second dict, i.e. if a key occurs in both dicts, the value from `dict2` is used.
"""
result = copy.deepcopy(dict1)
for key in dict2:
if key in dict1 and isinstance(dict1[key], dict) and isinstance(dict2[key], dict):
result[key] = recursive_dict_merge(dict1[key], dict2[key])
else:
result[key] = dict2[key]
return result | fbcb51ad47de0dd4d1c95cd59873918187736b63 | 709,945 |
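A usage sketch of `recursive_dict_merge`; the configuration keys are made up:
defaults = {"logging": {"level": "INFO", "file": "app.log"}, "workers": 2}
overrides = {"logging": {"level": "DEBUG"}, "workers": 4}
print(recursive_dict_merge(defaults, overrides))
# {'logging': {'level': 'DEBUG', 'file': 'app.log'}, 'workers': 4}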
import os
def get_hosts_from_file(hostfile):
"""
Return the list of hosts from a given host file.
"""
hosts = []
if os.path.exists(hostfile):
for line in open(hostfile, "r").readlines():
hosts.append(line.split(' ', 1)[0])
return hosts | f49b7734caa679b328a9649c5fb6fd3009c20e18 | 709,946 |
import os
import errno
def process_exists(pid): # type: (int) -> bool
""" Checks if the processed with the given *pid* exists. Returns #True if
that is the case, #False otherwise. """
if pid == 0:
return False
try:
os.kill(pid, 0)
except OSError as exc:
if exc.errno == errno.ESRCH:
return False
return True | 80bc3de2270d69ca7b4b5e60e4e87d13253e2d11 | 709,947 |
import sys
import os
def get_application_name():
"""Attempts to find the application name using system arguments."""
if hasattr(sys, 'argv') and sys.argv[0]:
app_name = os.path.basename(sys.argv[0])
else:
app_name = None
return app_name | f37c0913b2e227e20a22e3d6bd8ba1fdf4b7f6f3 | 709,948 |
def the_H_function(sorted_citations_list, n=1):
    """From a sorted (descending) list of integers [n1, n2, ...] representing publication citation counts,
    return the largest (1-based) list position h whose value is >= h,
    e.g.
>>> the_H_function([10, 8, 5, 4, 3]) => 4
>>> the_H_function([25, 8, 5, 3, 3]) => 3
>>> the_H_function([1000, 20]) => 2
"""
if sorted_citations_list and sorted_citations_list[0] >= n:
return the_H_function(sorted_citations_list[1:], n + 1)
else:
return n - 1 | 24ad3d85963ef0a9d4531ba552371d7e829f1c2a | 709,949 |
import random
from bs4 import BeautifulSoup
def get_random_quote(quotes_list):
"""Return a random quote to user."""
upper_limit = len(quotes_list)-1
select = random.randint(0, upper_limit)
selected_quote = quotes_list[select]
soup = BeautifulSoup(selected_quote, 'html.parser')
return soup.text | c50f99640da88319c2b643b0fe1c386206c0c00b | 709,950 |
def get_extension(fname):
"""
Get file extension.
"""
return '.' + fname.split(".")[-1] | 9fa6f63d848aa7781b55e9cc384c9a8cb9665c69 | 709,951 |
def rotation(new_rotation=0):
"""Set the display rotation.
:param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270
"""
global _rotation
if new_rotation in [0, 90, 180, 270]:
_rotation = new_rotation
return True
else:
raise ValueError("Rotation: 0, 90, 180 or 270 degrees only") | 4f12a90e104ef66e50520523d23b3fff421fa991 | 709,952 |
def parse_url (url:str) -> str:
"""
    Normalize a URL
    -> hello/world
    <- /hello/world
    """
    if url == "": url = "/"
    if not url.startswith('/'): url = "/" + url  # add a leading slash
    # if not url.endswith("/"): url += "/"  # add a trailing slash
return url | dd2ace64bd5926f2b20a77c81a1e885e8a4d3d2b | 709,953 |
def get_vmstat():
"""
Get and format the content of /proc/vmstat
"""
buf = open("/proc/vmstat").read()
buf = [v.replace(' ', ":") for v in buf.split("\n")]
buf = ";".join(buf)
return buf | b2db72bbc3b143ff1ba37ee7e2dcc33295d4a4ea | 709,954 |
def deep_seq_map(xss, fun, keys=None, fun_name=None, expand=False):
"""Applies fun to list of or dict of lists; adds the results in-place.
Usage: Transform a corpus iteratively by applying functions like
`tokenize`, `lower`, or vocabulary functions (word -> embedding id) to it.
from jtr.sisyphos.vocab import Vocab
vocab = Vocab()
keys = ['question', 'support']
corpus = deep_map(corpus, lambda x: x.lower(), keys)
corpus = deep_map(corpus, tokenize, keys)
corpus = deep_map(corpus, vocab, keys)
corpus = deep_map(corpus, vocab._normalize, keys=keys)
-> through tokenize we go from a dict of sentences to
a dict of words (list of lists), thus we now apply deep_seq_map for
processing to add start of and end of sentence tags:
corpus = deep_seq_map(corpus, lambda xs: ["<SOS>"] + xs +
["<EOS>"],
['question', 'support'])
-> From here we can create batches from the corpus and feed it into a model.
In case `expand==False` each top-level entry of `xs` to be transformed
replaces the original entry.
`deep_map` supports `xs` to be a dictionary or a list/tuple:
- In case `xs` is a dictionary, its transformed value is also a dictionary, and `keys` contains the keys of the
values to be transformed.
- In case `xs` is a list/tuple, `keys` contains the indices of the entries to be transformed
The function `deep_map` is recursively applied to the values of `xs`;
the function `fun` takes a sequence as input, and is applied at the one but deepest level,
where the entries are sequences of objects (no longer sequences of sequences).
This is the only difference with `deep_map`
Args:
`xs`: a sequence (list/tuple) of objects or sequences of objects.
`fun`: a function to transform sequences
`keys`: seq with keys if `xs` is dict; seq with integer indices if `xs` is seq.
For entries not in `keys`, the original `xs` value is retained.
`fun_name`: default value 'trf'; string with function tag (e.g. 'lengths'),
used if '''expand==True''' and '''isinstance(xs,dict)'''
Say for example fun_name='count', and `keys` contains 'sentence', then the transformed dict would look like
'''{'sentence':[sentences], 'sentence_lengths':[fun(sentences)] ...}'''
Returns:
Transformed sequence or dictionary.
Example:
>>> dave = [
... "All work and no play makes Jack a dull boy",
... "All work and no play makes Jack a dull boy.",
... "All work and no play makes Jack a very dull boy!"]
>>> jack = [
... "I'm sorry Dave, I'm afraid I can't do that!",
... "I'm sorry Dave, I'm afraid I can't do that",
... "I'm sorry Dave, I'm afraid I cannot do that"]
>>> support = [
... ["Play makes really dull", "really dull"],
... ["Dave is human"],
... ["All work", "all dull", "dull"]]
>>> data2 = {'dave': dave, 'jack': jack, 'support': support}
>>> vocab2 = Vocab()
>>> data2_processed = deep_map(data2, lambda x: tokenize(x.lower()))
>>> data2_ids = deep_map(data2_processed, vocab2)
>>> data2_ids_with_lengths = deep_seq_map(data2_ids, lambda xs: len(xs), keys=['dave','jack','support'],
... fun_name='lengths', expand=True)
>>> pprint.pprint(data2_ids_with_lengths)
{'dave': [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[1, 2, 3, 4, 5, 6, 7, 8, 12, 9, 10, 13]],
'dave_lengths': [10, 11, 12],
'jack': [[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24, 13],
[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24],
[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 25, 23, 24]],
'jack_lengths': [17, 16, 14],
'support': [[[5, 6, 26, 9], [26, 9]], [[18, 27, 28]], [[1, 2], [1, 9], [9]]],
'support_lengths': [[4, 2], [3], [2, 2, 1]]}
"""
if isinstance(xss, list) and all([not isinstance(xs, list) for xs in xss]):
return fun(xss)
else:
if isinstance(xss, dict):
xss_mapped = {}
for k, xs in xss.items():
if keys is None or k in keys:
if expand:
xss_mapped[k] = xs
k = '%s_%s' % (str(k), str(fun_name) if fun_name is not None else 'trf')
if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]):
xss_mapped[k] = fun(xs)
else:
xss_mapped[k] = deep_seq_map(xs, fun) # fun_name not needed, because expand==False
else:
xss_mapped[k] = xs
else:
xss_mapped = []
for k, xs in enumerate(xss):
if keys is None or k in keys:
if expand:
xss_mapped.append(xs)
if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]):
xss_mapped.append(fun(xs))
else:
xss_mapped.append(deep_seq_map(xs, fun))
else:
xss_mapped.append(xs)
return xss_mapped | 59406ae1ee87bfea82f4b22fb3d5fb96c29ccda6 | 709,955 |
import re
def clean_cmd(cmd):
"""Removes multiple spaces and whitespace at beginning or end of command.
Args:
cmd (str): A string containing the command to clean.
Returns:
A cleaned command string.
"""
    return re.sub(r'\s{2,}', ' ', cmd).strip(' \t\n\r') | d98f4fea9791cbb5936b306ee74335efc6515902 | 709,956 |
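A usage sketch of `clean_cmd`:
print(repr(clean_cmd("  git   commit   -m 'msg'\n")))  # "git commit -m 'msg'"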
def lorentz(x, a, mu, ga):
    """ Input: x - value and a=I, mu=x_0, ga - Lorentz function coefficients (float)
    Return: value of the function with the desired parameters at x (float)
    Descr.: Calculate an L-type (Lorentzian) function for given x and parameters"""
return (a * ga ** 2) / ((x - mu) ** 2 + ga ** 2) | 1af83bdca1ff14f25da86cb0f3dacbd36409f1e1 | 709,957 |
def quick_sort(array):
"""
    Not in-place; the standard (functional) version
"""
if array == []:
return []
else:
pivot = array[-1]
smaller = quick_sort([x for x in array[0:-1] if x <= pivot])
larger = quick_sort([x for x in array[0:-1] if x > pivot])
return smaller + [pivot] + larger | 40b969855394600a94ed264f5bffade95c72455e | 709,958 |
def calc_laplacian_matrix(D, W):
"""
    Given a graph's degree matrix and similarity (adjacency) matrix, compute the graph Laplacian
    :param W: similarity matrix
    :param D: degree matrix of the graph
    :return: Laplacian matrix
    """
return D - W | 542efe382457a34587615d24935c040238098610 | 709,959 |
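A small numpy sketch of `calc_laplacian_matrix` for a 3-node path graph; the matrices are illustrative:
import numpy as np
W = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])       # adjacency / similarity matrix
D = np.diag(W.sum(axis=1))      # degree matrix
print(calc_laplacian_matrix(D, W))
# [[ 1 -1  0]
#  [-1  2 -1]
#  [ 0 -1  1]]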
def _bin2bcd(value):
"""Convert a binary value to binary coded decimal.
:param value: the binary value to convert to BCD. (required, no default)
"""
return value + 6 * (value // 10) | 508383fe8964da3a09699ee8e68f36cea4162746 | 709,960 |
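A quick worked example for `_bin2bcd`: for decimal 42 the BCD byte is 42 + 6*(42//10) = 66 = 0x42.
print(hex(_bin2bcd(42)))  # 0x42
print(hex(_bin2bcd(59)))  # 0x59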
def literal_query(query):
    """Don't interpret any special query syntax
    SQLite's FTS extensions support special query syntax for AND, OR and
    prefix searches, as well as grouping and negation. These are not of much
    use in the dictionary case, but they break some legitimate queries. So
    let's treat all queries literally by enclosing them in quotes.
"""
return '"' + query.replace('"', '') + '"' | 65c5f3215a2d36fb15b54e5420ce52ac27d1b420 | 709,961 |
import torch
def inverse_pinhole_matrix(pinhole, eps=1e-6):
"""
Returns the inverted pinhole matrix from a pinhole model
"""
assert len(pinhole.shape) == 2 and pinhole.shape[1] == 12, pinhole.shape
# unpack pinhole values
fx, fy, cx, cy = torch.chunk(pinhole[..., :4], 4, dim=1) # Nx1
# create output container
k = torch.eye(4, device=pinhole.device, dtype=pinhole.dtype)
k = k.view(1, 4, 4).repeat(pinhole.shape[0], 1, 1) # Nx4x4
# fill output with inverse values
k[..., 0, 0:1] = 1. / (fx + eps)
k[..., 1, 1:2] = 1. / (fy + eps)
k[..., 0, 2:3] = -1. * cx / (fx + eps)
k[..., 1, 2:3] = -1. * cy / (fy + eps)
return k | e2fd741598b858f9d8731e4dc2b0c79913941dbf | 709,962 |
def build_2d_grid(ir):
""" Build simple grid with a column for each gate."""
grid = []
for g in ir.gates:
step = [None] * ir.ngates
if g.is_single():
step[g.idx0] = g
if g.is_ctl():
step[g.ctl] = g.ctl
step[g.idx1] = g
grid.append(step)
return grid | 55c17327fb530301ca505b42cdb8d47426491374 | 709,963 |
import random
def throw_dice(n):
"""Throw `n` dice, returns list of integers"""
results = []
while n > 0:
results += [random.randint(1,6)]
n = n-1
return results | 68c56b468ecd1eff59932099dd4620bae9581f45 | 709,964 |
def parcours_serpentin(n):
    """Return the list of (column, row) indices (note: column and row are
    deliberately swapped here!) of the cells visited when traversing an
    n x n array in a spiral ("serpentine") order.
    Ex: for T = [ [1,2,3],
                  [4,5,6],
                  [7,8,9] ]
    the traversal visits the cells 1,2,3,6,9,8,7,4,5 and the
    function returns the index list [(0,0),(1,0),(2,0),(2,1) ...]
    """
return [] | 189e486ad82d75923244daf51c223254f7b29fcc | 709,965 |
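The body above is a stub; a minimal sketch of the spiral traversal described in the docstring, assuming a clockwise spiral starting at the top-left is intended (the helper name is hypothetical):
def parcours_serpentin_sketch(n):
    """Spiral (clockwise) traversal of an n x n grid, returned as (column, row) pairs."""
    visited = [[False] * n for _ in range(n)]
    moves = [(1, 0), (0, 1), (-1, 0), (0, -1)]  # right, down, left, up as (dcol, drow)
    col = row = direction = 0
    result = []
    for _ in range(n * n):
        result.append((col, row))
        visited[row][col] = True
        dc, dr = moves[direction]
        if not (0 <= col + dc < n and 0 <= row + dr < n and not visited[row + dr][col + dc]):
            direction = (direction + 1) % 4   # turn when hitting an edge or a visited cell
            dc, dr = moves[direction]
        col, row = col + dc, row + dr
    return result

print(parcours_serpentin_sketch(3))
# [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1), (1, 1)]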
def bdev_rbd_unregister_cluster(client, name):
"""Remove Rados cluster object from the system.
Args:
name: name of Rados cluster object to unregister
"""
params = {'name': name}
return client.call('bdev_rbd_unregister_cluster', params) | 03bf70352b8df65044eba1c9ece4b156590e11bc | 709,966 |
def simple_dict_event_extractor(row, condition_for_creating_event, id_field, timestamp_field, name_of_event):
"""
Takes a row of the data df and returns an event record {id, event, timestamp}
if the row satisfies the condition (i.e. condition_for_creating_event(row) returns True)
"""
if condition_for_creating_event(row):
return {'id': row[id_field], 'event': name_of_event, 'timestamp': row[timestamp_field]} | 2195acf5df6f465fdf3160df3abbac54e5ac0320 | 709,967 |
import gc
def test_harvest_lost_resources(pool):
"""Test unreferenced resources are returned to the pool."""
def get_resource_id():
"""
Ensures ``Resource`` falls out of scope before calling
``_harvest_lost_resources()``.
"""
return id(pool.get_resource()._resource)
r_id = get_resource_id()
# Run garbage collection to ensure ``Resource`` created in
# ``get_resource_id()`` is destroyed.
gc.collect()
pool._harvest_lost_resources()
assert r_id == id(pool.get_resource()._resource) | 04b8b29520c2ae9c2c47cef412659e9c567c6a8a | 709,968 |
import numpy
def onedthreegaussian(x, H, A1, dx1, w1, A2, dx2, w2, A3, dx3, w3):
"""
    Returns the sum of three 1-dimensional Gaussians of the form
    H + A*numpy.exp(-(x-dx)**2/(2*w**2))
    """
g1 = A1 * numpy.exp(-(x-dx1)**2 / (2*w1**2))
g2 = A2 * numpy.exp(-(x-dx2)**2 / (2*w2**2))
g3 = A3 * numpy.exp(-(x-dx3)**2 / (2*w3**2))
return H + g1 + g2 + g3 | f93ea1339fe1498fdaeaee91f75b7ba316455646 | 709,969 |
def summation_i_squared(n):
"""Summation without for loop"""
if not isinstance(n, int) or n < 1:
return None
return int(((n*(n+1)*(2*n+1))/6)) | dec0aba274bcaf3e3a821db5962af51d39835438 | 709,970 |
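A quick check of `summation_i_squared` against the expanded sum:
print(summation_i_squared(5))                                     # 55 (= 1 + 4 + 9 + 16 + 25)
print(summation_i_squared(5) == sum(i * i for i in range(1, 6)))  # True
print(summation_i_squared(0))                                     # None (invalid input)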
def __getStationName(name, id):
"""Construct a station name."""
name = name.replace("Meetstation", "")
name = name.strip()
name += " (%s)" % id
return name | daab36ed8020536c8dd2c073c352634696a63f3e | 709,971 |
def evalRPN(self, tokens):
    # Evaluate a Reverse Polish Notation expression, mainly using a stack
"""
:type tokens: List[str]
:rtype: int
"""
stack = []
for item in tokens:
# print(stack)
if item.isdigit():
stack.append(int(item))
if item[0] == '-' and len(item) > 1 and item[1:].isdigit():
stack.append(int(item))
if item == '*':
num1 = stack.pop()
num2 = stack.pop()
stack.append(num1 * num2)
if item == '/':
num1 = stack.pop()
num2 = stack.pop()
stack.append(int(num2 / num1))
if item == '+':
num1 = stack.pop()
num2 = stack.pop()
stack.append(num1 + num2)
if item == '-':
num1 = stack.pop()
num2 = stack.pop()
stack.append(num2 - num1)
return stack[0] | 6b2050f6f635324878116371cd81a6d25ea31240 | 709,972 |
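A usage sketch of the RPN evaluator above; since it is written as a method, `self` is passed explicitly here:
print(evalRPN(None, ["2", "1", "+", "3", "*"]))   # 9
print(evalRPN(None, ["4", "13", "5", "/", "+"]))  # 6  (division truncates toward zero)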
def is_valid_pre_6_2_version(xml):
"""Returns whether the given XML object corresponds to an XML output file of Quantum ESPRESSO pw.x pre v6.2
:param xml: a parsed XML output file
:return: boolean, True when the XML was produced by Quantum ESPRESSO with the old XML format
"""
element_header = xml.find('HEADER')
if element_header is None:
return False
element_format = element_header.find('FORMAT')
if element_format is None:
return False
try:
name = element_format.attrib['NAME']
except KeyError:
return False
if name != 'QEXML':
return False
return True | 80bda73addc68a88b2a1dc5828c0553cbaf7e6f2 | 709,974 |
def addMovieElement(findings, data):
""" Helper Function which handles unavailable information for each movie"""
if len(findings) != 0:
data.append(findings[0])
else:
data.append("")
return data | af3c45c8b8d4c0cb7ba1cac4925d0f5998affe93 | 709,975 |
def get_trimmed_glyph_name(gname, num):
"""
Glyph names cannot have more than 31 characters.
See https://docs.microsoft.com/en-us/typography/opentype/spec/...
recom#39post39-table
Trims an input string and appends a number to it.
"""
suffix = '_{}'.format(num)
return gname[:31 - len(suffix)] + suffix | a5e90163d15bd4fc0b315414fffd2ac227768ab0 | 709,976 |
import itertools
def generate_itoa_dict(
        bucket_values=[-0.33, 0, 0.33], valid_movement_direction=[1, 1, 1, 1]):
    """
    Use a Cartesian product to generate the action combination
    space for the fetch environments.
    valid_movement_direction: per-dimension flags (1/0) selecting which movement directions may vary
    """
action_space_extended = [bucket_values if m == 1 else [0]
for m in valid_movement_direction]
return list(itertools.product(*action_space_extended)) | b8264174857aeb9d64226cce1cd1625f7e65b726 | 709,977 |
def get_proto_root(workspace_root):
"""Gets the root protobuf directory.
Args:
workspace_root: context.label.workspace_root
Returns:
The directory relative to which generated include paths should be.
"""
if workspace_root:
return "/{}".format(workspace_root)
else:
return "" | 35cff0b28ee6c1893e5dba93593126c996ba72cc | 709,978 |
def quantile_turnover(quantile_factor, quantile, period=1):
"""
Computes the proportion of names in a factor quantile that were
not in that quantile in the previous period.
Parameters
----------
quantile_factor : pd.Series
DataFrame with date, asset and factor quantile.
quantile : int
Quantile on which to perform turnover analysis.
period: int, optional
Number of days over which to calculate the turnover.
Returns
-------
quant_turnover : pd.Series
Period by period turnover for that quantile.
"""
quant_names = quantile_factor[quantile_factor == quantile]
quant_name_sets = quant_names.groupby(level=['date']).apply(
lambda x: set(x.index.get_level_values('asset')))
name_shifted = quant_name_sets.shift(period)
new_names = (quant_name_sets - name_shifted).dropna()
quant_turnover = new_names.apply(
lambda x: len(x)) / quant_name_sets.apply(lambda x: len(x))
quant_turnover.name = quantile
return quant_turnover | 6c7b2afdd4c4f0a2dbf38064d2d8664a25370ca2 | 709,979 |
def numpy_ndarray(nb_arr):
"""Return a copy of numba DeviceNDArray data as a numpy.ndarray.
"""
return nb_arr.copy_to_host() | d6ee1c62428783344fe6232ef229a6dabc8f2a2f | 709,980 |
def convert_to_dict(my_keys, my_values):
"""Merge a given list of keys and a list of values into a dictionary.
Args:
my_keys (list): A list of keys
my_values (list): A list corresponding values
Returns:
Dict: Dictionary of the list of keys mapped to the list of values
"""
return dict(zip(my_keys, my_values)) | e00690d27770539e6b9d2166835f6bd1b9c11c5a | 709,981 |
def print_sig(expr):
"""
Arguments:
- `expr`:
"""
return "{0!s} × {1!s}".format(expr.dom, expr.body) | be8d6fb1ad2256e2a825e383859f72db93318864 | 709,982 |
def calculate_offset(lon, first_element_value):
"""
Calculate the number of elements to roll the dataset by in order to have
longitude from within requested bounds.
:param lon: longitude coordinate of xarray dataset.
:param first_element_value: the value of the first element of the longitude array to roll to.
"""
# get resolution of data
res = lon.values[1] - lon.values[0]
# calculate how many degrees to move by to have lon[0] of rolled subset as lower bound of request
diff = lon.values[0] - first_element_value
# work out how many elements to roll by to roll data by 1 degree
index = 1 / res
# calculate the corresponding offset needed to change data by diff
offset = int(round(diff * index))
return offset | a55eee1dd11b1b052d67ab1abadfc8087c1a2fe0 | 709,983 |
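A small sketch of `calculate_offset`, assuming xarray is available; a plain 1-degree global grid stands in for the dataset's longitude coordinate:
import numpy as np
import xarray as xr
lon = xr.DataArray(np.arange(0.0, 360.0, 1.0), dims="lon")  # 0, 1, ..., 359 degrees
print(calculate_offset(lon, first_element_value=-180.0))    # 180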
import os
def get_file_metadata(folder, video_relative_path):
""" """
# SAMPLE FILENAME: XXCam_01_20180517203949574.mp4
# XXXXX_XX_YYYYMMDDHHMMSSmmm.mp4
# 2019/01/01/XXCam-20180502-1727-34996.mp4
# XXCam-01-20180502-1727-34996.mp4
video_filename = os.path.basename(video_relative_path)
sub_folder = os.path.dirname(video_relative_path)
basename, extension = os.path.splitext(video_filename)
filename_parts_u = basename.split('_')
filename_parts_d = basename.split('-')
if len(filename_parts_u) == 3 and filename_parts_u[2].isdigit() and len(filename_parts_u[2]) == 17:
file_date = filename_parts_u[2][0:8]
file_time1 = filename_parts_u[2][8:12]
file_time2 = filename_parts_u[2][12:17]
basename_new = '%s-%s-%s-%s' % (filename_parts_u[0], file_date, file_time1, file_time2)
elif len(filename_parts_u) == 3 and filename_parts_u[2].isdigit() and len(filename_parts_u[2]) == 14:
# July2019 firmware update on Reolink camera changed filename format, therefore simplify mine!
file_date = filename_parts_u[2][0:8]
file_time1 = filename_parts_u[2][8:14]
# file_time2 = filename_parts_u[2][12:14]
basename_new = '%s-%s-%s' % (filename_parts_u[0], file_date, file_time1) # ,file_time2)
elif (len(filename_parts_d) == 4 and filename_parts_d[1].isdigit() and len(filename_parts_d[1]) == 8
and filename_parts_d[2].isdigit() and len(filename_parts_d[2]) == 4
and filename_parts_d[3].isdigit() and len(filename_parts_d[3]) == 5):
basename_new = basename
file_date = filename_parts_d[1]
elif (len(filename_parts_d) == 5 and filename_parts_d[2].isdigit() and len(filename_parts_d[2]) == 8
and filename_parts_d[3].isdigit() and len(filename_parts_d[3]) == 4
and filename_parts_d[4].isdigit() and len(filename_parts_d[4]) == 5):
basename_new = basename
file_date = filename_parts_d[2]
else:
basename_new = basename
file_date = 'NO_DATE'
return {'original': video_filename,
'sub_folder': sub_folder,
'source_fullpath': os.path.join(folder, video_relative_path),
'filename_new': '%s%s' % (basename_new, extension),
'basename_new': basename_new,
'basename_original': basename,
'file_date': file_date
} | c8fcc163f1b3fa89206d752181d5f814219fe74a | 709,984 |
import base64
import hashlib
def rehash(file_path):
"""Return (hash, size) for a file with path file_path. The hash and size
are used by pip to verify the integrity of the contents of a wheel."""
with open(file_path, 'rb') as file:
contents = file.read()
hash = base64.urlsafe_b64encode(hashlib.sha256(contents).digest()).decode('latin1').rstrip('=')
size = len(contents)
return hash, size | 167449640e8cbf17d36e7221df3490a12381dd8e | 709,986 |
def prge_annotation():
"""Returns an annotation with protein/gene entities (PRGE) identified.
"""
annotation = {"ents": [{"text": "p53", "label": "PRGE", "start": 0, "end": 0},
{"text": "MK2", "label": "PRGE", "start": 0, "end": 0}],
"text": "p53 and MK2",
"title": ""}
return annotation | dda417c1c1a1146482f4a3340741d938714dbf30 | 709,987 |
from typing import OrderedDict
def sort_dict(value):
"""Sort a dictionary."""
return OrderedDict((key, value[key]) for key in sorted(value)) | 93e03b64d44ab79e8841ba3ee7a3546c1e38d6e4 | 709,988 |
def hyb_stor_capacity_rule(mod, prj, prd):
"""
Power capacity of a hybrid project's storage component.
"""
return 0 | 86ed72e48738df66fca945ff8aaf976f0a7d14e0 | 709,989 |
from typing import List
def choices_function() -> List[str]:
"""Choices functions are useful when the choice list is dynamically generated (e.g. from data in a database)"""
return ['a', 'dynamic', 'list', 'goes', 'here'] | 30b4b05435bacc0a42c91a3f0be09a90098a012f | 709,990 |
import uuid
def nodeid():
"""nodeid() -> UUID
Generate a new node id
>>> nodeid()
UUID('...')
:returns: node id
:rtype: :class:`uuid.UUID`
"""
return uuid.uuid4() | 88a3ddc335ce2ca07bfc0e2caf8487dc2342e80f | 709,991 |
def elements_counter(arr, count=0):
    """Recursively count the number of elements in a list.
    Note: the input list is consumed (emptied) in the process.
    Arguments:
        arr {list} -- the list
    Keyword Arguments:
        count {int} -- number of elements counted so far (default: {0})
    Returns:
        int -- number of elements in the list
    """
if len(arr):
arr.pop(0)
count += 1
return elements_counter(arr, count)
return count | 80809781fd2d6a7a2fa92a4b7d5713771a07f8eb | 709,992 |
from typing import Dict
def dataset_is_open_data(dataset: Dict) -> bool:
"""Check if dataset is tagged as open data."""
is_open_data = dataset.get("isOpenData")
if is_open_data:
return is_open_data["value"] == "true"
return False | fc1591d4a045ba904658bb93577a364145492465 | 709,993 |
def _remove_suffix_apple(path):
"""
Strip off .so or .dylib.
>>> _remove_suffix_apple("libpython.so")
'libpython'
>>> _remove_suffix_apple("libpython.dylib")
'libpython'
>>> _remove_suffix_apple("libpython3.7")
'libpython3.7'
"""
if path.endswith(".dylib"):
return path[:-len(".dylib")]
if path.endswith(".so"):
return path[:-len(".so")]
return path | c5526b0f3420625c2efeba225187f72c7a51fb4b | 709,994 |
def alt_text_to_curly_bracket(text):
"""
Converts the text that appears in the alt attribute of image tags from gatherer
to a curly-bracket mana notation.
ex: 'Green'->{G}, 'Blue or Red'->{U/R}
'Variable Colorless' -> {XC}
'Colorless' -> {C}
'N colorless' -> {N}, where N is some number
"""
def convert_color_to_letter(color):
if color.lower() not in ('red', 'white', 'blue', 'green', 'black', 'colorless', 'tap', 'energy'):
# some cards have weird split mana costs where you can pay N colorless
# or one of a specific color.
# Since we're ending up here, and what we're given isn't a color, lets assume its N
return color
else:
if color.lower() == 'blue': return 'U'
else: return color[0].upper()
try:
val = int(text, 10)
except Exception:
pass
else:
# This is just a number. Easy enough.
return f"{{{text}}}"
if ' or ' in text:
# this is a compound color, not as easy to deal with.
text = text.replace('or', '')
text = '/'.join([convert_color_to_letter(x) for x in text.split()])
else:
if 'Variable' in text:
text = 'X'
else:
# hopefully all that's left is just simple color symbols.
text = convert_color_to_letter(text)
    # at this point, text should hold the plain symbol (or compound symbol) letters
return f"{{{text}}}" | c604b236a8d0baeff244e0e246176a406674c9e2 | 709,995 |
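A few usage sketches for `alt_text_to_curly_bracket`:
print(alt_text_to_curly_bracket("Green"))              # {G}
print(alt_text_to_curly_bracket("Blue or Red"))        # {U/R}
print(alt_text_to_curly_bracket("Variable Colorless")) # {X}
print(alt_text_to_curly_bracket("2"))                  # {2}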
from typing import Tuple
import torch
def sum_last_4_layers(sequence_outputs: Tuple[torch.Tensor]) -> torch.Tensor:
"""Sums the last 4 hidden representations of a sequence output of BERT.
Args:
-----
sequence_output: Tuple of tensors of shape (batch, seq_length, hidden_size).
For BERT base, the Tuple has length 13.
Returns:
--------
summed_layers: Tensor of shape (batch, seq_length, hidden_size)
"""
last_layers = sequence_outputs[-4:]
return torch.stack(last_layers, dim=0).sum(dim=0) | 14bba441a116712d1431b1ee6dda33dc5ec4142c | 709,996 |
def list2str(lst: list) -> str:
    """
    Convert the elements of a list into a string so that, when printed, each element
    appears on its own line prefixed with its index (starting from 1)
    e.g.
    In:
        lst = [a,b,c]
        str = list2str(lst)
        print(str)
    Out:
        1. a
        2. b
        3. c
    """
i = 1
res_list = []
for x in lst:
res_list.append(str(i)+'. '+str(x))
i += 1
res_str = '\n'.join(res_list)
return res_str | 3da11748d650e234c082255b8d7dff5e56e65732 | 709,997 |
def ithOfNPointsOnCircleY(i,n,r):
"""
    return y coordinate of the ith of n points on a circle of radius r
    points are numbered from 0 through n-1, spread counterclockwise around the circle
    point 0 is at angle 0, as on a unit circle, i.e. at the point (r, 0)
    """
    # Hints: similar to ithOfNPointsOnCircleX, but use r sin(theta)
return "stub" | d4e697145423146b085f8423315c795745498afd | 709,999 |
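The body above is a stub; following the hint, a sketch of the intended computation might be (the helper name is hypothetical):
import math

def ithOfNPointsOnCircleY_sketch(i, n, r):
    """y coordinate of the i-th of n points spread counterclockwise around a circle of radius r."""
    theta = 2 * math.pi * i / n
    return r * math.sin(theta)

print(ithOfNPointsOnCircleY_sketch(0, 4, 2))  # 0.0
print(ithOfNPointsOnCircleY_sketch(1, 4, 2))  # 2.0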