def taxon_id(_):
"""
Always returns 10090, the mouse taxon id.
"""
return 10090 | 117fe7f8d56eb9be4ee2b0f4d782b806576faedf | 1,225 |
def getLogisticModelNames(config):
"""
Get the names of the models present in the configobj
Args:
config: configobj object defining the model and its inputs.
Returns:
list: list of model names.
"""
names = []
lmodel_space = config
for key, value in lmodel_space.items():
if isinstance(value, str):
continue
else: # this is a model
names.append(key)
return names | f7f82b12eb50a58c92970b5c2a8f99eb01945523 | 1,227 |
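A quick sketch of the expected behavior, using a plain dict to stand in for the configobj object (configobj sections behave like nested dicts; the key names here are hypothetical):
config = {"version": "1.0",
          "model_a": {"layers": "slope, rainfall"},
          "model_b": {"layers": "slope"}}
assert getLogisticModelNames(config) == ["model_a", "model_b"]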
def mp0(g0):
"""Return 0th order free energy."""
return g0.sum() | 5aa3580fec1322bd7b4e357ec6bee4d52fae592e | 1,228 |
from typing import Dict
def most_repeated_character(string: str) -> str:
    """
    Find the most repeated character in a string.
    :param string: input string (must be non-empty)
    :return: the character with the highest occurrence count
    """
    counts: Dict[str, int] = {}  # avoid shadowing the built-in `map`
    for letter in string:
        if letter not in counts:
            counts[letter] = 1
        else:
            counts[letter] += 1
    return max(counts.items(), key=lambda item: item[1])[0] | c59a1e0a552f12c7561ecdb11530f98f15076cdc | 1,230 |
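A small usage check (ties resolve to the character that reaches the maximum count first):
assert most_repeated_character("aabbbcc") == "b"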
def transitions(bits):
"""Count the number of transitions in a bit sequence.
>>> assert transitions([0, 0]) == 0
>>> assert transitions([0, 1]) == 1
>>> assert transitions([1, 1]) == 0
>>> assert transitions([1, 0]) == 1
>>> assert transitions([0, 0, 0]) == 0
>>> assert transitions([0, 1, 0]) == 2
>>> assert transitions([1, 1, 0]) == 1
>>> assert transitions([1, 0, 0]) == 1
>>> assert transitions([0, 0, 1]) == 1
>>> assert transitions([0, 1, 1]) == 1
>>> assert transitions([1, 1, 1]) == 0
>>> assert transitions([1, 0, 1]) == 2
"""
transitions = 0
for i in range(0, len(bits)-1):
if bits[i] != bits[i+1]:
transitions += 1
return transitions | bc65f7b57508fc0c34275c4794d73c106bce07fd | 1,231 |
def _convert_code(code):
"""
    Convert a JoinQuant-style security code to the xalpha form.
    :param code: security code such as "600000.XSHG"
    :return: code with an "SH"/"SZ" prefix, e.g. "SH600000"
"""
no, mk = code.split(".")
if mk == "XSHG":
return "SH" + no
elif mk == "XSHE":
return "SZ" + no | 11ffcde407da7afaaf0eb28a80244d85f5136199 | 1,232 |
def _is_arg_name(s, index, node):
"""Search for the name of the argument. Right-to-left."""
if not node.arg:
return False
return s[index : index+len(node.arg)] == node.arg | b0c995ea553184f266fd968ad60b4c5fb19a55d4 | 1,233 |
import socket
import fcntl
import struct
def get_ip_address(dev="eth0"):
    """Retrieve an interface's IPv4 address via the SIOCGIFADDR ioctl - only works on Linux."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        packed = fcntl.ioctl(s.fileno(), 0x8915,  # SIOCGIFADDR
                             struct.pack('256s', dev[:15].encode()))
        return socket.inet_ntoa(packed[20:24])
    except OSError:
        return None | 96f59f17937543ed9cd4652af4703eaf975b8069 | 1,234 |
def cleared_nickname(nick: str) -> str:
    """Perform nickname clearing on given nickname"""
    if nick.startswith(('+', '!')):
        nick = nick[1:]
    if nick.endswith('#'):
        nick = nick[:-1]
    # all() is true only when '(' occurs with text on both sides,
    # i.e. the nick carries a trailing "(annotation)" worth stripping
    if all(nick.rpartition('(')):
        nick = nick.rpartition('(')[0]
    return nick | f3a5c838f0518a929dfa8b65f83a1d4c6e6dbbe4 | 1,237 |
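A couple of illustrative calls (the nick formats are hypothetical):
assert cleared_nickname("+user(afk)") == "user"
assert cleared_nickname("!admin#") == "admin"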
import os
def is_subdir(path, directory):
"""Check if path is a sub of directory.
Arguments:
path (string):
the path to check
        directory (string):
the path to use as relative starting point.
Returns:
bool: True if path is a sub of directory or False otherwise.
"""
try:
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
except ValueError:
        # filename and folder are on different mount points
return False | 6d947da6fada3b04f9b75728260b34ddfbbb3724 | 1,238 |
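Usage sketch, assuming POSIX-style paths:
assert is_subdir("/home/user/project/src", "/home/user/project") is True
assert is_subdir("/etc/passwd", "/home/user/project") is False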
def get_hex(fh, nbytes=1):
    """
    read nbytes bytes (1 by default) from file handle fh
    and return them formatted as hexadecimal
    """
    hstr = ""
    for i in range(nbytes):
        b = "%02X " % ord(fh.read(1))
        hstr += b
    return hstr | b1d426f7bfcceffa829c9dcc1150f32be5c48413 | 1,239 |
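A minimal check using an in-memory binary stream as the file handle:
import io
assert get_hex(io.BytesIO(b"\xde\xad\xbe"), 3) == "DE AD BE "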
def gen_fov_chan_names(num_fovs, num_chans, return_imgs=False, use_delimiter=False):
"""Generate fov and channel names
Names have the format 'fov0', 'fov1', ..., 'fovN' for fovs and 'chan0', 'chan1', ...,
'chanM' for channels.
Args:
num_fovs (int):
Number of fov names to create
num_chans (int):
Number of channel names to create
return_imgs (bool):
Return 'chanK.tiff' as well if True. Default is False
use_delimiter (bool):
Appends '_otherinfo' to the first fov. Useful for testing fov id extraction from
filenames. Default is False
Returns:
tuple (list, list) or (list, list, list):
If return_imgs is False, only fov and channel names are returned
If return_imgs is True, image names will also be returned
"""
fovs = [f'fov{i}' for i in range(num_fovs)]
if use_delimiter:
fovs[0] = f'{fovs[0]}_otherinfo'
chans = [f'chan{i}' for i in range(num_chans)]
if return_imgs:
imgs = [f'{chan}.tiff' for chan in chans]
return fovs, chans, imgs
else:
return fovs, chans | 417490259c42a52c58aab418fbb63185602e6750 | 1,240 |
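Usage sketch:
fovs, chans = gen_fov_chan_names(2, 3)
assert fovs == ['fov0', 'fov1']
assert chans == ['chan0', 'chan1', 'chan2']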
import numpy
def kabsch_superpose(P, Q): # P,Q: vstack'ed matrix
"""
Usage:
P = numpy.vstack([a2, b2, c2])
Q = numpy.vstack([a1, b1, c1])
m = kabsch_superpose(P, Q)
newP = numpy.dot(m, P)
"""
    A = numpy.dot(numpy.transpose(P), Q)
    U, s, V = numpy.linalg.svd(A)  # note: numpy returns V already transposed (V^T)
    tmp = numpy.identity(3)
    tmp[2,2] = numpy.sign(numpy.linalg.det(A))  # guard against improper rotations (reflections)
    R = numpy.dot(numpy.dot(numpy.transpose(V), tmp), numpy.transpose(U))
    return R | 56b7b9c3168e644ad71bee2146af3e4ae455c648 | 1,241 |
import os
def download_pkg():
    """Step 2: download the third-party packages the environment needs.
    :return: bool
    """
    print("Downloading and installing the required third-party packages...")
    # The ITHome crawler additionally needs selenium, BeautifulSoup4 and
    # requests; append them to the command below if needed.
    status = os.system('pip install flask flask_cors flask_wtf flask_mail pymysql redis apscheduler xlwt psutil ')
    # os.system does not raise on failure, so check the exit status instead
    if status == 0:
        print("Installation succeeded...")
        return True
    print("Download/installation failed with exit status: %s" % status)
    return False | fed51c16d21cb0425c13d737a20b141e91eae2d6 | 1,242 |
def flatten_list(a_list, parent_list=None):
"""Given a list/tuple as entry point, return a flattened list version.
EG:
>>> flatten_list([1, 2, [3, 4]])
[1, 2, 3, 4]
NB: The kwargs are only for internal use of the function and should not be
used by the caller.
"""
if parent_list is None:
parent_list = []
for element in a_list:
        if isinstance(element, (list, tuple)):
            flatten_list(element, parent_list=parent_list)
else:
parent_list.append(element)
return parent_list | dd6c9c66a370e65744ede40dfdc295b0ec63379a | 1,243 |
def list_to_dict(config):
    """
    Convert a list based beacon configuration
    into a dictionary.
    """
    _config = {}
    for item in config:
        _config.update(item)
    return _config | 3d7ace7612e67a0c406a2a400ad3147f99dbef0a | 1,244 |
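Usage sketch with a hypothetical beacon config:
config = [{'interval': 5}, {'onchangeonly': True}]
assert list_to_dict(config) == {'interval': 5, 'onchangeonly': True}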
def remove_provinces(data, date_range):
"""
REMOVE PROVINCES
:param data: The Data received from the API
:param date_range: the date range of the data
:return: data after removing provinces
"""
countries_with_provinces = []
names_of_countries_with_prov = []
# get countries with provinces
for country in data[:]:
if country['province'] is not None:
if country['country'] not in names_of_countries_with_prov:
names_of_countries_with_prov.append(country['country'])
countries_with_provinces.append(data.pop(data.index(country)))
else:
pass
# deal with countries with provinces
for country_name in names_of_countries_with_prov[:]: # for each country,
countries = list(
filter(lambda x: x['country'] == country_name, countries_with_provinces))
names_of_countries_with_prov.remove(country_name)
# calculate total cases, deaths & recovered per day
cases = {}
recovered = {}
deaths = {}
for date in date_range:
cs = 0
dt = 0
rc = 0
# sum data up per province
for prov in countries:
cs += prov['timeline']['cases'][date]
dt += prov['timeline']['deaths'][date]
rc += prov['timeline']['recovered'][date]
cases[date] = cs
recovered[date] = rc
deaths[date] = dt
# return country after removing provinces
totals = ({'country': country_name, 'province': None, 'timeline': {
'cases': cases, 'deaths': deaths, 'recovered': recovered}})
data.append(totals)
return data | 05e973254402fb2c9873fa065d45a6a5dd3da353 | 1,245 |
import re
def readConfigFile(filePath):
    """ Read the config file and return a list containing one tuple
    (modID, posY, posX, macAddr) for every module of the installation. """
    modules_attributes_list = []
    with open(filePath, "r") as confFile:
        for line in confFile:
            # Remove everything that is written after "#" character (comments)
            line = line.split("#")[0]
            line = line.split("//")[0]
            line = line.split("$")[0]
            # Remove special characters
            line = re.sub('[!@#$\0\\n ]', '', line)
            # Get the MAC addresses and the modules number
            words = line.split(",")
            if len(words) == 4:
                modID = int(words[0])
                posY = int(words[1])
                posX = int(words[2])
                macAddr = words[3]
                modules_attributes_list.append((modID, posY, posX, macAddr))
            elif len(words) < 2:
                pass
            else:
                raise AttributeError("Wrong formatting of the MAC file.")
    return modules_attributes_list | fadaec4dd005d6337eb5950b8782d5db944fb4cc | 1,246 |
def unpad_pkcs7(data):
"""
Strips PKCS#7 padding from data.
Raises ValueError if padding is invalid.
"""
if len(data) == 0:
raise ValueError("Error: Empty input.")
pad_value = data[-1]
    if pad_value == 0 or pad_value > 16 or pad_value > len(data):
        raise ValueError("Error: Invalid padding.")
for i in range(1, pad_value + 1):
if data[-i] != pad_value:
raise ValueError("Error: Invalid padding.")
unpadded = data[: (len(data) - pad_value)]
return unpadded | 27e59b8a880c130997f19814135c09cb6e94354d | 1,247 |
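A short sanity check (two padding bytes of value 0x02):
assert unpad_pkcs7(b"secret\x02\x02") == b"secret"
# unpad_pkcs7(b"secret\x02\x03") would raise ValueError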
def sigmaLabel(ax, xlabel, ylabel, sigma=None):
"""Label the axes on a figure with some uncertainty."""
confStr = r'$\pm{} \sigma$'.format(sigma) if sigma is not None else ''
ax.set_xlabel(xlabel + confStr)
ax.set_ylabel(ylabel + confStr)
return ax | 8ecf5ae2defd0d67c545943ea48992906612282e | 1,250 |
import os
def get_artifact_path(name):
"""Получение пути для сохранения артефакта. Side-эффект: Создание директории
@param name: Название артефакта
@return Путь для сохранения
"""
if not os.path.exists('../artifacts/'):
os.makedirs('../artifacts/')
path = f'../artifacts/{name}.png'
print(f'New artifact: {path}')
return path | 99c076c5803b418b27deeec0f65a5a42a24f3579 | 1,251 |
import re
def remove_multispaces(text):
""" Replace multiple spaces with only 1 space """
return [re.sub(r' +', " ",word) for word in text] | 0b87f6a4b0d49931b3f4bec6f9c313be05d476f0 | 1,252 |
def euclidean(a,b):
"""Calculate GCD(a,b) with the Euclidean algorithm.
Args:
a (Integer): an integer > 0.
b (Integer): an integer > 0.
Returns:
Integer: GCD(a,b) = m ∈ ℕ : (m|a ⋀ m|b) ⋀ (∄ n ∈ ℕ : (n|a ⋀ n|b) ⋀ n>m).
"""
if(a<b):
a,b = b,a
a, b = abs(a), abs(b)
while a != 0:
a, b = b % a, a
return b | 8af351e251e52336d7ef946a28bb6d666bff97c3 | 1,253 |
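Quick check:
assert euclidean(12, 18) == 6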
def add(n1, n2, base=10):
"""Add two numbers represented as lower-endian digit lists."""
k = max(len(n1), len(n2)) + 1
d1 = n1 + [0 for _ in range(k - len(n1))]
d2 = n2 + [0 for _ in range(k - len(n2))]
res = []
carry = 0
for i in range(k):
if d1[i] + d2[i] + carry < base:
res.append(d1[i] + d2[i] + carry)
carry = 0
else:
res.append(d1[i] + d2[i] + carry - base)
carry = 1
while res and res[-1] == 0:
res = res[:-1]
if res: return res
return [0] | 098bfa9ebedf7f219a6f9910e98c4cf9cbf13aa8 | 1,254 |
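Worked examples with little-endian digit lists:
assert add([9, 9], [1]) == [0, 0, 1]                    # 99 + 1 = 100
assert add([1, 1], [1, 1, 1], base=2) == [0, 1, 0, 1]   # 0b11 + 0b111 = 0b1010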
from typing import Optional
import re
def parse_progress_line(prefix: str, line: str) -> Optional[float]:
"""Extract time in seconds from a prefixed string."""
regexp = prefix + r"(?P<hours>\d+):(?P<minutes>\d{2}):(?P<seconds>\d{2}.\d{2})"
match = re.search(regexp, line)
if not match:
return None
return (
int(match.group("hours")) * 3600
+ int(match.group("minutes")) * 60
+ float(match.group("seconds"))
) | 690b2f0e48a5f584da646f9e4058ed75e654251e | 1,255 |
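Usage sketch on an ffmpeg-style progress line (the line content is hypothetical):
line = "frame= 100 fps=25 time=00:01:23.45 bitrate=1000k"
assert abs(parse_progress_line("time=", line) - 83.45) < 1e-6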
def add_missing_flows(data):
"""There are some flows not given in ReCiPe that seem like they should be there, given the relatively coarse precision of these CFs."""
new_cfs = {
"managed forest": {
"amount": 0.3,
"flows": [
"occupation, forest, unspecified",
"occupation, field margin/hedgerow",
],
},
"annual crops": {
"amount": 1.0,
"flows": [
"occupation, annual crop, flooded crop",
"occupation, annual crop, irrigated, extensive",
],
},
"pasture": {
"amount": 0.55,
"flows": [
"occupation, arable land, unspecified use",
"occupation, grassland, natural, for livestock grazing",
"occupation, heterogeneous, agricultural",
],
},
"artificial area": {"amount": 0.73, "flows": [],},
"permanent crops": {
"amount": 0.7,
"flows": [
"occupation, permanent crop, irrigated",
"occupation, permanent crop, irrigated, extensive",
"occupation, permanent crop, non-irrigated",
"occupation, permanent crop, non-irrigated, extensive",
],
},
}
""" The following were included in an earlier version of ReCiPe, but are skipped here, as we don't have enough info to use them consistently:
* 'occupation, bare area (non-use)',
* 'occupation, cropland fallow (non-use)',
* 'occupation, forest, primary (non-use)',
* 'occupation, forest, secondary (non-use)',
* 'occupation, inland waterbody, unspecified',
* 'occupation, lake, natural (non-use)',
* 'occupation, river, natural (non-use)',
* 'occupation, seabed, natural (non-use)',
* 'occupation, seabed, unspecified',
* 'occupation, snow and ice (non-use)',
* 'occupation, unspecified',
* 'occupation, unspecified, natural (non-use)',
* 'occupation, wetland, coastal (non-use)',
* 'occupation, wetland, inland (non-use)'
"""
for ds in data:
ds["exchanges"].extend(
[
{"name": flow, "amount": obj["amount"]}
for obj in new_cfs.values()
for flow in obj["flows"]
]
)
return data | e23184bb7363db4777d9f693a3fdc4ace9f8ff14 | 1,256 |
def num_fixed_points(permutation):
"""
Compute the number of fixed points (elements mapping to themselves) of a permutation.
:param permutation: Permutation in one-line notation (length n tuple of the numbers 0, 1, ..., n-1).
:return: Number of fixed points in the permutation.
.. rubric:: Examples
>>> num_fixed_points((0, 2, 1))
1
"""
n = 0
for i in range(len(permutation)):
if permutation[i] == i:
n += 1
return n | 124713cd4c90988c43630a74881e7107ff748682 | 1,257 |
def get_project_details(p):
"""Extract from the pickle object detailed information about
a given project and parse it in a comprehensive dict structure."""
res = {}
project = p['projects'][0]
fields = {'Owner(s)': 'project_owners',
'Member(s)': 'project_members',
'Collaborator(s)': 'project_collabs',
'User(s)': 'project_users',
'last_accessed': 'project_last_access'}
for k, v in fields.items():
res[k] = project[v].strip().split(' <br/> ')
if res[k][0] == '':
res[k] = ['None']
for e in ['insert_user', 'insert_date', 'project_access', 'name',
'project_last_workflow']:
res[e] = project[e]
return res | f8ba3debdd8be7cc7a906851a6a6fb1e3c5f039a | 1,258 |
def iscode(c):
"""
Tests if argument type could be lines of code,
i.e. list of strings
"""
if type(c) == type([]):
if c:
return type(c[0]) == type('')
else:
return True
else: return False | e60da6c05922ff1e67db15fa4caa1500a8f470c7 | 1,259 |
import hashlib
def extract_hash_parts(repo):
"""Extract hash parts from repo"""
full_hash = hashlib.sha1(repo.encode("utf-8")).hexdigest()
return full_hash[:2], full_hash[2:] | aa1aebaf9b8330539eb0266c4ff97fd3459753c8 | 1,260 |
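The 2/38 split matches the fan-out layout used by git-style object stores:
prefix, rest = extract_hash_parts("github.com/user/repo")
assert len(prefix) == 2 and len(rest) == 38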
import re
def split_bucket(s3_key):
"""
Returns the bucket name and the key from an s3 location string.
"""
match = re.match(r'(?:s3://)?([^/]+)/(.*)', s3_key, re.IGNORECASE)
if not match:
return None, s3_key
return match.group(1), match.group(2) | 6b854bdc9d105643a9fa528e6fefd19672451e63 | 1,261 |
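Usage sketch (bucket and key names are hypothetical):
assert split_bucket("s3://my-bucket/path/to/key") == ("my-bucket", "path/to/key")
assert split_bucket("my-bucket/key") == ("my-bucket", "key")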
def contains_chinese(ustr):
"""
    Check whether the string contains any Chinese (CJK) characters.
    Args:
        ustr: input string
    Returns: True if any character is in the CJK Unified Ideographs range
"""
return any('\u4e00' <= char <= '\u9fff' for char in ustr) | 8d53a214e1754e1c129f1583a298f5a19e1f76d3 | 1,262 |
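Quick checks:
assert contains_chinese("abc中文def") is True
assert contains_chinese("abcdef") is False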
import os
def python_modules():
"""Determine if there are python modules in the cwd.
Returns:
list of python modules as strings
"""
ignored = ["setup.py", "conftest.py"]
py_modules = []
for file_ in os.listdir(os.path.abspath(os.curdir)):
if file_ in ignored or not os.path.isfile(file_):
continue
file_name, file_ext = os.path.splitext(file_)
if file_ext == ".py":
py_modules.append(file_name)
return sorted(py_modules) | eba262b38bddb0f76f614c74a9a0b1c090e48e6b | 1,264 |
def test_bucket():
"""Universal bucket name for use throughout testing"""
return 'test_bucket' | 2f78b1b1bf7ccfff07ca29213d975f3b20f0e9a5 | 1,265 |
def create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths):
"""Create dictionary of hurricanes with hurricane name as the key and a dictionary of hurricane data as the value."""
hurricanes = dict()
num_hurricanes = len(names)
for i in range(num_hurricanes):
hurricanes[names[i]] = {"Name": names[i],
"Month": months[i],
"Year": years[i],
"Max Sustained Wind": max_sustained_winds[i],
"Areas Affected": areas_affected[i],
"Damage": updated_damages[i],
"Deaths": deaths[i]}
return hurricanes | 5a27d5349113f29d2af55df27a2ee2c2cc524549 | 1,266 |
def fast_dot(M1, M2):
"""
Specialized interface to the numpy.dot function
This assumes that A and B are both 2D arrays (in practice)
When A or B are represented by 1D arrays, they are assumed to reprsent
diagonal arrays
This function then exploits that to provide faster multiplication
"""
if len(M1.shape) in [1, 2] and len(M2.shape) == 1:
return M1*M2
elif len(M1.shape) == 1 and len(M2.shape) == 2:
return M1[:,None]*M2
elif len(M1.shape) == 2 and len(M2.shape) == 2:
return M1.dot(M2)
else:
raise Exception('fast_dot requires shapes to be 1 or 2') | b34e44787f48dfb25af4975e74262f3d8eaa5096 | 1,268 |
def encode(string_):
    """Encode a string as a single integer (little-endian base-256)."""
    return sum(ord(c) * 256 ** i
               for i, c in enumerate(str(string_))) | da3a729c2024d80792e08424745dc267ca67dff7 | 1,269 |
def generate_file_prefix(bin_params):
""" Use the bin params to generate a file prefix."""
prefix = "bin_"
for j in range(0, len(bin_params)):
if (j + 1) % 2 != 0:
prefix += str(bin_params[j]) + "-"
else:
prefix += str(bin_params[j]) + "_"
return prefix | cc058a64fcab77f6a4794a8bf7edb1e0e86c040c | 1,270 |
from numpy import array
def match_cam_time(events, frame_times):
"""
Helper function for mapping ephys events to camera times. For each event in events, we return the nearest
camera frame before the event.
Parameters
----------
events : 1D numpy array
Events of interest. Sampled at a higher rate than frame_times.
frame_times : 1D numpy array
Timepoints of camera frames to be assigned to events. Sampled at a lower rate than events.
"""
output = []
for a in events:
lags = array(a - frame_times)
before = len(lags[lags > 0]) - 1
if before >= 0:
output.append(before)
return array(output) | 3f086a0f65a34183a429cf3c50e90fdc742672d3 | 1,271 |
def read_ac(path, cut_off, rnalen):
    """Read the RNA accessibility file and output its positions and values
    The file should be a simple table with two columns:
    The first column is the position and the second one is the value
    '#' will be skipped
    """
    access = []
    with open(path) as f:
        i = 0
        for line in f:
            # stop once one value per RNA position has been collected
            if i >= rnalen:
                break
            line = line.split()
            if not line:
                continue
            elif line[0][0] == "#":
                continue
            elif len(line) < 2:
                continue
            else:
                v = line[1]
                if v == "NA":
                    access.append(0)
                else:
                    try:
                        v = 2 ** (-float(v))
                    except ValueError:
                        continue
                    if v >= cut_off:
                        access.append(1)
                    else:
                        access.append(0)
                i += 1
    return access | 0a8b6c2ff6528cf3f21d3b5efce14d59ff8ad2b6 | 1,272 |
def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname | 281ff6dcfae1894dd4685acf433bde89538fe87e | 1,273 |
def get_id_argument(id_card):
"""
获取身份证号码信息
:param id_card:
:return:
"""
id_card = id_card.upper()
id_length = len(id_card)
if id_length == 18:
code = {
'body': id_card[0:17],
'address_code': id_card[0:6],
'birthday_code': id_card[6:14],
'order_code': id_card[14:17],
'check_bit': id_card[17:18],
'type': 18
}
else:
code = {
'body': id_card,
'address_code': id_card[0:6],
'birthday_code': '19' + id_card[6:12],
'order_code': id_card[12:15],
'check_bit': '',
'type': 15
}
return code | ae4cad97e787fe1b0697b6a0f842f0da09795d6a | 1,274 |
def sanitize_date(date_dict: dict):
"""
Function to take the date values entered by the user and check their validity. If valid it returns True,
otherwise it sets the values to None and returns False
:param date_dict:
:return:
"""
month = date_dict["month"]
day = date_dict["day"]
year = date_dict["year"]
date = [month, day, year]
date_is_valid = not any([component is None for component in date])
if date_is_valid:
date_is_valid &= not (month == 2 and day > 29)
date_is_valid &= not (month in [4, 6, 9, 11] and day > 30)
is_leap_year = (year % 4) == 0
is_leap_year &= ((year % 100) != 0 or (year % 400) == 0)
date_is_valid &= not (month == 2 and day == 29 and not is_leap_year)
if not date_is_valid:
date_dict["month"] = date_dict["day"] = date_dict["year"] = None
return False
return True | c8cc01c8c1259ab8c4b263e36ae9f85a95356017 | 1,275 |
def create_scale(tonic, pattern, octave=1):
"""
Create an octave-repeating scale from a tonic note
and a pattern of intervals
Args:
tonic: root note (midi note number)
pattern: pattern of intervals (list of numbers representing
intervals in semitones)
octave: span of scale (in octaves)
Returns:
list of midi notes in the scale
"""
assert(sum(pattern)==12)
scale = [tonic]
note = tonic
for o in range(octave):
for i in pattern:
note += i
if note <= 127:
scale.append(note)
return scale | f9337289fda2e1b08cd371d3e91cc5a23c9c9822 | 1,276 |
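Worked example: the major-scale interval pattern over one octave starting from middle C (MIDI 60):
assert create_scale(60, [2, 2, 1, 2, 2, 2, 1]) == [60, 62, 64, 65, 67, 69, 71, 72]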
import argparse
import multiprocessing
def parse_args(args):
"""
Takes in the command-line arguments list (args), and returns a nice argparse
result with fields for all the options.
Borrows heavily from the argparse documentation examples:
<http://docs.python.org/library/argparse.html>
"""
# The command line arguments start with the program name, which we don't
# want to treat as an argument for argparse. So we remove it.
args = args[1:]
# Construct the parser (which is stored in parser)
# Module docstring lives in __doc__
# See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
# And a formatter class so our examples in the docstring look good. Isn't it
# convenient how we already wrapped it to 80 characters?
# See http://docs.python.org/library/argparse.html#formatter-class
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Now add all the options to it
parser.add_argument("fasta", type=str,
help="name of the FASTA to read; must be indexable")
parser.add_argument("-n", type=int, default=10000,
help="the number of k-mers to count")
parser.add_argument("-k", type=int, default=150,
help="the length of each k-mer")
parser.add_argument("--thread_count", type=int, default=multiprocessing.cpu_count(),
help="number of k-mer counting threads to use")
parser.add_argument("--batch_size", type=int, default=10000,
help="number of forward-strand k-mer candidates to count in each batch")
parser.add_argument("--bloom_error", type=float, default=1E-4,
help="error rate on the bloom filter")
return parser.parse_args(args) | 9ab9bad96af383a8d7441d7ccb0da10ae68dafb5 | 1,278 |
def get_constraints_for_x(cell, board):
"""
Get the constraints for a given cell cell
@param cell Class instance of Variable; a cell of the Sudoku board
@param board
@return Number of constraints
"""
nconstraints = 0
# Row
for cellj in board[cell.row][:cell.col]:
if cellj.get_domain_size() > 1:
nconstraints += 1
for cellj in board[cell.row][cell.col+1:]:
if cellj.get_domain_size() > 1:
nconstraints += 1
# Col
for irow in range(cell.row):
if board[irow][cell.col].get_domain_size() > 1:
nconstraints += 1
for irow in range(cell.row+1, cell.max_domain_val):
if board[irow][cell.col].get_domain_size() > 1:
nconstraints += 1
# .. This would not generalize to a new board, but leave for now
ibox_row = int(cell.row/3) * 3
ibox_col = int(cell.col/3) * 3
if board[ibox_row+1][ibox_col+1].get_domain_size() > 1 \
or board[ibox_row+1][ibox_col+2].get_domain_size() > 1 \
or board[ibox_row+2][ibox_col+1].get_domain_size() > 1 \
or board[ibox_row+2][ibox_col+2].get_domain_size() > 1:
nconstraints += 1
return nconstraints | a46cda54569a12e80b9d52896f07335480799cb1 | 1,279 |
def get_subgraph_pos(G, pos):
""" Returns the filtered positions for subgraph G. If subgraph = original graph then pos will be returned.
Parameters
----------
G : nx.Graph
A graph object.
    pos : dict
A dictionary with nodes as keys and positions as values.
Example
-------
>>> pos = nx.spring_layout(G)
>>> subgraph_nodes = ['1','2','3']
>>> subgraph = G.subgraph(subgraph_nodes)
>>> subgraph_positions = get_subgraph_pos(subgraph,pos)
Returns
-------
dict
Assuming positions were generated earlier for a larger graph with some layout algorithm
this functions returns the filtered positions by the subgraph.
"""
return {k: v for k, v in pos.items() if k in G.nodes()} | ca7fc389cc51aaace7a751f2107fe5cfbfd22e6c | 1,280 |
def pds3_label_gen_date(file):
"""Returns the creation date of a given PDS3 label.
    :param file: File path
    :type file: str
:return: Creation date
:rtype: str
"""
generation_date = "N/A"
with open(file, "r") as f:
for line in f:
if "PRODUCT_CREATION_TIME" in line:
generation_date = line.split("=")[1].strip()
return generation_date | c2877fa9246dd0c12c6ea47635ab248dc038b179 | 1,283 |
def harmony(*args):
"""
    Takes an arbitrary number of numbers and returns their harmonic
    mean. Calculation is done with the formula:
    number_of_args / (1/item1 + 1/item2 + ...)
    Args:
        *args (tuple): any number of float or integer arguments
    Returns:
        float: harmonic mean value
"""
result = 0
if 0 in args:
return 0.0
for item in args:
result += 1 / item
return len(args) / result | bc66276b3ef27ef0bfd059afa8ca7afd5d9cbb82 | 1,284 |
def views():
""" Used for the creation of Orientation objects with
`Orientations.from_view_up`
"""
return [[1, 0, 0], [2, 0, 0], [-1, 0, 0]] | 21ffce8e8a56cf31e2d03a6384d584bcb4bfb2c8 | 1,285 |
def pack(pieces=()):
"""
Join a sequence of strings together.
:param list pieces: list of strings
:rtype: bytes
"""
return b''.join(pieces) | ffd0852a16c6292f921e5cf205301171e3a96fd3 | 1,286 |
def wizard_process_received_form(form):
""" Processing of form received during the time measure
Expected result example: {1: '00:43.42', 2: '00:41.35', 3: '00:39.14', 4: '00:27.54'}
"""
    lines = {key.split('_')[1]: value.split('_')[1] for key, value in form.items() if key.startswith("line")}
    times = {key.split('_')[1]: value for key, value in form.items() if key.startswith("time")}
return {int(value): times[key] for key, value in lines.items()} | 54b10589cab7ce689b64f5373d2f0a998044db82 | 1,291 |
import inspect
def getsource(obj,is_binary=False):
"""Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Inputs:
- obj: an object whose source code we will attempt to extract.
Optional inputs:
- is_binary: whether the object is known to come from a binary source.
This implementation will skip returning any output for binary objects, but
custom extractors may know how to meaningfully process them."""
if is_binary:
return None
else:
return inspect.getsource(obj) | 9e97a030c695b9ea50d27abc5253e47be7d4c06a | 1,292 |
def splitext_all(_filename):
"""split all extensions (after the first .) from the filename
should work similar to os.path.splitext (but that splits only the last extension)
"""
_name, _extensions = _filename.split('.')[0], '.'.join(_filename.split('.')[1:])
return(_name, "."+ _extensions) | bf9e4ee06eb30dfeb7898ce6e34607bef20b290b | 1,294 |
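Usage sketch:
assert splitext_all("archive.tar.gz") == ("archive", ".tar.gz")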
def tag_in_tags(entity, attribute, value):
"""
Return true if the provided entity has
a tag of value in its tag list.
"""
return value in entity.tags | ad88be5f8848b387f2a261ce5506dffde285a1d8 | 1,296 |
def generate_finding_title(title):
"""
Generate a consistent title for a finding in AWS Security Hub
* Setup as a function for consistency
"""
return "Trend Micro: {}".format(title) | 0cf390c2579e06c2166b086332035b864d3db1e3 | 1,297 |
import os
import shutil
import click
def tryrmcache(dir_name, verbose=False):
"""
removes all __pycache__ starting from directory dir_name
all the way to leaf directory
Args:
dir_name(string) : path from where to start removing pycache
"""
# directory_list = list()
is_removed = False
for root, dirs, _ in os.walk(dir_name, topdown=False):
for name in dirs:
# directory_list.append(os.path.join(root, name))
if name == "__pycache__":
shutil.rmtree(os.path.join(root, name))
is_removed = True
if verbose:
if is_removed:
click.echo("[x] __pycache__ successfully deleted")
else:
click.echo("[ ] __pycache__ doesn't exist", err=True)
return is_removed | d4453352b30a8d3683b928f864536bcd1d6fda9f | 1,298 |
import torch
def to_device(x, device):
"""Cast a hierarchical object to pytorch device"""
if isinstance(x, torch.Tensor):
return x.to(device)
elif isinstance(x, dict):
for k in list(x.keys()):
x[k] = to_device(x[k], device)
return x
elif isinstance(x, list) or isinstance(x, tuple):
return type(x)(to_device(t, device) for t in x)
else:
raise ValueError('Wrong type !') | a315905fb0cf6d6720103c0d22440418ebd41bf1 | 1,299 |
def f(x):
"""Cubic function."""
return x**3 | 13832221de3490dbd92f4f1a26854baec7010023 | 1,300 |
import textwrap
def dedent(ind, text):
"""
Dedent text to the specific indentation level.
    :param ind: common indentation level for the resulting text (number of spaces to prepend to every line)
:param text: text that should be transformed.
:return: ``text`` with all common indentation removed, and then the specified amount of indentation added.
"""
text2 = textwrap.dedent(text)
if ind == 0:
return text2
indent_str = " " * ind
return "\n".join(indent_str + line for line in text2.split("\n")) | 271b9fd270d78c4bc952af31d3d9be0ff6bdab73 | 1,301 |
def get_polarimeter_index(pol_name):
"""Return the progressive number of the polarimeter within the board (0…7)
Args:
pol_name (str): Name of the polarimeter, like ``R0`` or ``W3``.
Returns:
An integer from 0 to 7.
"""
if pol_name[0] == "W":
return 7
else:
return int(pol_name[1]) | 0068931868e214896f6263e58fc09215352d502c | 1,305 |
import io
import base64
def get_image_html_tag(fig, format="svg"):
"""
Returns an HTML tag with embedded image data in the given format.
:param fig: a matplotlib figure instance
:param format: output image format (passed to fig.savefig)
"""
stream = io.BytesIO()
# bbox_inches: expand the canvas to include the legend that was put outside the plot
# see https://stackoverflow.com/a/43439132
fig.savefig(stream, format=format, bbox_inches="tight")
data = stream.getvalue()
if format == "svg":
return data.decode("utf-8")
data = base64.b64encode(data).decode("utf-8")
return f"<img src=\"data:image/{format};base64,{data}\">" | f5c59a6f4f70fb6616cec4619d8cbf9ca2e28529 | 1,308 |
def reformat_language_tuple(langval):
"""Produce standardly-formatted language specification string using given language tuple.
:param langval: `tuple` in form ('<language>', '<language variant>'). Example: ('en', 'US')
:return: `string` formatted in form '<language>-<language-variant>'
"""
if langval:
langval_base, langval_variant = langval
if langval_variant:
langval_base = '{0}-{1}'.format(langval_base, langval_variant)
return langval_base
else:
return None | 63c479d7dd273f31b9bdcc6c0ce81d4267a43714 | 1,309 |
def memdiff_search(bytes1, bytes2):
"""
Use binary searching to find the offset of the first difference
between two strings.
:param bytes1: The original sequence of bytes
:param bytes2: A sequence of bytes to compare with bytes1
:type bytes1: str
:type bytes2: str
:rtype: int offset of the first location a and b differ, None if strings match
"""
# Prevent infinite recursion on inputs with length of one
half = (len(bytes1) // 2) or 1
# Compare first half of the string
if bytes1[:half] != bytes2[:half]:
# Have we found the first diff?
if bytes1[0] != bytes2[0]:
return 0
return memdiff_search(bytes1[:half], bytes2[:half])
# Compare second half of the string
if bytes1[half:] != bytes2[half:]:
return memdiff_search(bytes1[half:], bytes2[half:]) + half | fbcb221c77730c45be4c81a6ae7515e602468af5 | 1,310 |
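A short check (strings differ first at index 2; identical inputs yield None):
assert memdiff_search("abcd", "abed") == 2
assert memdiff_search("abcd", "abcd") is None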
import os
def components(path):
"""Split a POSIX path into components."""
head, tail = os.path.split(os.path.normpath(path))
if head == "":
return [tail]
elif head == "/":
return [head + tail]
else:
return components(head) + [tail] | f29ae64104255450f5889a7440679342af767c9b | 1,311 |
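Usage sketch, assuming a POSIX os.path:
assert components("/usr/local/bin") == ["/usr", "local", "bin"]
assert components("a/b/c") == ["a", "b", "c"]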
import json
def _classes_dict(filename):
    """
    Open JSON file and read the data for the Classes (and Origins).
    filename - the file name as a string.
    Runtime: O(n)
    """
    class_dict = {} # {'robot': ['blitzcrank']}
    class_bonus_dict = {}
    # { needed : { class key : [champions] } }, e.g. { 1: {'robot': [], 'exile': []}, 2: ... }
    classes_by_needed = {1: {}, 2: {}, 3: {}, 4: {}, 6: {}}
    with open(filename) as json_file:
        data = json.load(json_file)
        for class_obj in data.items(): # O(n)
            key = class_obj[1]['key'] # String
            name = class_obj[1]['name'] # String
            description = class_obj[1]['description'] # String
            accentChampionImage = class_obj[1]['accentChampionImage'] # URL as String
            bonuses = class_obj[1]['bonuses'] # Array [{'needed': int, 'effect': string}]
            needed = bonuses[-1]['needed'] # The highest 'needed' count is the last item in the array
            class_dict[key] = []
            class_bonus_dict[key] = needed
            classes_by_needed[needed].update({class_obj[0]: []})
    return classes_by_needed | 44fa2acec6c7235995bfdabaab149b4cba2cb7cc | 1,312 |
def stringify(li,delimiter):
""" Converts list entries to strings and joins with delimiter."""
string_list = map(str,li)
return delimiter.join(string_list) | a4c35a19d8ea654a802cd3f92ababcbdfdf0ecfb | 1,313 |
def norm_w(x, w):
"""
Compute sum_i( w[i] * |x[i]| ).
See p. 7.
"""
return (w * abs(x)).sum() | a9825750cb6ee0bbbe87b0c4d1bd132bcfca90db | 1,314 |
def load_module(module, app):
"""Load an object from a Python module
In:
- ``module`` -- name of the module
- ``app`` -- name of the object to load
Return:
- (the object, None)
"""
r = __import__(module, fromlist=('',))
if app is not None:
r = getattr(r, app)
return r, None | 858d9d0bf91ff7d83ad391218b8ff1b37007b43b | 1,315 |
from unittest.mock import Mock
def make_subprocess_hook_mock(exit_code: int, output: str) -> Mock:
"""Mock a SubprocessHook factory object for use in testing.
This mock allows us to validate that the RenvOperator is executing
subprocess commands as expected without running them for real.
"""
result_mock = Mock()
result_mock.exit_code = exit_code
result_mock.output = output
hook_instance_mock = Mock()
hook_instance_mock.run_command = Mock(return_value=result_mock)
hook_factory_mock = Mock(return_value=hook_instance_mock)
return hook_factory_mock | a047608503be8bc7fc4b782139e7d12145efb3cd | 1,316 |
def binstr2int(bin_str: str) -> int:
    """Convert a binary string to a decimal integer; the inverse of int2binstr.
    Args:
        bin_str: binary string, e.g. '0b0011' or '0011'
    Returns:
        the converted decimal integer
    """
    return int(bin_str, 2) | 87c6ac16c2215e533cb407407bef926ed8668e3e | 1,317 |
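Quick checks:
assert binstr2int('0b0011') == 3
assert binstr2int('0011') == 3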
def _scale(tensor):
"""Scale a tensor based on min and max of each example and channel
Resulting tensor has range (-1, 1).
Parameters
----------
tensor : torch.Tensor or torch.autograd.Variable
Tensor to scale of shape BxCxHxW
Returns
-------
Tuple (scaled_tensor, min, max), where min and max are tensors
containing the values used for normalizing the tensor
"""
b, c, h, w = tensor.shape
out = tensor.view(b, c, h * w)
minimum, _ = out.min(dim=2, keepdim=True)
out = out - minimum
maximum, _ = out.max(dim=2, keepdim=True)
out = out / maximum # out has range (0, 1)
out = out * 2 - 1 # out has range (-1, 1)
return out.view(b, c, h, w), minimum, maximum | 64eed9bd70c543def6456f3af89fa588ec35bca8 | 1,318 |
def url(endpoint, path):
"""append the provided path to the endpoint to build an url"""
return f"{endpoint.rstrip('/')}/{path}" | dee733845984bfc4cf5728e9614cce08d19a2936 | 1,319 |
def is_collision(line_seg1, line_seg2):
    """
    Checks for a collision between line segments p1(x1, y1) -> q1(x2, y2)
    and p2(x3, y3) -> q2(x4, y4)
    """
    def on_segment(p1, p2, p3):
        return (min(p1[0], p3[0]) <= p2[0] <= max(p1[0], p3[0])
                and min(p1[1], p3[1]) <= p2[1] <= max(p1[1], p3[1]))
    def orientation(p1, p2, p3):
        val = ((p2[1] - p1[1]) * (p3[0] - p2[0])) - ((p2[0] - p1[0]) * (p3[1] - p2[1]))
        if val == 0:
            return 0
        elif val > 0:
            return 1
        else:
            return 2
    p1, q1 = line_seg1[0], line_seg1[1]
    p2, q2 = line_seg2[0], line_seg2[1]
    o1 = orientation(p1, q1, p2)
    o2 = orientation(p1, q1, q2)
    o3 = orientation(p2, q2, p1)
    o4 = orientation(p2, q2, q1)
    # General case
    if o1 != o2 and o3 != o4:
        return True
    # Special cases: a collinear endpoint lying on the other segment.
    # Plain `and` (not bitwise `&`) is required here: `==` binds more
    # loosely than `&`, so `o1 == 0 & flag` parses as `o1 == (0 & flag)`.
    if o1 == 0 and on_segment(p1, p2, q1):
        return True
    if o2 == 0 and on_segment(p1, q2, q1):
        return True
    if o3 == 0 and on_segment(p2, p1, q2):
        return True
    if o4 == 0 and on_segment(p2, q1, q2):
        return True
    return False | 17dba61faebe50336cbc2cd2cc56c49474db5431 | 1,320 |
def fibonacci(position):
"""
Based on a position returns the number in the Fibonacci sequence
on that position
"""
if position == 0:
return 0
elif position == 1:
return 1
return fibonacci(position-1)+fibonacci(position-2) | cc4fe0860fa97234ead2179e18d208a8567e0cb3 | 1,322 |
import asyncio
import functools
def bound_concurrency(size):
"""Decorator to limit concurrency on coroutine calls"""
sem = asyncio.Semaphore(size)
def decorator(func):
"""Actual decorator"""
@functools.wraps(func)
async def wrapper(*args, **kwargs):
"""Wrapper"""
async with sem:
return await func(*args, **kwargs)
return wrapper
return decorator | 030e4dea0efccf9d5f2cbe4a40f3e6f32dfef846 | 1,323 |
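A usage sketch with a hypothetical aiohttp-style coroutine; at most three fetches run concurrently:
@bound_concurrency(3)
async def fetch_page(session, url):
    async with session.get(url) as resp:
        return await resp.text()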
def Capitalize(v):
"""Capitalise a string.
>>> s = Schema(Capitalize)
>>> s('hello world')
'Hello world'
"""
return str(v).capitalize() | 9072ea91b946694bbb1410fb10a5b1b1f5cdd7c2 | 1,325 |
def pg_index_exists(conn, schema_name: str, table_name: str, index_name: str) -> bool:
"""
Does a postgres index exist?
Unlike pg_exists(), we don't need heightened permissions on the table.
So, for example, Explorer's limited-permission user can check agdc/ODC tables
that it doesn't own.
"""
return (
conn.execute(
"""
select indexname
from pg_indexes
where schemaname=%(schema_name)s and
tablename=%(table_name)s and
indexname=%(index_name)s
""",
schema_name=schema_name,
table_name=table_name,
index_name=index_name,
).scalar()
is not None
) | 98ebdc0db7f3e42050e61205fd17309d015352a0 | 1,326 |
import re
def sentence_segment(text, delimiters=('?', '?', '!', '!', '。', ';', '……', '…'), include_symbols=True):
    """
    Sentence segmentation
    :param text: input text
    :param delimiters: tuple of sentence-ending symbols to split on
    :param include_symbols: also emit the delimiter symbols as segments
    :return: list of (segment, start_index) tuples
    """
    result = []
    delimiters = set(delimiters)
    # Escape each delimiter: '?', '!' and friends are regex metacharacters
    # and would otherwise break the alternation pattern.
    delimiters_str = '|'.join(re.escape(item) for item in delimiters)
    blocks = re.split(delimiters_str, text)
    start_idx = 0
    for blk in blocks:
        if not blk:
            continue
        result.append((blk, start_idx))
        start_idx += len(blk)
        if include_symbols and start_idx < len(text):
            result.append((text[start_idx], start_idx))
            start_idx += 1
    return result | c8860a872e779873330eaded8e9951cabdbba01e | 1,328 |
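A short sanity check on an English sentence:
parts = sentence_segment("Hi! How are you?")
assert parts == [("Hi", 0), ("!", 2), (" How are you", 3), ("?", 15)]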
def time_rep_song_to_16th_note_grid(time_rep_song):
"""
Transform the time_rep_song into an array of 16th note with pitches in the onsets
[[60,4],[62,2],[60,2]] -> [60,0,0,0,62,0,60,0]
"""
grid_16th = []
for pair_p_t in time_rep_song:
grid_16th.extend([pair_p_t[0]] + [0 for _ in range(pair_p_t[1]-1)])
return grid_16th | 8986819bd39ae4830d04bf40ab158d310bb45485 | 1,329 |
def update_position(position, velocity):
"""
    :param position: previous/current position of a particle
    :param velocity: the newest velocity, calculated during this iteration
        (the new velocity is computed before the new position)
:return: list - new position
"""
pos = []
length = len(position)
for i in range(length):
pos.append(position[i] + velocity[i])
return pos | 7734e4021d958f42d974401b78331bcd2911ac92 | 1,330 |
def require(*modules):
"""Check if the given modules are already available; if not add them to
the dependency list."""
deplist = []
for module in modules:
try:
__import__(module)
except ImportError:
deplist.append(module)
return deplist | 88df83cd33d8bddea63e4d2fbfb4d8351a3c23b1 | 1,331 |
def fixture_base_context(
env_name: str,
) -> dict:
"""Return a basic context"""
ctx = dict(
current_user="a_user",
current_host="a_host",
)
return ctx | fbfed439f784bdd64e93910bbb581955200af2bb | 1,332 |
def evaluation(evaluators, dataset, runners, execution_results, result_data):
"""Evaluate the model outputs.
Args:
evaluators: List of tuples of series and evaluation functions.
dataset: Dataset against which the evaluation is done.
runners: List of runners (contains series ids and loss names).
execution_results: Execution results that include the loss values.
result_data: Dictionary from series names to list of outputs.
Returns:
Dictionary of evaluation names and their values which includes the
metrics applied on respective series loss and loss values from the run.
"""
eval_result = {}
# losses
for runner, result in zip(runners, execution_results):
for name, value in zip(runner.loss_names, result.losses):
eval_result["{}/{}".format(runner.output_series, name)] = value
# evaluation metrics
for generated_id, dataset_id, function in evaluators:
if (not dataset.has_series(dataset_id)
or generated_id not in result_data):
continue
desired_output = dataset.get_series(dataset_id)
model_output = result_data[generated_id]
eval_result["{}/{}".format(generated_id, function.name)] = function(
model_output, desired_output)
return eval_result | ef3470edb8b2336bdc54507a5df8023f8095b995 | 1,333 |
import os
def filename(name):
""" Get filename without extension"""
return os.path.splitext(name)[0] | 9899b6e187684ddb95ff9d1bd7974163a7e3e78b | 1,334 |
import base64
def base64_decode(string):
"""
Decodes data encoded with MIME base64
"""
return base64.b64decode(string) | 38870882fca9e6595e3f5b5f8943d0bf781f006c | 1,335 |
import re
def convert_operand_kind(operand_tuple):
"""Returns the corresponding operand type used in spirv-tools for the given
operand kind and quantifier used in the JSON grammar.
Arguments:
- operand_tuple: a tuple of two elements:
- operand kind: used in the JSON grammar
- quantifier: '', '?', or '*'
Returns:
a string of the enumerant name in spv_operand_type_t
"""
kind, quantifier = operand_tuple
# The following cases are where we differ between the JSON grammar and
# spirv-tools.
if kind == 'IdResultType':
kind = 'TypeId'
elif kind == 'IdResult':
kind = 'ResultId'
elif kind == 'IdMemorySemantics' or kind == 'MemorySemantics':
kind = 'MemorySemanticsId'
elif kind == 'IdScope' or kind == 'Scope':
kind = 'ScopeId'
elif kind == 'IdRef':
kind = 'Id'
elif kind == 'ImageOperands':
kind = 'Image'
elif kind == 'Dim':
kind = 'Dimensionality'
elif kind == 'ImageFormat':
kind = 'SamplerImageFormat'
elif kind == 'KernelEnqueueFlags':
kind = 'KernelEnqFlags'
elif kind == 'LiteralExtInstInteger':
kind = 'ExtensionInstructionNumber'
elif kind == 'LiteralSpecConstantOpInteger':
kind = 'SpecConstantOpNumber'
elif kind == 'LiteralContextDependentNumber':
kind = 'TypedLiteralNumber'
elif kind == 'PairLiteralIntegerIdRef':
kind = 'LiteralIntegerId'
elif kind == 'PairIdRefLiteralInteger':
kind = 'IdLiteralInteger'
elif kind == 'PairIdRefIdRef': # Used by OpPhi in the grammar
kind = 'Id'
if kind == 'FPRoundingMode':
kind = 'FpRoundingMode'
elif kind == 'FPFastMathMode':
kind = 'FpFastMathMode'
if quantifier == '?':
kind = 'Optional{}'.format(kind)
elif quantifier == '*':
kind = 'Variable{}'.format(kind)
return 'SPV_OPERAND_TYPE_{}'.format(
re.sub(r'([a-z])([A-Z])', r'\1_\2', kind).upper()) | 3d26a0b330ae64209655b24dfe86578cb4b8724c | 1,336 |
def registered_response_data():
"""Body (bytes) of the registered response."""
return b"response data" | 1ee44d70592747947d76ff757901f44fde5c9946 | 1,337 |
def _ensure_package(base, *parts):
"""Ensure that all the components of a module directory path exist, and
contain a file __init__.py."""
bits = []
for bit in parts[:-1]:
bits.append(bit)
base.ensure(*(bits + ['__init__.py']))
return base.ensure(*parts) | fc9bb95445cc1b0e8ec819dfafdaff7d5afbf372 | 1,338 |
import math
def sigmoid(z):
"""Sigmoid function"""
if z > 100:
return 0
return 1.0 / (1.0 + math.exp(z)) | 097e1a85fc46264cb1c7cd74498d6cfab97e5b88 | 1,339 |
def get_attn_pad_mask(seq_q, seq_k):
    """
    Sentences of different lengths are padded with PAD up to a fixed length.
    PAD positions carry no meaning, so attention should not attend to them;
    this attention-mask function marks the PAD positions so attention stays
    on the real tokens.
    :param seq_q: [batch_size, seq_len]
    :param seq_k: [batch_size, seq_len]
    """
    batch_size, len_q = seq_q.size()
    _, len_k = seq_k.size()
    # eq(0) marks PAD positions: True where the token id is 0, False elsewhere
    # e.g. [1, 2, 3, 0] -> [F, F, F, T]
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # [batch_size, 1, len_k]
    return pad_attn_mask.expand(batch_size, len_q, len_k) | 522fc244c02ec767b80da2f0c9b5cf6720e931c0 | 1,340 |
def node_tree(node: str):
"""Format printing for locate"""
str2list = list(node.replace(' ', ''))
count = 0
for i, e in enumerate(str2list):
if e == '(':
count += 1
str2list[i] = '(\n{}'.format('| ' * count)
elif e == ')':
count -= 1
str2list[i] = '\n{})'.format('| ' * count)
elif e == ',':
str2list[i] = ',\n{}'.format('| ' * count)
elif e == '[':
count += 1
str2list[i] = '[\n{}'.format('| ' * count)
elif e == ']':
count -= 1
str2list[i] = '\n{}]'.format('| ' * count)
return ''.join(str2list) | 010805499cb6e886ec8811949a1d1d013db1d15f | 1,341 |
def lamb1(u,alpha=.5):
"""Approximate the Lambert W function.
Approximate the Lambert W function from its upper and lower bounds.
The parameter alpha (between 0 and 1) determines how close the
approximation is to the lower bound instead of the upper bound.
:arg float u: Modified argument of the function.
:arg float alpha: Bound parameter (default 0.5).
:returns: (-z)-value of the Lambert function.
:raises ValueError: If u is negative.
:raises ValueError: If alpha is not between 0 and 1.
"""
if u < 0:
errmsg = 'Argument u must be positive'
raise ValueError(errmsg)
if alpha < 0 or alpha > 1:
errmsg = 'Parameter alpha must be between 0 and 1'
raise ValueError(errmsg)
beta = (2 + alpha)/3
negz = 1 + (2*u)**.5 + beta*u
return negz | 1d769ccb74334eef55aa1bc0697328b34ba067bc | 1,342 |
def clean_data(df):
    """
    Remove duplicate rows from a dataframe.
    Parameters:
        df (DataFrame): data frame
    """
    df = df.drop_duplicates()
    return df | 7072885f7233c5407060344e6858f89108d61ee8 | 1,343 |
def format_str_for_write(input_str: str) -> bytes:
"""Format a string for writing to SteamVR's stream."""
if len(input_str) < 1:
return "".encode("utf-8")
if input_str[-1] != "\n":
return (input_str + "\n").encode("utf-8")
return input_str.encode("utf-8") | 1b83a2c75118b03b7af06350e069775c0b877816 | 1,344 |
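Quick checks:
assert format_str_for_write("hello") == b"hello\n"
assert format_str_for_write("hello\n") == b"hello\n"
assert format_str_for_write("") == b""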
def convert_from_opencorpora_tag(to_ud, tag: str, text: str):
"""
    Convert tags from the OpenCorpora format to Universal Dependencies.
    :param to_ud: the converter.
    :param tag: tag in OpenCorpora format.
    :param text: the token.
    :return: tag in UD.
"""
ud_tag = to_ud(str(tag), text)
pos = ud_tag.split()[0]
gram = ud_tag.split()[1]
return pos, gram | 0e650cc4976d408ed88ef9280fe3a74261353561 | 1,345 |
import struct
def reg_to_float(reg):
"""convert reg value to Python float"""
st = struct.pack(">L", reg)
return struct.unpack(">f", st)[0] | f4a2d416e880807503f3c0ba0b042fbbecc09064 | 1,346 |
def _as_nested_lists(vertices):
""" Convert a nested structure such as an ndarray into a list of lists. """
out = []
for part in vertices:
if hasattr(part[0], "__iter__"):
verts = _as_nested_lists(part)
out.append(verts)
else:
out.append(list(part))
return out | c69bd2084aa8e76a53adf3e25286a8dd7ae23176 | 1,347 |
def A070939(i: int = 0) -> int:
"""Length of binary representation of n."""
return len(f"{i:b}") | 31b12e493645c3bdf7e636a48ceccff5d9ecc492 | 1,350 |
def centre_to_zeroes(cartesian_point, centre_point):
"""Converts centre-based coordinates to be in relation to the (0,0) point.
PIL likes to do things based on (0,0), and in this project I'd like to keep
the origin at the centre point.
Parameters
----------
cartesian_point : (numeric)
x, y coordinates in terms of the centre
centre_point : (numeric)
x, y coordinates of the centre
"""
x = cartesian_point[0] + centre_point[0]
y = centre_point[1] - cartesian_point[1]
return x, y | f0ddd632650127e3bb1ed766191950ccf7f06d87 | 1,351 |