content (stringlengths 35 to 416k) | sha1 (stringlengths 40) | id (int64 0 to 710k)
---|---|---|
def create_dict(local=None, field=None, **kwargs):
"""
    Fetch the specified variables, as a dict, from the local variables in locals()
    :param local: dict
    :param field: str[] names of the variables to read from local
    :param kwargs: used to add variables under alternative names
:return: dict
"""
if field is None or local is None:
return {}
result = {k: v for k, v in local.items() if k in field}
result.update(**kwargs)
return result | 19aceef7f648cc72f29fceba811085cde9d6d587 | 708,233 |
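A quick usage sketch for create_dict (illustrative only; the names below are made up):

def example(a=1, b=2, c=3):
    # pull 'a' and 'b' out of the local namespace and add an aliased entry
    return create_dict(local=locals(), field=['a', 'b'], c_alias=3)

# example() -> {'a': 1, 'b': 2, 'c_alias': 3}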
def sum_list_for_datalist(data_list):
    """
    When storing to the DB, return 0 if any past datum counted back from the
    reference date is missing (marked by a 0 entry); otherwise return the sum.
    :param data_list: list of numbers
    :return: float or int
    """
    mysum = 0
    for value in data_list:
        if value == 0:
            return 0
        mysum += value
    return mysum | bae8966f64c642176d92d31c27df691e0f255d6a | 708,234 |
def solve(strs, m, n):
"""
2D 0-1 knapsack
"""
def count(s):
m, n = 0, 0
for c in s:
if c == "0":
m += 1
elif c == "1":
n += 1
return m, n
dp = []
for _ in range(m + 1):
dp.append([0] * (n + 1))
for s in strs:
mi, ni = count(s)
for j in range(m, mi - 1, -1): # reverse!
for k in range(n, ni - 1, -1): # reverse!
dp[j][k] = max(dp[j][k], dp[j - mi][k - ni] + 1)
return dp[m][n] | 3fb2b16fc9059227c0edce1199269988d18cb908 | 708,235 |
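An illustrative call (the classic "ones and zeroes" setup; the inputs are made up):

strs = ["10", "0001", "111001", "1", "0"]
print(solve(strs, 5, 3))  # -> 4: the subset {"10", "0001", "1", "0"} uses 5 zeros and 3 ones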
def bk_category_chosen_category():
"""Returns chosen category for creating bk_category object."""
return "Bread" | cbf1c933e5c2b69214e828afaab5babdba61dca8 | 708,236 |
def get_book_url(tool_name, category):
"""Get the link to the help documentation of the tool.
Args:
tool_name (str): The name of the tool.
category (str): The category of the tool.
Returns:
str: The URL to help documentation.
"""
prefix = "https://jblindsay.github.io/wbt_book/available_tools"
url = "{}/{}.html#{}".format(prefix, category, tool_name)
return url | daf6c8e0832295914a03b002b548a82e2949612a | 708,237 |
import hashlib
def game_hash(s):
"""Generate hash-based identifier for a game account based on the
text of the game.
"""
def int_to_base(n):
alphabet = "BCDFGHJKLMNPQRSTVWXYZ"
base = len(alphabet)
if n < base:
return alphabet[n]
return int_to_base(n // base) + alphabet[n % base]
return int_to_base(
int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16)
)[-7:] | c218a2607390916117921fe0f68fc23fedd51fc3 | 708,238 |
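A usage sketch for game_hash (the exact output depends on the SHA-1 digest of the input):

print(game_hash("example game text"))  # a stable 7-character id over the alphabet "BCDFGHJKLMNPQRSTVWXYZ"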
def count_primes(num):
"""
Write a function that returns the number
of prime numbers that exist up to and including a given number
:param num: int
:return: int
"""
    count = 0
    for n in range(2, num + 1):  # primes start at 2
        for i in range(2, n):
            if (n % i) == 0:
                break
        else:
            count += 1
return count | 7a544265f3a7eca9118b0647bc8926c655cdb8ec | 708,239 |
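A quick check of count_primes as fixed above:

print(count_primes(10))  # -> 4 (the primes 2, 3, 5 and 7)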
def run_experiment(config):
"""
Run the experiment.
Args:
config: The configuration dictionary.
Returns:
The experiment result.
"""
return None | b12a8a5cbdb03d60ca618826f20c9a731a39fd2a | 708,240 |
def is_english_score(bigrams, word):
"""Calculate the score of a word."""
prob = 1
for w1, w2 in zip("!" + word, word + "!"):
bigram = f"{w1}{w2}"
if bigram in bigrams:
prob *= bigrams[bigram] # / float(bigrams['total'] + 1)
else:
print("%s not found" % bigram)
prob *= 1 # / float(bigrams['total'] + 1)
return prob | 834e28a32806d0599f5df97d978bc6b9c1a51da7 | 708,241 |
import os
import json
def _get_from_url(url):
"""
    Note: url should be in a form supported by the OQMD RESTful API, with the result
    format set to json, e.g.:
    http://oqmd.org/oqmdapi/formationenergy?fields=name,entry_id,delta_e&filter=stability=0&format=json
"""
os.system("mkdir -p /tmp/pymatflow/third")
#os.system("wget \"%s\" -O /tmp/pymatflow/third/oqmd_restful_api_results.json" % (url))
#os.system("curl \"%s\" -Lo /tmp/pymatflow/third/oqmd_restful_api_results.json" % (url))
# silent output of curl
os.system("curl \"%s\" -s -Lo /tmp/pymatflow/third/oqmd_restful_api_results.json" % (url))
with open("/tmp/pymatflow/third/oqmd_restful_api_results.json", 'r') as fin:
out = json.loads(fin.read())
return out | 3860ca4b73eb6e4842ff7afd48485dd12d6d3e45 | 708,242 |
import win32com.client
def _get_windows_network_adapters():
"""Get the list of windows network adapters."""
wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
    wbem_service = wbem_locator.ConnectServer('.', r'root\cimv2')  # raw string avoids the invalid '\c' escape
wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
network_adapters = []
for adapter in wbem_network_adapters:
if (adapter.NetConnectionStatus == 2 or
adapter.NetConnectionStatus == 7):
adapter_name = adapter.NetConnectionID
mac_address = adapter.MacAddress.lower()
config = adapter.associators_(
'Win32_NetworkAdapterSetting',
'Win32_NetworkAdapterConfiguration')[0]
ip_address = ''
subnet_mask = ''
if config.IPEnabled:
ip_address = config.IPAddress[0]
subnet_mask = config.IPSubnet[0]
#config.DefaultIPGateway[0]
network_adapters.append({'name': adapter_name,
'mac-address': mac_address,
'ip-address': ip_address,
'subnet-mask': subnet_mask})
return network_adapters | 796c25089411633d11b28fdd9c23d900db7005f0 | 708,243 |
import os
def get_graph_names(test_dir):
"""Parse test_dir/*GRAPHFILES and return basenames for all .graph files"""
graph_list = []
GRAPHFILES_files = [f for f in os.listdir(test_dir) if f.endswith("GRAPHFILES")]
for GRAPHFILE in GRAPHFILES_files:
with open(os.path.join(test_dir, GRAPHFILE), 'r') as f:
for l in f.readlines():
l = l.strip()
if not l or l.startswith('#'):
continue
graph_list.append(os.path.basename(l).replace('.graph', ''))
return graph_list | b60f6e5a1b3654e6e7a982902356c18db0e740ae | 708,244 |
def determine_issues(project):
"""
Get the list of issues of a project.
:rtype: list
"""
issues = project["Issue"]
if not isinstance(issues, list):
return [issues]
return issues | 7b8b670e4ad5a7ae49f3541c87026dd603406c9f | 708,245 |
import os
def find_image_files(path=None):
"""
Used to find image files.
Argument:
path - path to directory of 'img.image' files
"""
if path is None:
path = os.getcwd()
folders = []
for folder in os.listdir(path):
if folder.endswith("img.image"):
folders.append(os.path.join(path, folder))
folders.sort()
return folders | f6813672c1619204caa45d21df47289227ab4d5f | 708,246 |
def genus_species_name(genus, species):
"""Return name, genus with species if present.
Copes with species being None (or empty string).
"""
# This is a simple function, centralising it for consistency
assert genus and genus == genus.strip(), repr(genus)
if species:
assert species == species.strip(), repr(species)
return f"{genus} {species}"
else:
return genus | 1fed57c5c87dfd9362262a69429830c7103b7fca | 708,247 |
def N(u,i,p,knots):
"""
u: point for which a spline should be evaluated
i: spline knot
p: spline order
knots: all knots
Evaluates the spline basis of order p defined by knots
at knot i and point u.
"""
if p == 0:
        if knots[int(i)] < u and u <= knots[int(i+1)]:
return 1.0
else:
return 0.0
else:
try:
k = ((float((u-knots[int(i)])) / float((knots[int(i+p)] - knots[int(i)]) ))
* N(u,i,p-1,knots))
except ZeroDivisionError:
k = 0.0
try:
q = ((float((knots[int(i+p+1)] - u)) / float((knots[int(i+p+1)] - knots[int(i+1)])))
* N(u,i+1,p-1,knots))
except ZeroDivisionError:
q = 0.0
return float(k + q) | 0cd0756d558ee99b0ed32350860bc27f023fa88b | 708,248 |
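A small sanity check of the Cox-de Boor recursion above, with a made-up knot vector:

knots = [0.0, 1.0, 2.0, 3.0]
print(N(0.5, 0, 0, knots))  # -> 1.0, since 0 < 0.5 <= 1
print(N(0.5, 0, 1, knots))  # -> 0.5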
def getStopWords(stopWordFileName):
"""Reads stop-words text file which is assumed to have one word per line.
Returns stopWordDict.
"""
    stopWordDict = {}
    with open(stopWordFileName, 'r') as stopWordFile:  # file is closed automatically
        for line in stopWordFile:
            word = line.strip().lower()
            stopWordDict[word] = None
return stopWordDict | 8bb85683f257c35de9d04e4993b42cd758a802e6 | 708,249 |
import os
def read_bert_vocab(bert_model_path):
"""读取bert词典"""
dict_path = os.path.join(bert_model_path, 'vocab.txt')
token2idx = {}
with open(dict_path, 'r', encoding='utf-8') as f:
tokens = f.read().splitlines()
for word in tokens:
token2idx[word] = len(token2idx)
return token2idx | f38c82a1a2b8f69b6c10e8d0bfcf8bdf4f63e123 | 708,250 |
import numpy
def upsample2(x):
"""
Up-sample a 2D array by a factor of 2 by interpolation.
Result is scaled by a factor of 4.
"""
n = [x.shape[0] * 2 - 1, x.shape[1] * 2 - 1] + list(x.shape[2:])
y = numpy.empty(n, x.dtype)
y[0::2, 0::2] = 4 * x
y[0::2, 1::2] = 2 * (x[:, :-1] + x[:, 1:])
y[1::2, 0::2] = 2 * (x[:-1, :] + x[1:, :])
y[1::2, 1::2] = x[:-1, :-1] + x[1:, 1:] + x[:-1, 1:] + x[1:, :-1]
return y | 4eb23d668154ac12755c0e65eeff485ac5e5dd23 | 708,251 |
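For example (note the factor-of-4 scaling baked into the result):

x = numpy.array([[1.0, 2.0], [3.0, 4.0]])
print(upsample2(x))
# [[ 4.  6.  8.]
#  [ 8. 10. 12.]
#  [12. 14. 16.]]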
def is_sim_f(ts_kname):
""" Returns True if the TSDist is actually a similarity and not a distance
"""
return ts_kname in ('linear_allpairs',
'linear_crosscor',
'cross_correlation',
'hsdotprod_autocor_truncated',
'hsdotprod_autocor_cyclic') | 11c18983d8d411714ba3147d4734ad77c40ceedf | 708,253 |
from datetime import datetime
import numpy
def get_numbers_of_papers(metrics):
"""
Convert the metrics into a format that is easier to work with. Year-ordered
numpy arrays.
"""
publications = metrics['histograms']['publications']
    year, total, refereed = [], [], []
y = list(publications['all publications'].keys())
y.sort()
for i in range(len(y)):
k = y[i]
year.append(datetime.strptime(k, '%Y'))
total.append(publications['all publications'][k])
refereed.append(publications['refereed publications'][k])
year, total, refereed = \
numpy.array(year), numpy.array(total), numpy.array(refereed)
return year, total, refereed | ce8b079ea416ff01b4974ea7ae7aa82080321cbb | 708,254 |
def filter_phrase(comments, phrase):
"""Returns list of comments and replies filtered by substring."""
results = []
for comment in comments:
if phrase.lower() in comment.message.lower():
results.append(comment)
for reply in comment.replies:
if phrase.lower() in reply.message.lower():
results.append(reply)
if not results:
return None
return results | 0865163f117550e36b2c21608739649b7b99f825 | 708,255 |
import os
def write_champ_file_geometry(filename, nucleus_num, nucleus_label, nucleus_coord):
"""Writes the geometry data from the quantum
chemistry calculation to a champ v2.0 format file.
Returns:
None as a function value
"""
if filename is not None:
if isinstance(filename, str):
## Write down a geometry file in the new champ v2.0 format
filename_geometry = os.path.splitext("champ_v2_" + filename)[0]+'_geom.xyz'
with open(filename_geometry, 'w') as file:
file.write("{} \n".format(nucleus_num))
# header line printed below
file.write("# Converted from the trexio file using trex2champ converter https://github.com/TREX-CoE/trexio_tools \n")
for element in range(nucleus_num):
file.write("{:5s} {: 0.6f} {: 0.6f} {: 0.6f} \n".format(nucleus_label[element], nucleus_coord[element][0], nucleus_coord[element][1], nucleus_coord[element][2]))
file.write("\n")
        else:
            raise ValueError("filename must be a string")
    # Nothing to write if filename is None.
    else:
        return None | dfaaddb754e50c4343b60ae3f19f6f7b3af8ee73 | 708,257 |
def modify_color(hsbk, **kwargs):
"""
Helper function to make new colors from an existing color by modifying it.
:param hsbk: The base color
:param hue: The new Hue value (optional)
:param saturation: The new Saturation value (optional)
:param brightness: The new Brightness value (optional)
:param kelvin: The new Kelvin value (optional)
"""
return hsbk._replace(**kwargs) | ecc5118873aaf0e4f63bad512ea61d2eae0f7ead | 708,258 |
def add_logs_to_table_heads(max_logs):
"""Adds log headers to table data depending on the maximum number of logs from trees within the stand"""
master = []
for i in range(2, max_logs + 1):
for name in ['Length', 'Grade', 'Defect']:
master.append(f'Log {i} {name}')
if i < max_logs:
master.append('Between Logs Feet')
return master | 5db494650901bfbb114135da9596b9b453d47568 | 708,259 |
from typing import Optional
def get_measured_attribute(data_model, metric_type: str, source_type: str) -> Optional[str]:
"""Return the attribute of the entities of a source that are measured in the context of a metric.
For example, when using Jira as source for user story points, the points of user stories (the source entities) are
summed to arrive at the total number of user story points.
"""
attribute = (
data_model["sources"].get(source_type, {}).get("entities", {}).get(metric_type, {}).get("measured_attribute")
)
return str(attribute) if attribute else attribute | f15379e528b135ca5d9d36f50f06cb95a145b477 | 708,260 |
import os
import platform
def get_pid_and_server():
"""Find process id and name of server the analysis is running on
Use the platform.uname to find servername instead of os.uname because the latter is not supported on Windows.
"""
pid = os.getpid()
server = platform.uname().node
return f"{pid}@{server}" | 433d1493674e2355554d9bc6e189658e155560de | 708,261 |
from typing import List
def insertion_stack(nums: List[int]) -> List[int]:
""" A helper function that sort the data in an ascending order
Args:
nums: The original data
Returns:
a sorted list in ascending order
"""
left = []
right = []
for num in nums:
while left and left[-1] > num:
right.append(left.pop())
left.append(num)
while right:
left.append(right.pop())
return left | 045e28d763ece3dac9e1f60d50a0d51c43b75664 | 708,262 |
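A quick usage check:

print(insertion_stack([3, 1, 2]))  # -> [1, 2, 3]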
import os
def _is_valid_file(file):
"""Returns whether a file is valid.
This means that it is a file and not a directory, but also that
it isn't an unnecessary dummy file like `.DS_Store` on MacOS.
"""
if os.path.isfile(file):
if not file.startswith('.git') and file not in ['.DS_Store']:
return True
return False | 48f3b2171b6aa2acb93cbcab6eba74f0772fe888 | 708,263 |
def longest_common_substring(s, t):
"""
Find the longest common substring between the given two strings
:param s: source string
:type s: str
:param t: target string
:type t: str
:return: the length of the longest common substring
:rtype: int
"""
if s == '' or t == '':
return 0
f = [[0 for _ in range(len(t) + 1)]
for _ in range(len(s) + 1)]
for i in range(len(s)):
for j in range(len(t)):
if s[i] == t[j]:
f[i + 1][j + 1] = f[i][j] + 1
return max(map(max, f)) | 66aef17a117c6cc96205664f4c603594ca496092 | 708,264 |
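For example:

print(longest_common_substring("abcdef", "zcdemf"))  # -> 3, for the common substring "cde"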
import socket
def get_free_port():
""" Find and returns free port number. """
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(("", 0))
free_port = soc.getsockname()[1]
soc.close()
return free_port | d1a514a47a906c946fa3a8cb4312e71bc4f7570e | 708,266 |
def get_diff_list(small_list, big_list):
"""
Get the difference set of the two list.
:param small_list: The small data list.
:param big_list: The bigger data list.
:return: diff_list: The difference set list of the two list.
"""
    # elements that are in big_list but not in small_list
diff_list = list(set(big_list).difference(set(small_list)))
return diff_list | f92d20e6edd1f11ca6436a3ada4a6ba71da37457 | 708,267 |
import re
def strip_from_ansi_esc_sequences(text):
"""
find ANSI escape sequences in text and remove them
:param text: str
:return: list, should be passed to ListBox
"""
# esc[ + values + control character
# h, l, p commands are complicated, let's ignore them
seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
regex = re.compile(seq_regex)
start = 0
response = ""
for match in regex.finditer(text):
end = match.start()
response += text[start:end]
start = match.end()
response += text[start:len(text)]
return response | 8597654defffbdde33b844a34e95bf7893a36855 | 708,268 |
import re
def applyRegexToList(items, regex, separator=' '):
    """Apply a list of regex to a list of strings; return the first field of each line that matches them all"""
    if not isinstance(regex, list):
        regex = [regex]
    regexList = [re.compile(r) for r in regex]
    for r in regexList:
        items = [l for l in items if r.match(l)]
    items = [l.split(separator) for l in items]
    return [i[0] for i in items] | eee1edebf361f9516e7b40ba793b0d13ea3070f3 | 708,269 |
import os
def pathsplit(path):
""" This version, in contrast to the original version, permits trailing
slashes in the pathname (in the event that it is a directory).
It also uses no recursion """
return path.split(os.path.sep) | 1a89994c8ee9bb1b9ef2b9c90575ea2b0ab21c50 | 708,270 |
import os
def get_parent_directory(path):
"""
Get parent directory of the path
"""
return os.path.abspath(os.path.join(path, os.pardir)) | a46ecb9f370076ec975a1823d04b7f2efb0d564d | 708,271 |
import numpy
def convert_bytes_to_ints(in_bytes, num):
"""Convert a byte array into an integer array. The number of bytes forming an integer
is defined by num
:param in_bytes: the input bytes
:param num: the number of bytes per int
:return the integer array"""
dt = numpy.dtype('>i' + str(num))
return numpy.frombuffer(in_bytes, dt) | 38b97fb9d5ecc5b55caf7c9409e4ab4a406a21d7 | 708,272 |
def search_spec(spec, search_key, recurse_key):
"""
Recursively scans spec structure and returns a list of values
keyed with 'search_key' or and empty list. Assumes values
are either list or str.
"""
value = []
if search_key in spec and spec[search_key]:
if isinstance(spec[search_key], str):
value.append(spec[search_key])
else:
value += spec[search_key]
if recurse_key in spec and spec[recurse_key]:
for child_spec in spec[recurse_key]:
value += search_spec(child_spec, search_key, recurse_key)
return sorted(value) | 9d89aacc200e205b0e6cbe49592abfd37158836a | 708,273 |
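An illustrative call with a made-up spec:

spec = {'name': 'root', 'children': [{'name': ['a', 'b']}, {'name': 'c'}]}
print(search_spec(spec, 'name', 'children'))  # -> ['a', 'b', 'c', 'root']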
def min_threshold(x, thresh, fallback):
"""Returns x or `fallback` if it doesn't meet the threshold. Note, if you want to turn a hyper "off" below,
set it to "outside the threshold", rather than 0.
"""
return x if (x and x > thresh) else fallback | e92c17aafb8a7c102152d9f31d0a317b285a0ae6 | 708,274 |
def get_common_count(list1, list2):
"""
Get count of common between two lists
:param list1: list
:param list2: list
:return: number
"""
return len(list(set(list1).intersection(list2))) | c149b49e36e81237b775b0de0f19153b5bcf2f99 | 708,275 |
from os import listdir
def files_with_extension(path: str,extension: str):
"""
Gives a list of the files in the given directory that have the given extension
Parameters
----------
path: str
The full path to the folder where the files are stored
extension: str
The extension of the files
Returns
-------
List[str]
A list containing the files
"""
return [f for f in listdir(path) if f.endswith(extension)] | 18d06303e9b2a734f5fe801847908e2e21b48eae | 708,276 |
import requests
import json
import math
import time
def goods_images(goods_url):
"""
    Fetch a product's customer review images.
    Parameters:
        goods_url - str  product page URL
    Returns:
        image_urls - list  image URLs
"""
image_urls = []
productId = goods_url.split('/')[-1].split('.')[0]
    # product reviews API url
comment_url = 'https://sclub.jd.com/comment/productPageComments.action'
comment_params = {'productId':productId,
'score':'0',
'sortType':'5',
'page':'0',
'pageSize':'10',
'isShadowSku':'0',
'fold':'1'}
comment_headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36',
'Referer':goods_url,
'Host': 'sclub.jd.com'}
comment_req = requests.get(url=comment_url, params=comment_params, headers=comment_headers, verify=False)
html = json.loads(comment_req.text)
    # number of review images
    imageListCount = html['imageListCount']
    # number of review-image pages, rounded up
pages = math.ceil(imageListCount / 10)
for page in range(1, pages+1):
        # fetch the review image urls for this page
club_url = 'https://club.jd.com/discussion/getProductPageImageCommentList.action'
now = time.time()
now_str = str(now).split('.')
now = now_str[0] + now_str[-1][:3]
club_params = {'productId':productId,
'isShadowSku':'0',
'page':page,
'pageSize':'10',
'_':now}
club_headers = comment_headers
club_req = requests.get(url=club_url, params=club_params, headers=club_headers, verify=False)
html = json.loads(club_req.text)
for img in html['imgComments']['imgList']:
image_urls.append(img['imageUrl'])
    # deduplicate
    image_urls = list(set(image_urls))
    # prepend the scheme to build full urls
    image_urls = list(map(lambda x: 'http:'+x, image_urls))
return image_urls | 8ed59e295ebd08788f0083be9941ecd8b09f1d84 | 708,278 |
def delete_index_list(base_list, index_list):
"""
    Remove from base_list the elements at the positions listed in index_list.
    :param base_list: list to filter
    :param index_list: indices of the elements to drop
    :return: the filtered list (None if either argument is empty)
"""
if base_list and index_list:
return [base_list[i] for i in range(len(base_list)) if (i not in index_list)] | 0dd8960d0efc168df42cabb92147f078da362e5e | 708,279 |
import os
import yaml
def dump_yaml(file_path, data):
"""Dump data to a file.
:param file_path: File path to dump data to
:type file_path: String
:param data: Dictionary|List data to dump
:type data: Dictionary|List
"""
with open(os.path.abspath(os.path.expanduser(file_path)), "w") as f:
yaml.safe_dump(data, f, default_flow_style=False)
return file_path | f1210295f5f947c51df6ef80fc479723e157a84c | 708,281 |
def parse_mimetype(mimetype):
"""Parses a MIME type into its components.
:param str mimetype: MIME type
:returns: 4 element tuple for MIME type, subtype, suffix and parameters
:rtype: tuple
Example:
>>> parse_mimetype('text/html; charset=utf-8')
('text', 'html', '', {'charset': 'utf-8'})
"""
if not mimetype:
return '', '', '', {}
parts = mimetype.split(';')
params = []
for item in parts[1:]:
if not item:
continue
key, value = item.split('=', 1) if '=' in item else (item, '')
params.append((key.lower().strip(), value.strip(' "')))
params = dict(params)
fulltype = parts[0].strip().lower()
if fulltype == '*':
fulltype = '*/*'
mtype, stype = fulltype.split('/', 1) \
if '/' in fulltype else (fulltype, '')
stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')
return mtype, stype, suffix, params | a9abfde73528e6f76cca633efe3d4c881dccef82 | 708,282 |
def extract_file_type(file_location:str) -> str:
"""
A function to return the type of file
-> file_location: str = location of a file in string... ex : "C:\\abc\\abc\\file.xyz"
----
=> str: string of the file type, ex : "xyz"
"""
if not isinstance(file_location,str):
raise TypeError("file_location must be a string")
try:
return file_location.rsplit(".", 1)[1]
except IndexError:
raise ValueError(f"Invalid File Location : '{file_location}'") | 091930e1cd285822a6be402eb47ce0457e40b0db | 708,283 |
import torch
def encode_boxes(boxes, im_shape, encode=True, dim_position=64, wave_length=1000, normalize=False, quantify=-1):
""" modified from PositionalEmbedding in:
Args:
boxes: [bs, num_nodes, 4] or [num_nodes, 4]
im_shape: 2D tensor, [bs, 2] or [2], the size of image is represented as [width, height]
encode: bool, whether to encode the box
dim_position: int, the dimension for position embedding
wave_length: the wave length for the position embedding
normalize: bool, whether to normalize the embedded features
quantify: int, if it is > 0, it will be used to quantify the position of objects
"""
batch = boxes.dim() > 2
if not batch:
boxes = boxes.unsqueeze(dim=0)
im_shape = im_shape.unsqueeze(dim=0)
if quantify > 1:
boxes = boxes // quantify
    # in this case, the last 2 dims of the input are num_samples and 4.
    # we compute the pairwise relative position embeddings for each box
if boxes.dim() == 3: # [bs, num_sample, 4]
# in this case, the boxes should be tlbr: [x1, y1, x2, y2]
device = boxes.device
bs, num_sample, pos_dim = boxes.size(0), boxes.size(1), boxes.size(2) # pos_dim should be 4
x_min, y_min, x_max, y_max = torch.chunk(boxes, 4, dim=2) # each has the size [bs, num_sample, 1]
# handle some invalid box
x_max[x_max<x_min] = x_min[x_max<x_min]
y_max[y_max<y_min] = y_min[y_max<y_min]
cx_a = (x_min + x_max) * 0.5 # [bs, num_sample_a, 1]
cy_a = (y_min + y_max) * 0.5 # [bs, num_sample_a, 1]
w_a = (x_max - x_min) + 1. # [bs, num_sample_a, 1]
h_a = (y_max - y_min) + 1. # [bs, num_sample_a, 1]
cx_b = cx_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
cy_b = cy_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
w_b = w_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
h_b = h_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
delta_x = ((cx_b - cx_a) / w_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
delta_y = ((cy_b - cy_a) / h_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
delta_w = torch.log(w_b / w_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
delta_h = torch.log(h_b / h_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
relative_pos = torch.cat((delta_x, delta_y, delta_w, delta_h), dim=-1) # [bs, num_sample_a, num_sample_b, 4]
# if im_shape is not None:
im_shape = im_shape.unsqueeze(dim=-1) # [bs, 2, 1]
im_width, im_height = torch.chunk(im_shape, 2, dim=1) # each has the size [bs, 1, 1]
x = ((cx_b - cx_a) / im_width).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
y = ((cy_b - cy_a) / im_height).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
# w = ((w_b + w_a) / (2 * im_width)).unsqueeze(dim=-1) - 0.5 # [bs, num_sample_a, num_sample_b, 1]
# h = ((h_b + h_a) / (2 * im_height)).unsqueeze(dim=-1) - 0.5 # [bs, num_sample_a. num_sample_b, 1]
w = ((w_b - w_a) / im_width).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
h = ((h_b - h_a) / im_height).unsqueeze(dim=-1) # [bs, num_sample_a. num_sample_b, 1]
relative_pos = torch.cat((relative_pos, x, y, w, h), dim=-1) # [bs, num_sample_a, num_sample_b, 8]
if not encode:
embedding = relative_pos
else:
position_mat = relative_pos # [bs, num_sample_a, num_sample_b, 8]
pos_dim = position_mat.size(-1)
feat_range = torch.arange(dim_position / (2*pos_dim)).to(device) # [self.dim_position / 16]
dim_mat = feat_range / (dim_position / (2*pos_dim))
dim_mat = 1. / (torch.pow(wave_length, dim_mat)) # [self.dim_position / 16]
dim_mat = dim_mat.view(1, 1, 1, 1, -1) # [1, 1, 1, 1, self.dim_position / 16]
# position_mat = position_mat.view(bs, num_sample, num_sample, pos_dim, -1) # [bs, num_sample_a, num_sample_b, 4, 1]
position_mat = position_mat.unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 8, 1]
position_mat = 100. * position_mat # [bs, num_sample_a, num_sample_b, 8, 1]
mul_mat = position_mat * dim_mat # [bs, num_sample_a, num_sample_b, 8, dim_position / 16]
mul_mat = mul_mat.view(bs, num_sample, num_sample, -1) # [bs, num_sample_a, num_sample_b, dim_position / 2]
sin_mat = torch.sin(mul_mat)# [bs, num_sample_a, num_sample_b, dim_position / 2]
cos_mat = torch.cos(mul_mat)# [bs, num_sample_a, num_sample_b, dim_position / 2]
embedding = torch.cat((sin_mat, cos_mat), -1)# [bs, num_sample_a, num_sample_b, dim_position]
if normalize:
embedding = embedding / torch.clamp(torch.norm(embedding, dim=-1, p=2, keepdim=True), 1e-6)
else:
raise ValueError("Invalid input of boxes.")
if not batch: # 2D tensor, [num_boxes, 4]
embedding = embedding.squeeze(dim=0)
return relative_pos, embedding | 7bc8e2d858391c862538626ea7f3dcc291f807f6 | 708,284 |
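A shape-oriented sketch of encode_boxes, reusing the torch import above (the box values are arbitrary):

boxes = torch.tensor([[10., 20., 50., 80.], [30., 40., 90., 100.]])  # [num_boxes, 4], tlbr
im_shape = torch.tensor([200., 150.])                                # [width, height]
rel_pos, emb = encode_boxes(boxes, im_shape, dim_position=64)
# emb: [2, 2, 64] pairwise position embeddings; rel_pos keeps the internal batch dim, [1, 2, 2, 8]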
import numpy
def getPercentileLevels(h, frac=[0.5, 0.65, 0.95, 0.975]):
"""
Return image levels that corresponds to given percentiles values
Uses the cumulative distribution of the sorted image density values
Hence this works also for any nd-arrays
inputs:
h array
outputs:
res array containing level values
keywords:
frac sample fractions (percentiles)
could be scalar or iterable
default: 50%, 65%, 95%, and 97.5%
"""
if getattr(frac, '__iter__', False):
return numpy.asarray( [getPercentileLevels(h, fk) for fk in frac])
assert( (frac >= 0.) & (frac <1.)), "Expecting a sample fraction in 'frac' and got %f" %frac
# flatten the array to a 1d list
val = h.ravel()
# inplace sort
val.sort()
#reverse order
rval = val[::-1]
#cumulative values
cval = rval.cumsum()
#retrieve the largest indice up to the fraction of the sample we want
ind = numpy.where(cval <= cval[-1]*float(frac))[0].max()
res = rval[ind]
del val, cval, ind, rval
return res | 126d16ab9358d9ec6e72dc653037d9235baef139 | 708,285 |
import subprocess
def lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
Input
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
dict
"""
command = event['command']
    res = subprocess.check_output(command.split(' ')).decode('utf-8')  # decode bytes so the body is JSON-serializable
return {
"statusCode": 200,
"body": res
} | 4f266c728487601d522234ce24104ab762fe0147 | 708,286 |
import operator
def calculate_seat_district(district_deputy_number, parties, votes):
"""
Calculate seats for each party in list of parties for a district
Params:
- district_deputy_number: the number of seats for this district
- parties: list of parties
- votes: list of votes for each party in this district
Assume that parties and votes parameters have the same size
Return:
- A tuple represents number of seats for each party. This tuple has same size with parameter 'parties'
"""
party_count = len(parties)
# Each party has been initially allocated 0 seat
# Initialize a list with initial value is 0
# For example, if party_count = 5
# seats will be seats = [0, 0, 0, 0, 0]
seats = [0] * party_count
# N value for each party
# N= V/(s + 1)
# Init N as a copy of votes list
N = votes[:]
while sum(seats) < district_deputy_number:
# Get the maximum value in list of N value and the index of that maximum value
# Note: this below line uses the Python's builtin operator
max_index, max_value = max(enumerate(N), key=operator.itemgetter(1))
# Update the seats list
# increase the seat of the party that has maximum by 1
seats[max_index] += 1
        # Update the largest N with its new value
        # using the formula: N = V / (s + 1)
N[max_index] = votes[max_index] / (seats[max_index] + 1)
# return as tuple
# Note: It can be returned as list, however, the tuple is better because it's immutable
return tuple(seats) | 035a167c623d14857dcefe01e4304523959857a6 | 708,287 |
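A worked example of this highest-averages (D'Hondt) allocation, with made-up vote counts:

parties = ['A', 'B', 'C']
votes = [100, 60, 40]
print(calculate_seat_district(5, parties, votes))  # -> (3, 1, 1)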
def get_clean_url(url):
""" Get a url without the language part, if i18n urls are defined
:param url: a string with the url to clean
:return: a string with the cleaned url
"""
url = url.strip('/')
url = '/' if not url else url
return '/'.join(url.split('/')[1:]) | 9e5d396086d6cc5169c26f6d1645dafd23a3b8d7 | 708,288 |
from datetime import datetime
def cid_to_date(cid):
"""Converts a cid to date string YYYY-MM-DD
Parameters
----------
cid : int
A cid as it is generated by the function ``utils.create_cid()``
Returns
-------
str
A string formated date (e.g. YYYY-MM-DD, 2018-10-01)
"""
return datetime.utcfromtimestamp(
cid/10000000.0
).strftime("%Y-%m-%d") | ab919f9cfd5c56f6fb6b65cbae8731687fc42faf | 708,289 |
import random
def remove_edge_stochastic_function(G, parameter, prob_func, prob_func_kws={}, random_seed=None, copy=True):
"""
Recieves a Graph and p.
p is function of a defined parameter
Returns a degraded Graph
"""
if random_seed is not None:
random.seed(random_seed)
if copy:
G_ = G.copy()
else:
G_ = G
lst = [G.edges[n][parameter] for n in G.edges]
vmax, vmin = max(lst), min(lst)
prob_func_kws['vmax'] = vmax
prob_func_kws['vmin'] = vmin
lst=None
for edge in list(G.edges):
p = prob_func(G.edges[edge][parameter], **prob_func_kws)
if random.random()<=p:
G_.remove_edge(*edge)
    return G_ | e4f49a6e512f7ad0e86c0138bf76affa330ba7a5 | 708,290 |
def get_sh_type(sh_type):
    """Get the section header type."""
    sh_types = {
        0: 'SHT_NULL',
        1: 'SHT_PROGBITS',
        2: 'SHT_SYMTAB',
        3: 'SHT_STRTAB',
        4: 'SHT_RELA',
        5: 'SHT_HASH',
        6: 'SHT_DYNAMIC',
        7: 'SHT_NOTE',
        8: 'SHT_NOBITS',
        9: 'SHT_REL',
        10: 'SHT_SHLIB',
        11: 'SHT_DYNSYM',
        14: 'SHT_INIT_ARRAY',
        15: 'SHT_FINI_ARRAY',
        16: 'SHT_PREINIT_ARRAY',
        17: 'SHT_GROUP',
        18: 'SHT_SYMTAB_SHNDX',
        19: 'SHT_NUM',
        1610612736: 'SHT_LOOS',
    }
    if sh_type in sh_types:
        return sh_types[sh_type]
    print('Unable to match {} to a sh_type.'.format(sh_type))
    raise ValueError | 0d95e651cc817f48178e45373b822be2eb32fbaf | 708,291 |
def read_gmpe_file(resid_file, period):
"""
Reads the gmpe residuals file and returns all the data
"""
gmpe_data = []
# Read residuals file and get information we need
input_file = open(resid_file, 'r')
# Look over header and figure out which column contains the period
# we need to plot
header = input_file.readline()
header = header.strip()
items = header.split()
index = -1
for idx, item in enumerate(items):
try:
val = float(item)
if val == period:
# Found period, save index
index = idx
break
        except ValueError:
            pass
if index < 0:
# If we don't have this period, nothing to do
print("Residuals file %s does not have data for period %f" %
(resid_file, period))
# Close input file
input_file.close()
# Return empty sets
return gmpe_data
# Read the rest of the file
# Index #2 has station name
# Index #7 has distance
for line in input_file:
items = line.split()
stat = items[2]
dist = items[7]
value = items[index]
gmpe_data.append((stat, dist, value))
# Done reading the file
input_file.close()
return gmpe_data | c7cb325f6c40cc23ae8fa017a6ef924fa7df2c4e | 708,292 |
import os
def remove_sep(path, new_sep='--'):
"""Convert a real path into pseudo-path."""
return path.replace(os.sep, new_sep) | b7757ee861cd58dbbf2526455831ea6c5613fe27 | 708,293 |
import json
def read_socket(sock, buf_len, echo=True):
""" Read data from socket and return it in JSON format """
reply = sock.recv(buf_len).decode()
try:
ret = json.loads(reply)
except json.JSONDecodeError:
print("Error in reply: ", reply)
sock.close()
raise
if echo:
print(json.dumps(ret))
return ret | 07d7100ed8c1c9d22307ce293e10b2a0cd5849c6 | 708,294 |
def get_num_channels(inputs):
""" Get number of channels in one tensor. """
return inputs.shape[1] | 6fb42e60714dc81f03b29ad87b73b41027056472 | 708,295 |
import math
def round_vzeros(v,d=10) :
"""Returns input vector with rounded to zero components
which precision less than requested number of digits.
"""
prec = pow(10,-d)
vx = v[0] if math.fabs(v[0]) > prec else 0.0
vy = v[1] if math.fabs(v[1]) > prec else 0.0
vz = v[2] if math.fabs(v[2]) > prec else 0.0
return vx,vy,vz | aa16175bf1176383ef255460767502104be2566e | 708,296 |
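For example, with the default precision of d=10 digits:

print(round_vzeros((1e-12, 0.5, -2.0)))  # -> (0.0, 0.5, -2.0)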
import re
import shlex
def read_cloudflare_api_file(prog, file, state):
"""Read the input file for Cloudflare login details.
Args:
prog (State): modified if errors encountered in opening or reading
the file.
file (str): the file to read.
state (ConfigState): to record config file syntax errors.
Returns:
list(str): returns a list of Cloudflare login parameters
(email and key) where a line in the file 'X = Y' is converted
to: 'X:Y'. No checks on the input to any parameters (i.e. 'Y')
are done here: only the list is constructed. If ANY errors
are encountered, 'None' is returned.
"""
try:
with open(str(file), "r") as f:
raw = f.read().splitlines()
except FileNotFoundError as ex:
prog.log.error(
"cloudflare API file '{}' not found".format(ex.filename))
return None
except OSError as ex:
prog.log.error(
"reading cloudflare API file '{}' failed: {}".format(
ex.filename, ex.strerror.lower()))
return None
allowed_params = {'dns_cloudflare_email': 'email',
'dns_cloudflare_api_key': 'key'}
errors = False
ret = []
linepos = 0
for l in raw:
linepos += 1
match = re.match(r'^\s*(#.*)?$', l)
if match:
continue
match = re.match(
r'\s*(?P<param>\w+)\s*=\s*(?P<input>[^#]*)(\s*|\s#.*)$', l)
if match:
param = match.group('param')
try:
inputs = shlex.split(match.group('input'))
except ValueError:
state.add_error(prog, "cloudflare API file '{}' has malformed expression on line {}".format(file, linepos))
errors = True
continue
if param in allowed_params:
if len(inputs) != 1:
state.add_error(prog, "cloudflare API file '{}': malformed '{}' command on line {}".format(file, param, linepos))
errors = True
continue
ret += [ '{}:{}'.format(allowed_params[param], inputs[0]) ]
continue
state.add_error(prog, "cloudflare API file '{}': unrecognized command on line {}: '{}'".format(file, linepos, param))
errors = True
continue
state.add_error(prog, "cloudflare API file '{}' has malformed expression on line {}".format(file, linepos))
errors = True
if errors:
return None
return ret | 39d5fe28f348e9e3285f55cff22f025a86f41715 | 708,297 |
def get_specific_label_dfs(raw_df, label_loc):
"""
Purpose: Split the instances of data in raw_df based on specific labels/classes
and load them to a dictionary structured -> label : Pandas Dataframe
Params: 1. raw_df (Pandas Dataframe):
- The df containing data
2. label_loc (String):
- The location where the output labels are stored in 1. raw_df
Returns: A dictionary structured -> label : Pandas Dataframe
"""
labels = list(raw_df[label_loc].unique())
# a list of dataframes storing only instances of data belonging to one specific class/label
label_dataframes = {}
for label in labels:
label_dataframes[label] = raw_df.loc[raw_df[label_loc] == label]
return label_dataframes | 756f03f845da64f6fd5534fb786966edb8610a13 | 708,298 |
def wang_ryzin_reg(h, Xi, x):
"""
A version for the Wang-Ryzin kernel for nonparametric regression.
Suggested by Li and Racine in [1] ch.4
"""
return h ** abs(Xi - x) | f87c15df408c9307c82a7bc0ab7bb700cac71f41 | 708,299 |
def get_axis(array, axis, slice_num):
"""Returns a fixed axis"""
slice_list = [slice(None)] * array.ndim
slice_list[axis] = slice_num
slice_data = array[tuple(slice_list)].T # transpose for proper orientation
return slice_data | 558d4f8f8725c752c225e6958881fc18eeeab35e | 708,301 |
import os
def test_integrity(param_test):
"""
Test integrity of function
"""
# open result file
f = open(os.path.join(param_test.path_output, 'ernst_angle.txt'), 'r')
angle_result = float(f.read())
f.close()
# compare with GT
if abs(angle_result - param_test.angle_gt) < param_test.threshold:
param_test.output += '--> PASSED'
else:
param_test.output += '--> FAILED'
param_test.status = 99
return param_test | 7498c00ec62acb9006cb8ac8f041fc64647140e6 | 708,302 |
import pandas as pd
def expand_name_df(df,old_col,new_col):
"""Takes a dataframe df with an API JSON object with nested elements in old_col,
extracts the name, and saves it in a new dataframe column called new_col
Parameters
----------
df : dataframe
old_col : str
new_col : str
Returns
-------
df : dataframe
"""
def expand_name(nested_name):
"""Takes an API JSON object with nested elements and extracts the name
Parameters
----------
nested_name : JSON API object
Returns
-------
object_name : str
"""
if pd.isnull(nested_name):
object_name = 'Likely Missing'
else:
object_name = nested_name['name']
return object_name
df[new_col] = df[old_col].apply(expand_name)
return df | d39209f71719afa0301e15d95f31d98b7949f6b3 | 708,303 |
import itertools
def expand_set(mySet):
""" pass in a set of genome coords, and it will 'expand' the indels
within the set by adding +/- 3 bp copies for each one """
returnSet = []
for entry in mySet:
l0 = []
l1 = []
try:
sub0 = entry.split('-')[0] # split on `-`
sub1 = entry.split('-')[1] # this guy is good
sub00 = sub0.split(':')[1] # split on :, need to get rid of chrom
chrom = sub0.split(':')[0]
if sub00 != sub1: # got an indel
                # shift both endpoints by +/- 1..3 bp
                l0.extend(int(sub00) + d for d in (1, 2, 3, -1, -2, -3))
                try:
                    l1.extend(int(sub1) + d for d in (1, 2, 3, -1, -2, -3))
except ValueError:
continue
coord_combos = list(itertools.product(l0, l1))
for pair in coord_combos:
toAdd = chrom + ':' + str(pair[0]) + '-' + str(pair[1])
returnSet.append(toAdd)
else:
returnSet.append(entry)
except IndexError:
continue
return returnSet | 4ccbff705654b5f5b89c59bb13df9fad6cba42db | 708,304 |
def user_dss_clients(dss_clients, dss_target):
"""
    Fixture that narrows down the dss clients to only the ones relevant for the current DSS target.
    Args:
        dss_clients (fixture): All the instantiated dss clients for each user and DSS target
dss_target (fixture): The considered DSS target for the test to be executed
Returns:
A dict of dss client instances for the current DSS target and each of its specified users.
"""
return dss_clients[dss_target] | 7d418b49b68d7349a089046837f3c8351c0dcc67 | 708,306 |
def preprocess(arr):
"""Preprocess image array with simple normalization.
Arguments:
----------
arr (np.array): image array
Returns:
--------
arr (np.array): preprocessed image array
"""
arr = arr / 255.0
arr = arr * 2.0 - 1.0
return arr | 3bccf2f4433c4da62954db4f25f5e9bfabc03c3a | 708,307 |
def closestMedioidI(active_site, medioids, distD):
"""
returns the index of the closest medioid in medioids to active_site
input: active_site, an ActiveSite instance
medioids, a list of ActiveSite instances
distD, a dictionary of distances
output: the index of the ActiveSite closest to active_site in medioids
"""
closest = (float('Inf'), None)
for i, medioid in enumerate(medioids):
thisDist = distD[frozenset([active_site, medioid])]
if thisDist < closest[0]:
closest = (thisDist, i)
return closest[1] | 379f98a84751c0a392f8f9b1703b89b299979676 | 708,308 |
from typing import Dict
from typing import Any
import traceback
import sys
def watchPoint(filename, lineno, event="call"):
"""whenever we hit this line, print a stack trace. event='call'
for lines that are function definitions, like what a profiler
gives you.
Switch to 'line' to match lines inside functions. Execution speed
will be much slower."""
seenTraces: Dict[Any, int] = {} # trace contents : count
def trace(frame, ev, arg):
if ev == event:
if (frame.f_code.co_filename, frame.f_lineno) == (filename, lineno):
stack = ''.join(traceback.format_stack(frame))
if stack not in seenTraces:
print("watchPoint hit")
print(stack)
seenTraces[stack] = 1
else:
seenTraces[stack] += 1
return trace
sys.settrace(trace)
# atexit, print the frequencies? | 5c7017a180e254f5651c6cf737ca798d570d669c | 708,309 |
def no_op_job():
"""
A no-op parsl.python_app to return a future for a job that already
has its outputs.
"""
return 0 | ad8d6379ba35dae14ce056d9900fb6e62c769d85 | 708,310 |
def _to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, str):
out = string
else:
out = string.decode(encoding)
return out | b50fd0fc62b2cfc024c847b98e1f85b4b67d07e3 | 708,311 |
import argparse
def parse_cli_args() -> argparse.Namespace:
"""
Parse arguments passed via Command Line Interface (CLI).
:return:
namespace with arguments
"""
parser = argparse.ArgumentParser(description='Algorithmic composition of dodecaphonic music.')
parser.add_argument(
'-c', '--config_path', type=str, default=None, help='path to configuration file'
)
cli_args = parser.parse_args()
return cli_args | 9014ee342b810ec1b63f7ed80811f55b7ed4d00f | 708,313 |
def find_consumes(method_type):
"""
Determine mediaType for input parameters in request body.
"""
if method_type in ('get', 'delete'):
return None
return ['application/json'] | 785e70e41629b0386d8b86f247afaf5bff3b7ba9 | 708,314 |
def preprocess(text):
""" Simple Arabic tokenizer and sentencizer. It is a space-based tokenizer. I use some rules to handle
    tokenization exceptions like words containing the preposition 'و'. For example 'ووالدته' is tokenized to 'و والدته'
:param text: Arabic text to handle
:return: list of tokenized sentences
"""
try:
text = text.decode('utf-8')
except(UnicodeDecodeError, AttributeError):
pass
text = text.strip()
tokenizer_exceptions = ["وظف", "وضعها", "وضعه", "وقفنا", "وصفوها", "وجهوا", "والدته", "والده", "وادي", "وضعية",
"واجهات", "وفرتها", "وقاية", "وفا", "وزيرنا", "وزارتي", "وجهاها", "واردة", "وضعته",
"وضعتها", "وجاهة", "وهمية", "واجهة", "واضعاً", "واقعي", "ودائع", "واعدا", "واع", "واسعا",
"ورائها", "وحدها", "وزارتي", "وزارتي", "والدة", "وزرائها", "وسطاء", "وليامز", "وافق",
"والدها", "وسم", "وافق", "وجهها", "واسعة", "واسع", "وزنها", "وزنه",
"وصلوا", "والدها", "وصولاً", "وضوحاً", "وجّهته", "وضعته", "ويكيليكس", "وحدها", "وزيراً",
"وقفات", "وعر", "واقيًا", "وقوف", "وصولهم", "وارسو", "واجهت", "وقائية", "وضعهم",
"وسطاء", "وظيفته", "ورائه", "واسع", "ورط", "وظفت", "وقوف", "وافقت", "وفدًا", "وصلتها",
"وثائقي", "ويليان", "وساط", "وُقّع", "وَقّع", "وخيمة", "ويست", "والتر", "وهران", "ولاعة",
"ولايت", "والي", "واجب", "وظيفتها", "ولايات", "واشنطن", "واصف",
"وقح", "وعد", "وقود", "وزن", "وقوع", "ورشة", "وقائع", "وتيرة", "وساطة", "وفود", "وفات",
"وصاية", "وشيك", "وثائق", "وطنية", "وجهات", "وجهت", "وعود", "وضعهم", "وون", "وسعها", "وسعه",
"ولاية", "واصفاً", "واصلت", "وليان", "وجدتها", "وجدته", "وديتي", "وطأت", "وطأ", "وعودها",
"وجوه", "وضوح", "وجيز", "ورثنا", "ورث", "واقع", "وهم", "واسعاً", "وراثية", "وراثي", "والاس",
"واجهنا", "وابل", "ويكيميديا", "واضحا", "واضح", "وصفته", "واتساب", "وحدات", "ون",
"وورلد", "والد", "وكلاء", "وتر", "وثيق", "وكالة", "وكالات", "و احدة", "واحد", "وصيته",
"وصيه", "ويلمينغتون", "ولد", "وزر", "وعي", "وفد", "وصول", "وقف", "وفاة", "ووتش", "وسط",
"وزراء", "وزارة", "ودي", "وصيف", "ويمبلدون", "وست", "وهج", "والد", "وليد", "وثار",
"وجد", "وجه", "وقت", "ويلز", "وجود", "وجيه", "وحد", "وحيد", "ودا", "وداد", "ودرو",
"ودى", "وديع", "وراء", "ورانس", "ورث", "ورَّث", "ورد", "وردة", "ورق", "ورم", "وزير",
"وسام", "وسائل", "وستون", "وسط", "وسن", "وسيط", "وسيلة", "وسيم", "وصاف", "وصف", "وصْفَ",
"وصل", "وضع", "وطن", "وعاء", "وفاء", "وفق", "وفيق", "وقت", "وقع", "وكال", "وكيل",
"ولاء", "ولف", "وهب", "وباء", "ونستون", "وضح", "وجب", "وقّع", "ولنغتون", "وحش",
"وفر", "ولادة", "ولي", "وفيات", "وزار", "وجّه", "وهماً", "وجَّه", "وظيفة", "وظائف", "وقائي"]
sentence_splitter_exceptions = ["د.", "كي.", "في.", "آر.", "بى.", "جى.", "دى.", "جيه.", "ان.", "ال.", "سى.", "اس.",
"اتش.", "اف."]
sentence_splitters = ['.', '!', '؟', '\n']
text = text.replace('،', ' ، ')
text = text.replace('*', ' * ')
text = text.replace('’', ' ’ ')
text = text.replace('‘', ' ‘ ')
text = text.replace(',', ' , ')
text = text.replace('(', ' ( ')
text = text.replace(')', ' ) ')
text = text.replace('/', ' / ')
text = text.replace('[', ' [ ')
text = text.replace(']', ' ] ')
text = text.replace('|', ' | ')
text = text.replace('؛', ' ؛ ')
text = text.replace('«', ' « ')
text = text.replace('»', ' » ')
text = text.replace('!', ' ! ')
text = text.replace('-', ' - ')
text = text.replace('“', ' “ ')
text = text.replace('”', ' ” ')
text = text.replace('"', ' " ')
text = text.replace('؟', ' ؟ ')
text = text.replace(':', ' : ')
text = text.replace('…', ' … ')
text = text.replace('..', ' .. ')
text = text.replace('...', ' ... ')
text = text.replace('\'', ' \' ')
text = text.replace('\n', ' \n ')
text = text.replace(' ', ' ')
tokens = text.split()
for i, token in enumerate(tokens):
if token[-1] in sentence_splitters:
is_exceptions = token in sentence_splitter_exceptions
if not is_exceptions:
tokens[i] = token[:-1] + ' ' + token[-1] + 'SENT_SPLITTER'
tokens = ' '.join(tokens).split()
for i, token in enumerate(tokens):
if token.startswith('و'):
is_exceptions = [token.startswith(exception) and len(token) <= len(exception) + 1 for exception in
tokenizer_exceptions]
if True not in is_exceptions:
tokens[i] = token[0] + ' ' + token[1:]
text = (' '.join(tokens))
text = text.replace(' وال', ' و ال')
text = text.replace(' لل', ' ل ل')
text = text.replace(' لإ', ' ل إ')
text = text.replace(' بالأ', ' ب الأ')
text = text.replace('وفقا ل', 'وفقا ل ')
text = text.replace('نسبة ل', 'نسبة ل ')
sentences = text.split('SENT_SPLITTER')
return sentences | 48a44391413045a49d6d9f2dff20dcd89734b4f2 | 708,315 |
def login(client, password="pass", ):
"""Helper function to log into our app.
Parameters
----------
client : test client object
Passed here is the flask test client used to send the request.
password : str
Dummy password for logging into the app.
Return
-------
post request object
The test client is instructed to send a post request to the /login
route. The request contains the fields values to be posted by the form.
"""
return client.post('/login',
data=dict(pass_field=password, remember_me=True),
follow_redirects=True) | 5adca2e7d54dabe47ae92f0bcebb93e0984617b1 | 708,316 |
import argparse
def create_parser():
""" Create argparse object for this CLI """
parser = argparse.ArgumentParser(
description="Remove doubled extensions from files")
parser.add_argument("filename", metavar="file",
help="File to process")
return parser | c5acd1d51161d7001d7a6842fa87ff0cf61a03ef | 708,317 |
import subprocess
def _GetLastAuthor():
    """Returns a string with the author of the last commit."""
    author = subprocess.check_output(
        ['git', 'log', '-1', '--pretty=format:%an']).decode().strip()
    return author | 82159cf4d882d6cace29802892dacda1bfe6b6b2 | 708,318 |
def mock_datasource_http_oauth2(mock_datasource):
"""Mock DataSource object with http oauth2 credentials"""
mock_datasource.credentials = b"client_id: FOO\nclient_secret: oldisfjowe84uwosdijf"
mock_datasource.location = "http://foo.com"
return mock_datasource | 8496f6b9ac60af193571f762eb2ea925915a1223 | 708,319 |
import sys
import os
import imp
def _find_module(module):
"""Find module using imp.find_module.
While imp is deprecated, it provides a Python 2/3 compatible
interface for finding a module. We use the result later to load
the module with imp.load_module with the '__main__' name, causing
it to execute.
The non-deprecated method of using importlib.util.find_spec and
loader.execute_module is not supported in Python 2.
The _find_module implementation uses a novel approach to bypass
imp.find_module's requirement that package directories contain
__init__.py/__init__.pyc markers. This lets users specify
namespace packages in main modules, which are not otherwise
supported by imp.find_module.
"""
parts = module.split(".")
module_path = parts[0:-1]
module_name_part = parts[-1]
# See function docstring for the rationale of this algorithm.
for sys_path_item in sys.path:
cur_path = os.path.join(sys_path_item, *module_path)
try:
return imp.find_module(module_name_part, [cur_path])
except ImportError:
pass
raise ImportError("No module named %s" % module) | b76b72cfc666e78b5b880c95bdc196b469722822 | 708,320 |
import pickle
def load_dataset():
"""
load dataset
:return: dataset in numpy style
"""
    data_location = 'data.pk'
    with open(data_location, 'rb') as f:  # close the file handle after loading
        data = pickle.load(f)
    return data | 9467826bebfc9ca3ad1594904e9f3195e345c065 | 708,321 |
def generateFromSitePaymentObject(signature: str, account_data: dict, data: dict)->dict:
"""[summary]
Creates object for from site chargment request
Args:
signature (str): signature hash string
account_data (dict): merchant_account: str
merchant_domain: str
data (dict): order + personal data to create charge
orderReference (str): timestamp
amount (float): order total amount
currency (str): 'USD', 'UAH', 'RUB'
card (str): user card number
expMonth (str): card expires month
expYear (str): card expires year
cardCvv (str): card cvv
cardHolder (str): full name of card holder "Test test"
productName (list[str]): product names list
productPrice (list[float]): product price list
productCount (list[int]): product count list
clientFirstName (str): client first name
clientLastName (str): client last name
clientCountry (str): client country
clientEmail (str): client email
clientPhone (str): client phone
Returns:
dict: [description]
"""
return {
"transactionType":"CHARGE",
'merchantAccount': account_data['merchant_account'],
"merchantAuthType":"SimpleSignature",
'merchantDomainName': account_data['merchant_domain'],
"merchantTransactionType":"AUTH",
"merchantTransactionSecureType": "NON3DS",
'merchantSignature': signature,
"apiVersion":1,
'orderReference': str(data['orderReference']),
'orderDate': str(data['orderReference']),
"amount":data["amount"],
'currency': data['currency'],
"card":data['card'],
"expMonth":data['expMonth'],
"expYear":data['expYear'],
"cardCvv":data['cardCvv'],
"cardHolder":data['cardHolder'],
'productName': list(map(str, data['productName'])),
'productPrice': list(map(float, data['productPrice'])),
'productCount': list(map(int, data['productCount'])),
"clientFirstName":data['clientFirstName'],
"clientLastName":data['clientLastName'],
"clientCountry":data['clientCountry'],
"clientEmail":data['clientEmail'],
"clientPhone":data['clientPhone'],
} | 149434694e985956dede9bf8b6b0da1215ac9963 | 708,322 |
import argparse
import sys
def parse_args():
"""Parse command-line args.
"""
parser = argparse.ArgumentParser(description = 'Upload (JSON-encoded) conformance resources from FHIR IGPack tar archive.', add_help = False)
parser.add_argument('-h', '--help', action = 'store_true', help = 'show this help message and exit')
parser.add_argument('-i', '--igpack', help = 'IGPack filename (e.g. us-core-v3.1.1-package.tgz)')
parser.add_argument('-t', '--target', help = 'FHIR API base URL for target server (e.g. http://localhost:8080/r4)')
args = parser.parse_args()
usage = False
error = False
if getattr(args, 'help'):
usage = True
else:
for arg in vars(args):
if getattr(args, arg) == None:
print('Error - missing required argument: --{}'.format(arg), file=sys.stderr, flush=True)
error = True
if usage or error:
parser.print_help()
print()
print('Additionally, if the ACCESS_TOKEN environment variable is defined,')
print('its value will be used as an OAuth bearer token for the FHIR API.', flush=True)
if error:
raise RuntimeError('Command-line argument error.')
return args | 7c0ae02e07706ef212417ee7d0c4dd11a1de945c | 708,323 |
def user_0post(users):
"""
Fixture that returns a test user with 0 posts.
"""
return users['user2'] | 5401e7f356e769b5ae68873f2374ef74a2d439c6 | 708,324 |
import os
def initialize():
"""
Initialize some parameters, such as API key
"""
api_key = os.environ.get("api_key") # None when not exist
if api_key and len(api_key) == 64: # length of a key should be 64
return api_key
print("Please set a valid api_key in the environment variables.")
exit() | 2589aeea4db2d1d1f20de03bc2425e1835eb2f69 | 708,325 |
def convert_millis(track_dur_lst):
""" Convert milliseconds to 00:00:00 format """
converted_track_times = []
for track_dur in track_dur_lst:
seconds = (int(track_dur)/1000)%60
        minutes = int(int(track_dur)/60000) % 60  # minutes within the hour, not total minutes
hours = int(int(track_dur)/(60000*60))
converted_time = '%02d:%02d:%02d' % (hours, minutes, seconds)
converted_track_times.append(converted_time)
return converted_track_times | 3d5199da01529f72b7eb6095a26e337277f3c2c9 | 708,327 |
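A quick check with the minutes fix applied above:

print(convert_millis(["3723000"]))  # -> ['01:02:03'] (1 h 2 min 3 s)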
def sync_xlims(*axes):
"""Synchronize the x-axis data limits for multiple axes. Uses the maximum
upper limit and minimum lower limit across all given axes.
Parameters
----------
*axes : axis objects
List of matplotlib axis objects to format
Returns
-------
    out : xmin, xmax
The computed bounds
"""
xmins, xmaxs = zip(*[ax.get_xlim() for ax in axes])
xmin = min(xmins)
xmax = max(xmaxs)
for ax in axes:
ax.set_xlim(xmin, xmax)
return xmin, xmax | a377877a9647dfc241db482f8a2c630fe3eed146 | 708,328 |
def sanitize_app_name(app):
"""Sanitize the app name and build matching path"""
app = "".join(c for c in app if c.isalnum() or c in ('.', '_')).rstrip().lstrip('/')
return app | fca922d8b622baa1d5935cd8eca2ffca050a4c86 | 708,329 |
def total_length(neurite):
"""Neurite length. For a morphology it will be a sum of all neurite lengths."""
return sum(s.length for s in neurite.iter_sections()) | 854429e073eaea49c168fb0f9e381c71d7a7038a | 708,330 |
def main_epilog() -> str:
"""
This method builds the footer for the main help screen.
"""
msg = "To get help on a specific command, see `conjur <command> -h | --help`\n\n"
msg += "To start using Conjur with your environment, you must first initialize " \
"the configuration. See `conjur init -h` for more information."
return msg | ecf4167535b5f1e787d286a3b2194816790a7e6a | 708,331 |
def windowing_is(root, *window_sys):
"""
Check for the current operating system.
:param root: A tk widget to be used as reference
:param window_sys: if any windowing system provided here is the current
windowing system `True` is returned else `False`
:return: boolean
"""
windowing = root.tk.call('tk', 'windowingsystem')
return windowing in window_sys | fd021039686b1971f8c5740beb804826a7afdf80 | 708,332 |
def init_columns_entries(variables):
"""
Making sure we have `columns` & `entries` to return, without effecting the original objects.
"""
columns = variables.get('columns')
if columns is None:
columns = [] # Relevant columns in proper order
if isinstance(columns, str):
columns = [columns]
else:
columns = list(columns)
entries = variables.get('entries')
if entries is None:
entries = [] # Entries of dict with relevant columns
elif isinstance(entries, dict):
entries = [entries]
else:
entries = list(entries)
return columns, entries | 49a12b0561d0581785c52d9474bc492f2c64626c | 708,333 |
def check_new_value(new_value: str, definition) -> bool:
"""
checks with definition if new value is a valid input
:param new_value: input to set as new value
:param definition: valid options for new value
:return: true if valid, false if not
"""
if type(definition) is list:
if new_value in definition:
return True
else:
return False
elif definition is bool:
if new_value == "true" or new_value == "false":
return True
else:
return False
elif definition is int:
try:
int(new_value)
return True
except ValueError:
return False
elif definition is float:
try:
float(new_value)
return True
except ValueError:
return False
elif definition is str:
return True
else:
# We could not validate the type or values so we assume it is incorrect
return False | d7204c7501e713c4ce8ecaeb30239763c13c1f18 | 708,334 |
def SplitGeneratedFileName(fname):
"""Reverse of GetGeneratedFileName()
"""
return tuple(fname.split('x',4)) | 0210361d437b134c3c24a224ab93d2ffdcfc32ec | 708,335 |
def volta(contador, quantidade):
"""
    Moves back a given number of characters.
    :param contador: integer marking a position in the string
    :param quantidade: number of characters to move back
    :type contador: int
    :type quantidade: int
    :return: the new counter
:rtype: int
"""
return contador - quantidade | 4183afebdfc5273c05563e4675ad5909124a683a | 708,336 |
import json
def load_configuration():
"""
This function loads the configuration from the
config.json file and then returns it.
Returns: The configuration
"""
with open('CONFIG.json', 'r') as f:
return json.load(f) | 91eae50d84ec9e4654ed9b8bcfa35215c8b6a7c2 | 708,338 |
import configparser
import os
import sys
def config_parse(profile_name):
"""Parse the profile entered with the command line. This profile is in the profile.cfg file.
These parameters are used to automate the processing
:param profile_name: Profile's name"""
config = configparser.ConfigParser()
config.read(os.path.dirname(sys.argv[0]) + "\\profile.cfg")
folder_string = config.get(profile_name, "folder_names")
folder_string = [i.strip() for i in folder_string.split(",")]
cam_names = config.get(profile_name, "cam_names")
cam_names = [i.strip() for i in cam_names.split(",")]
cam_bearing = config.get(profile_name, "cam_bearing")
cam_bearing = [int(i.strip()) for i in cam_bearing.split(",")]
cam_log_count = int(config.get(profile_name, "cam_log_count"))
distance_from_center = float(config.get(profile_name, "distance_from_center"))
min_pic_distance = float(config.get(profile_name, "min_pic_distance"))
try:
cam_log_position = config.get(profile_name, "cam_log_position")
        cam_log_position = [int(i.strip()) for i in cam_log_position.split(",")]  # split on commas, not strip
    except (configparser.NoSectionError, configparser.NoOptionError):
cam_log_position = list(range(len(cam_names)))
return folder_string, cam_names, cam_log_position, cam_bearing, cam_log_count, distance_from_center, min_pic_distance | 45e56c4a5d55b46d11bf9064c6b72fed55ffa4c9 | 708,339 |
import base64
def data_uri(content_type, data):
"""Return data as a data: URI scheme"""
return "data:%s;base64,%s" % (content_type, base64.urlsafe_b64encode(data)) | f890dc1310e708747c74337f5cfa2d6a31a23fc0 | 708,340 |
def next_line(ionex_file):
"""
next_line
Function returns the next line in the file
that is not a blank line, unless the line is
'', which is a typical EOF marker.
"""
    while True:
line = ionex_file.readline()
if line == '':
return line
elif line.strip():
return line | 053e5582e5146ef096d743973ea7069f19ae6d4d | 708,341 |
import numpy
def levup(acur, knxt, ecur=None):
"""
LEVUP One step forward Levinson recursion
Args:
acur (array) :
knxt (array) :
Returns:
anxt (array) : the P+1'th order prediction polynomial based on the P'th
order prediction polynomial, acur, and the P+1'th order
reflection coefficient, Knxt.
enxt (array) : the P+1'th order prediction prediction error, based on the
P'th order prediction error, ecur.
References:
P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3.
"""
    if acur[0] != 1:
        raise ValueError(
            'The first coefficient of the prediction polynomial must be unity.')
acur = acur[1:] # Drop the leading 1, it is not needed
# Matrix formulation from Stoica is used to avoid looping
anxt = numpy.concatenate((acur, [0])) + knxt * numpy.concatenate(
(numpy.conj(acur[-1::-1]), [1]))
enxt = None
if ecur is not None:
# matlab version enxt = (1-knxt'.*knxt)*ecur
enxt = (1. - numpy.dot(numpy.conj(knxt), knxt)) * ecur
anxt = numpy.insert(anxt, 0, 1)
return anxt, enxt | 182102d03369d23d53d21bae7209cf49d2caecb4 | 708,342 |