content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
---|---|---|
import json
import requests
import time
def get_county_data():
    """Get the raw data from coronavirus-tracker-api.herokuapp.com."""
    url = 'https://coronavirus-tracker-api.herokuapp.com/v2/locations?source=csbs'
    raw_data = None
    while raw_data is None:
        try:
            raw_data = json.loads(requests.request('GET', url, verify=False).text)
        except (requests.RequestException, ValueError):
            # Catch network errors and malformed JSON instead of using a bare except.
            print('API Get for county-data failed.')
            time.sleep(5)  # If the HTTP request fails, wait 5s and try again.
    return raw_data | 33404a65e6242b7416304f7194dc2a5c7f073d5d | 709,014 |
import math
def angle_difference(angle1, angle2):
"""
Calculates the difference between the given angles in clockwise direction as radians.
:param angle1: float
:param angle2: float
:return: float; between 0 and 2*Pi
"""
if (angle1 > 0 and angle2 >= 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif (angle1 >= 0 and angle2 > 0) and angle1 < angle2:
return 2 * math.pi + angle1 - angle2
elif (angle1 < 0 and angle2 <= 0) and angle1 < angle2:
return 2 * math.pi + angle1 + abs(angle2)
elif (angle1 <= 0 and angle2 < 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif angle1 <= 0 < angle2:
return 2 * math.pi + angle1 - angle2
elif angle1 >= 0 >= angle2:
return angle1 + abs(angle2)
else:
return 0 | 377d1915e58a96b7f1526dceb31febf45c90567b | 709,015 |
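A brief usage sketch for angle_difference, assuming the function above is in scope; the sample angles and expected values below are illustrative additions, not part of the original snippet.
import math
# A quarter turn clockwise from +pi/2 down to 0.
assert math.isclose(angle_difference(math.pi / 2, 0.0), math.pi / 2)
# Going from 0 to +pi/2 in the clockwise direction covers three quarters of a circle.
assert math.isclose(angle_difference(0.0, math.pi / 2), 3 * math.pi / 2)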
def merge_nd(nd_cdp, nd_lldp):
""" Merge CDP and LLDP data into one structure """
neis = dict()
nd = list()
for n in nd_lldp:
neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])] = n
for n in nd_cdp:
# Always prefer CDP, but grab description from LLDP if available
        if (n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int']) in neis:
if 'description' in neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])]:
n['description'] = neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])]['description']
neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])] = n
for n in neis:
nd.append(neis[n])
return nd | 90d55ffdabb6c28198ee4c59bc36fdcb6fa54e62 | 709,016 |
def noiseless(rho, unitary):
"""Returns the noiseless predictions."""
rhotilde = unitary @ rho @ unitary.conj().T
elt = rhotilde[0, 0]
if elt >= 0.49999999:
return 0, elt
return 1, elt | bfa265046361b159e7d264aa8312b75cd7a0df3f | 709,017 |
import base64
import uuid
import os
def copy_data_to_device(device, data, destination, filename=None):
""" Copies data into a device and creates a file to store that data.
    Args:
        device ('obj'): Device on which the file will be created
        data ('str'): The data to be copied
destination ('str'): Folder of where to store file
filename ('str'): Name of the file created. If left none then a
random name will be generated
Raise:
Exception: Permission Denied, File Creation Failed
Returns:
Path (str): path of created file
"""
try:
device.execute('ls {}'.format(destination))
except Exception:
raise FileNotFoundError("Directory '{}' does not exist.".format(
destination))
# Data must end in new line
if len(data) > 0 and not data[-1] == "\n":
data += "\n"
# Transforms text data into base64 string
encoded = base64.b64encode(bytes(data, "utf-8")).decode("utf-8")
if filename is None:
id = uuid.uuid4().hex
filename = os.path.join(destination, id)
else:
filename = os.path.join(destination, filename)
# Decode base 64 data into file
device.execute("DATA=\"{}\"".format(encoded))
device_out = device.execute("echo $DATA | base64 -d > {}".format(filename))
if 'Permission denied' in device_out:
raise Exception("Permission denied while trying to create file. " + \
"Make sure {} has the correct permissions!".format(filename))
# Verify file has been successfully created
try:
device.execute("ls {}".format(filename))
except Exception:
raise Exception("Creating of file {} has failed. No file created."
.format(filename))
if int(device.execute('stat {} --printf="%s\\n"'.format(filename))) == 0:
raise Exception("Creating of file {} has failed. Created file has no content"
.format(filename))
return filename | 73fad19637363a31c19e55c59e42479f2b9b0c84 | 709,018 |
from typing import Any
from sys import version
def version_callback() -> Any:
"""Print the version of the package."""
print(f"version: {version}")
return version | 987643727d133dc09163cebd6c4293f78b0b7f6a | 709,019 |
def fill(bitdef, value):
"""
Fill undefined bits with a value.
For example ``1..0100.1`` becomes ``111010011`` when filled with 1s.
Args:
bitdef (str): The bitdef to fill.
value (str): The value to fill with, "0" or "1".
Returns:
str: The filled bitdef.
"""
output = ""
for bit in bitdef:
if bit == ".":
output += value
else:
output += bit
return output | eef3ac59a2a7c4d1a25851a2ca14b3ffed6d1463 | 709,020 |
import os
def _get_relative_maddir(maddir, port):
""" Return a relative path version of maddir
GPDB and HAWQ installations have a symlink outside of GPHOME that
links to the current GPHOME. After a DB upgrade, this symlink is updated to
the new GPHOME.
'maddir_lib', which uses the absolute path of GPHOME, is hardcoded into each
madlib function definition. Replacing the GPHOME path with the equivalent
relative path makes it simpler to perform DB upgrades without breaking MADlib.
"""
if port not in ('greenplum', 'hawq'):
# do nothing for postgres
return maddir
# e.g. maddir_lib = $GPHOME/madlib/Versions/1.9/lib/libmadlib.so
# 'madlib' is supposed to be in this path, which is the default folder
# used by GPPKG to install madlib
try:
abs_gphome, tail = maddir.split('madlib/')
except ValueError:
return maddir
link_name = 'greenplum-db' if port == 'greenplum' else 'hawq'
# Check outside $GPHOME if there is a symlink to this absolute path
# os.pardir is equivalent to ..
# os.path.normpath removes the extraneous .. from that path
rel_gphome = os.path.normpath(os.path.join(abs_gphome, os.pardir, link_name))
if os.path.islink(rel_gphome) and os.path.realpath(rel_gphome) == os.path.realpath(abs_gphome):
# if the relative link exists and is pointing to current location
return os.path.join(rel_gphome, 'madlib', tail)
else:
return maddir | 7ad76b8d44f68ebd61813a851672b4f4aa18b77d | 709,021 |
from typing import Dict
def hash_dict(data: Dict) -> int:
"""
Hashes a Dictionary recursively.
List values are converted to Tuples.
WARNING: Hashing nested dictionaries is expensive.
"""
cleaned_dict: Dict = {}
def _clean_dict(data: Dict) -> Dict:
d: Dict = {}
for k, v in data.items():
if isinstance(v, list) or isinstance(v, set):
d[k] = tuple(v)
elif isinstance(v, dict):
d[k] = hash_dict(v)
else:
d[k] = v
return d
cleaned_dict = _clean_dict(data)
return hash(tuple(sorted(cleaned_dict.items()))) | 42b579151c90a42fadf2b53751978eec421ea03c | 709,022 |
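A minimal usage sketch for hash_dict with illustrative data, assuming the function above is defined: two structurally equal nested dicts hash to the same value regardless of key order.
a = {"x": [1, 2], "y": {"z": [3, 4]}}
b = {"y": {"z": [3, 4]}, "x": [1, 2]}
assert hash_dict(a) == hash_dict(b)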
def vector_to_diagonal(v):
"""Converts a vector to a diagonal matrix with vector elements
as the diagonal elements of the matrix"""
diag_matrix = [[0 for i in range(len(v))] for j in range(len(v))]
for i in range(len(v)):
diag_matrix[i][i] = v[i]
return diag_matrix | 6cbaf54a083633a47af92acc7f69421ed68a1c0b | 709,023 |
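A short illustrative check for vector_to_diagonal, assuming the function above is in scope:
assert vector_to_diagonal([1, 2, 3]) == [[1, 0, 0], [0, 2, 0], [0, 0, 3]]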
def password_provider():
"""
Provides the full password check
"""
return [(n,) for n in range(5)] | afdb188844e4b0979528b290477130313679e4df | 709,024 |
def is_abbreviation(sentence):
"""
Evaluate a word to be an abbreviation if the immediate word before the
period contains a capital letter and not a single word sentence.
"""
sentence_split = sentence.split(" ")
if len(sentence_split) == 1:
return False
elif len(sentence_split[-1]) <= 3 and \
any(x.isupper() for x in sentence_split[-1]):
return True
else:
return False | a6f6ceae5b3b9adb7817a913e80a6af86b6d27d5 | 709,025 |
def compose_redis_key(vim_name, identifier, identifier_type="vdu"):
"""Compose the key for redis given vim name and vdu uuid
Args:
vim_name (str): The VIM name
identifier (str): The VDU or VNF uuid (NFVI based)
identifier_type (str): the identifier type. Default type is vdu. Also vnf is supported.
Returns:
str: the key for redis
"""
if identifier_type == "vnf":
return "{}:vnf#{}".format(vim_name.lower(), identifier)
else:
return "{}:{}".format(vim_name.lower(), identifier) | e9a03cf9ff704fea8b9cdf75c59695568e366649 | 709,026 |
def calGridID(locs, id, SPLIT = 0.0005):
"""
根据城市网格编号还原经纬度信息
:param locs:
:param id:
:param SPLIT=0.05:
"""
centerincrement = SPLIT/2.0
LNGNUM = int((locs['east'] - locs['west']) / SPLIT + 1)
latind = int(id / LNGNUM)
lngind = id - latind * LNGNUM
lat = (locs['south'] + latind * SPLIT)
lng = (locs['west'] + lngind * SPLIT)
lngcen = (lng + centerincrement)
latcen = (lat + centerincrement)
return "%.3f,%.3f" % (latcen, lngcen)
# {
# 'lat': latcen,
# 'lng': lngcen
# } | 8df119ff82bc1d3c14dbdfe358af6d956d6a52a2 | 709,027 |
def linear(x, *p):
"""[summary]
Arguments:
x {[type]} -- [description]
Returns:
[type] -- [description]
"""
return p[0] * x + p[1] | 07ef5fc7c5e78148528cccd09fe14c37cad22ead | 709,028 |
def convert_price_text(t):
"""
convert "$175/month' to 175
:param t:
:return: price, unit (i.e. 175, 'month')
"""
tok = t.split('$')[1]
if '/' in tok:
price, unit = tok.split('/')
else:
price = tok
unit = None
return float(price.strip().strip('$').replace(',', '')), unit | b42d26dcd4eb1b2c2f8c5a63ddc9d48469e30a52 | 709,029 |
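A usage sketch for convert_price_text with illustrative strings (not taken from the original snippet), assuming the function above is importable:
assert convert_price_text("$175/month") == (175.0, "month")
assert convert_price_text("$1,200") == (1200.0, None)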
def _get_prob_k_given_L(B, N=None):
"""
Helper function.
"""
if N is None:
N = int(B[0, 1])
return B / N | be1d0848b148b3413aaee2c5549bd6063e1f2d33 | 709,030 |
def force_orders(self, **kwargs):
"""User's Force Orders (USER_DATA)
GET /fapi/v1/forceOrders
https://binance-docs.github.io/apidocs/futures/en/#user-39-s-force-orders-user_data
Keyword Args:
symbol (str, optional)
autoCloseType (str, optional): "LIQUIDATION" for liquidation orders, "ADL" for ADL orders.
startTime (int, optional)
endTime (int, optional)
limit (int, optional): Default 50; max 100.
recvWindow (int, optional)
Notes:
If "autoCloseType" is not sent, orders with both of the types will be returned
If "startTime" is not sent, data within 7 days before "endTime" can be queried
"""
payload = {**kwargs}
url_path = "/fapi/v1/forceOrders"
return self.sign_request("GET", url_path, payload) | 6e848820e17e54df0f275ec4087d9c609d4e08fa | 709,031 |
from datetime import datetime
def now(mydateformat='%Y%m%dT%H%M%S'):
""" Return current datetime as string.
Just a shorthand to abbreviate the common task to obtain the current
datetime as a string, e.g. for result versioning.
Args:
mydateformat: optional format string (default: '%Y%m%dT%H%M%S')
Returns:
datetime.now(), formated to string with argument mydateformat, e.g.
        YYYYMMDDThhmmss ==> 20131007T123456
"""
return datetime.now().strftime(mydateformat) | f4f98116700888a4be273143d635c62859c96e03 | 709,032 |
def replace_dict(d, **kwargs):
"""
Replace values by keyword on a dict, returning a new dict.
"""
e = d.copy()
e.update(kwargs)
return e | be1cc21be5320eeea13307dd4ed5025b51339eec | 709,033 |
def pageHeader(
headline="",
tagline=""):
"""
*Generate a pageHeader - TBS style*
**Key Arguments:**
- ``headline`` -- the headline text
- ``tagline`` -- the tagline text for below the headline
**Return:**
- ``pageHeader`` -- the pageHeader
"""
pageHeader = """
<div class="page-header" id=" ">
<h1>%(headline)s<br><small>%(tagline)s</small></h1>
</div>""" % locals()
return pageHeader | 7d9e91df8af2fff92b0b7096cd1a13198d899e15 | 709,034 |
def get_counter_merge_suggestion(merge_suggestion_tokens):
"""Return opposite of merge suggestion
Args:
merge_suggestion_tokens (list): tokens in merge suggestion
Returns:
str: opposite of merge suggestion
"""
counter_merge_suggestion = ' '.join(merge_suggestion_tokens)
if merge_suggestion_tokens[-1][-1] == '་':
counter_merge_suggestion += " "
return counter_merge_suggestion | e32e0f1b64fe77acaa8d88d72dca9304b7427674 | 709,035 |
import os
import sys
def get_secret(name):
"""Load a secret from file or env
Either provide ``{name}_FILE`` or ``{name}`` in the environment to
configure the value for ``{name}``.
"""
try:
with open(os.environ[name + "_FILE"]) as secret_file:
return secret_file.read().strip()
except (FileNotFoundError, PermissionError, KeyError):
try:
return os.environ[name]
except KeyError:
if os.path.basename(sys.argv[0]) == 'sphinx-build':
# We won't have nor need secrets when building docs
return None
raise ValueError(
f"Missing secrets: configure {name} or {name}_FILE to contain or point at secret"
) from None | 3b240f7b494c7817f58c8ab3f7f9000ff7f85844 | 709,036 |
import sys
def is_bst(root):
""" checks if binary tree is binary search tree """
def is_bst_util(root, min_value, max_value):
""" binary search tree check utility function """
if root is None:
return True
if (root.data >= min_value and root.data < max_value
and is_bst_util(root.left, min_value, root.data)
and is_bst_util(root.right, root.data, max_value)):
return True
return False
return is_bst_util(root, -sys.maxsize - 1, sys.maxsize) | 46828b5b3fc1827908faf7b9bb646bc3b6594b30 | 709,038 |
import math
import numpy as np
def pad_images(images, nlayers):
"""
In Unet, every layer the dimension gets divided by 2
in the encoder path. Therefore the image size should be divisible by 2^nlayers.
"""
divisor = 2**nlayers
nlayers, x, y = images.shape # essentially setting nlayers to z direction so return is z, x, y
x_pad = int((math.ceil(x / float(divisor)) * divisor) - x)
y_pad = int((math.ceil(y / float(divisor)) * divisor) - y)
padded_image = np.pad(images, ((0,0),(0, x_pad), (0, y_pad)), 'constant', constant_values=(0, 0))
return padded_image | 671fa940d0a0ed87819335b60d12d9e268bf9932 | 709,039 |
def good2Go(SC, L, CC, STR):
"""
    Check if all inputs are correct and the run can proceed.
"""
if SC == 1 and L == 1 and CC == 1 and STR == 1:
return True
else:
print(SC, L, CC, STR)
return False | e49229df6b9b187e1840d5bc5c8a1a8e087a5a4e | 709,040 |
import os
def list_dir(filepath):
"""List the files in the directory"""
return sorted(list(map(lambda x: os.path.join(filepath, x), os.listdir(filepath)))) | 29c50f132b5abfdfea819db58a816a83e6efaccd | 709,041 |
def seconds_to_timestamp(seconds):
"""
Convert from seconds to a timestamp
"""
minutes, seconds = divmod(float(seconds), 60)
hours, minutes = divmod(minutes, 60)
return "%02d:%02d:%06.3f" % (hours, minutes, seconds) | 8b9806f05fe4796baae51001e69455e82fb51eed | 709,042 |
def readbit(val, bitidx):
""" Direct word value """
return int((val & (1<<bitidx))!=0) | 4ca368f89b2496ec46c1641835c1f2a0a1cdd573 | 709,043 |
def coord_to_gtp(coord, board_size):
""" From 1d coord (0 for position 0,0 on the board) to A1 """
if coord == board_size ** 2:
return "pass"
return "{}{}".format("ABCDEFGHJKLMNOPQRSTYVWYZ"[int(coord % board_size)],\
int(board_size - coord // board_size)) | a0419e8a7f39cd282585ed1d29d94bbded0e3f1c | 709,044 |
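A usage sketch for coord_to_gtp on a 19x19 board, assuming the function above (with the standard GTP column letters, which skip 'I') is in scope; the coordinates are illustrative.
assert coord_to_gtp(0, 19) == "A19"        # top-left corner
assert coord_to_gtp(20, 19) == "B18"       # second row, second column
assert coord_to_gtp(19 * 19, 19) == "pass"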
def scorer(func):
"""This function is a decorator for a scoring function.
This is hack a to get around self being passed as the first argument to the scoring function."""
def wrapped(a, b=None):
if b is not None:
return func(b)
return func(a)
return wrapped | 39ec390982d26d10a6ce827800df654ff6c4ab42 | 709,045 |
def _calculate_risk_reduction(module):
"""
Function to calculate the risk reduction due to testing. The algorithms
used are based on the methodology presented in RL-TR-92-52, "SOFTWARE
RELIABILITY, MEASUREMENT, AND TESTING Guidebook for Software
Reliability Measurement and Testing." Rather than attempting to
estimate the software failure rate, RTK provides a risk index for the
software based on the same factors used in RL-TR-92-52 for estimating
software failure rates. RTK also provides test planning guidance in
the same manner as RL-TR-92-52.
:param module: the :py:class:`rtk.software.CSCI.Model` or
:py:class:`rtk.software.Unit.Model` data model to calculate.
:return: _error_code
:rtype: int
"""
# WARNING: Refactor _calculate_risk_reduction; current McCabe Complexity metric = 13.
_error_code = 0
# Calculate the risk reduction due to the test effort.
try:
if module.test_effort == 1: # Labor hours
_test_ratio = float(module.labor_hours_test) / \
float(module.labor_hours_dev)
elif module.test_effort == 2: # Budget
_test_ratio = float(module.budget_test) / \
float(module.budget_dev)
elif module.test_effort == 3: # Schedule
_test_ratio = float(module.schedule_test) / \
float(module.schedule_dev)
else:
_test_ratio = 1.0
except ZeroDivisionError:
_error_code = 10
_test_ratio = 0.0
module.te = 1.0
if _test_ratio > 0.4:
module.te = 0.9
# Calculate the risk reduction due to test methods used.
module.tm = 1.0
module.tu = sum([_tu[0] for _tu in module.lst_test_selection])
module.tt = sum([_tt[1] for _tt in module.lst_test_selection])
try:
if module.tu / module.tt > 0.75:
module.tm = 0.9
elif module.tu / module.tt < 0.5:
module.tm = 1.1
except ZeroDivisionError:
_error_code = 10
# Calculate the risk reduction due to test coverage.
try:
if module.level_id == 2: # Module
_VS = ((float(module.nm_test) / float(module.nm)) +
(float(module.interfaces_test) /
float(module.interfaces))) / 2.0
elif module.level_id == 3: # Unit
_VS = ((float(module.branches_test) / float(module.branches)) +
(float(module.inputs_test) / float(module.inputs))) / 2.0
else:
_VS = 1.0
except ZeroDivisionError:
_error_code = 10
_VS = 1.0
module.tc = 1.0 / _VS
module.t_risk = module.te * module.tm * module.tc
return _error_code | c8876bc247243f13572d49c07063a063ba4eb42a | 709,046 |
def timeframe_int_to_str(timeframe: int) -> str:
"""
Convert timeframe from integer to string
:param timeframe: minutes per candle (240)
:return: string representation for API (4h)
"""
if timeframe < 60:
return f"{timeframe}m"
elif timeframe < 1440:
return f"{int(timeframe / 60)}h"
else:
return f"{int(timeframe / 1440)}d" | 75778742dea8204c74a47bfe92c25aef43ebbad8 | 709,047 |
def simplify(tile):
"""
:param tile: 34 tile format
:return: tile: 0-8 presentation
"""
return tile - 9 * (tile // 9) | c8543d73e37d4fa1d665d3d28277ff99095e0635 | 709,049 |
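A brief sketch of simplify, assuming the conventional man/pin/sou ordering of the 34-tile encoding (indices 0-8, 9-17, 18-26); the values are illustrative.
# The 4th tile of each numbered suit maps to index 3 within its suit.
assert simplify(3) == 3
assert simplify(12) == 3
assert simplify(21) == 3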
import numpy
def convert_image_points_to_points(image_positions, distances):
"""Convert image points to 3d points.
Returns:
positions
"""
hypotenuse_small = numpy.sqrt(
image_positions[:, 0]**2 +
image_positions[:, 1]**2 + 1.0)
ratio = distances / hypotenuse_small
n = image_positions.shape[0]
positions = numpy.zeros([n, 3])
positions[:, 0] = -image_positions[:, 0] * ratio
positions[:, 1] = ratio
positions[:, 2] = -image_positions[:, 1] * ratio
return positions | 3680a02997cf1109fd08f61c6642b29ea3433f1d | 709,050 |
from pathlib import Path
def is_submodule_repo(p: Path) -> bool:
"""
"""
if p.is_file() and '.git/modules' in p.read_text():
return True
return False | 26675ee25e431778325081ec80d45ff3d72c2046 | 709,051 |
def shift_contig(df2, remove):
"""
    The function appends shifted fragments produced by the
    sort_cluster_seq function.
    Parameters
    ----------
    df2 : pandas DataFrame
        DataFrame with an NRPS cluster fragment.
    remove : list
        List of cluster fragments which should be removed.
Returns
-------
df2 : pandas DataFrame
Corrected DataFrame with NRPS meta information.
"""
for gen in remove:
df2 = df2.append(gen)
return df2 | 7df891785fc58d818af5b423c7fdbc3c4382951f | 709,052 |
def single_data_path(client, node_id):
"""
In order for a shrink to work, it should be on a single filesystem, as
shards cannot span filesystems. Return `True` if the node has a single
filesystem, and `False` otherwise.
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:rtype: bool
"""
return len(client.nodes.stats()['nodes'][node_id]['fs']['data']) == 1 | ae0b34f82acb6d12faf525f0270250cdf471a6f8 | 709,053 |
import pydoc
def spec(func):
"""return a string with Python function specification"""
doc = pydoc.plain(pydoc.render_doc(func))
return doc.splitlines()[2] | 00b96364f77141fedd7d50396946fd4e29cc5d02 | 709,054 |
import os
import time
def file_age(file_name):
"""
Returns the age of a file in seconds from now. -1 if the file does not exist.
:param file_name: file name
.. versionadded:: 9.3.1
"""
if not os.path.exists(file_name):
return -1
return time.time() - os.path.getmtime(file_name) | 9cefc1da2f7ab1c44fbe9dc4f63a5d51bc088ab8 | 709,055 |
def getRatios(vect1, vect2):
"""Assumes: vect1 and vect2 are equal length lists of numbers
Returns: a list containing the meaningful values of
vect1[i]/vect2[i]"""
ratios = []
for index in range(len(vect1)):
try:
ratios.append(vect1[index]/vect2[index])
except ZeroDivisionError:
ratios.append(float('nan')) #nan = Not a Number
except:
raise ValueError('getRatios called with bad arguments')
return ratios | e28f871986ab2b1b87cc3671b1c27ad14a0aadf8 | 709,056 |
def sampleset():
"""Return list with 50 positive and 10 negative samples"""
pos = [(0, i) for i in range(50)]
neg = [(1, i) for i in range(10)]
return pos + neg | 77e5a0ca3ad8757f0ded2aec9d73312a66ac9044 | 709,057 |
def reverse_lookup(d, v):
"""
Reverse lookup all corresponding keys of a given value.
    Return a list containing all the keys.
    Raise an exception if the list is empty.
"""
l = []
for k in d:
if d[k] == v:
l.append(k)
if l == []:
raise ValueError
else:
return l | d68f437aec47df964905779f99d58be84515fb72 | 709,058 |
from pathlib import Path
def cpe2pkg_tool():
"""Unsupported ecosystem CVE fixture."""
bin = Path(__file__).parent.parent / Path('tools/bin/cpe2pkg.jar')
if bin.exists():
return str(bin)
else:
        raise RuntimeError('`cpe2pkg.jar` is not available, please run `make build-cpe2pkg` once.') | 7ad5489cd560f2820a5e77c46964514a5a34edc9 | 709,059 |
import threading
def spawn_thread(func, *args, **kwds):
"""
Utility function for creating and starting a daemonic thread.
"""
thr = threading.Thread(target=func, args=args, kwargs=kwds)
    thr.daemon = True
thr.start()
return thr | afaace7e02870390acb297106ac9d35c9a931a59 | 709,060 |
import sys
def decision(question):
"""Asks user for a question returning True/False answed"""
if sys.version_info[0] < 3:
if raw_input("\n%s [Y/n] " % question) in ["", "y", "Y"]:
return True
else:
if input("\n%s [Y/n] " % question) in ["", "y", "Y"]:
return True
return False | 8d31e2f11ad9aa2d0d35f35078ffb46ca0718f09 | 709,061 |
import torch
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred) | 1b1ad83b9b4ae06f2bc80209e4e7339a421a39f3 | 709,062 |
def pres_from_hybrid(psfc, hya, hyb, p0=100000.):
"""Return pressure field on hybrid-sigma coordinates,
assuming formula is
p = a(k)*p0 + b(k)*ps.
"""
return hya*p0 + hyb*psfc | 4ebd90fb807ab9ea4c2b45d27da6f8b420c107f7 | 709,064 |
import numpy
def psf_gaussian(psf_shape, psf_waist, psf_physical_size=1, psf_nphoton=2):
"""Return 3D gaussian approximation of PSF."""
def f(index):
s = psf_shape[index] // 2 * psf_physical_size
c = numpy.linspace(-s, s, psf_shape[index])
c *= c
c *= -2.0 / (psf_waist[index] * psf_waist[index])
return c
psf = numpy.exp(
numpy.sum(
numpy.meshgrid(f(0), f(1), f(2), indexing='ij', sparse=False),
axis=0,
)
)
if psf_nphoton != 1:
numpy.power(psf, psf_nphoton, out=psf)
return psf | 77ccab6aaa141564751a0eafd13398f904673006 | 709,066 |
def MapToSingleIncrease(val):
"""
Need 30 minute values to be sequential for some of the tools(i.e. 1,2,3,4) so using a format
like 5,10,15,20 won't work.
"""
return val/5 | fe89d7ccb8bef511e2ad90a07ad0346c58ba894d | 709,067 |
def registra_aluno(nome, ano_entrada, ano_nascimento, **misc):
"""Cria a entrada do registro de um aluno."""
registro = {'nome': nome,
'ano_entrada': ano_entrada,
'ano_nascimento': ano_nascimento}
for key in misc:
registro[key] = misc[key]
return registro | e56da99ec90de9ebca204ccc3c3f3555b9bbbc64 | 709,068 |
def count_good_deals(df):
"""
    7. Count the number of profitable deals.
    :param df: - dataframe with a '<DEAL_RESULT>' column
    :return: - the number of profitable deals
"""
# http://stackoverflow.com/questions/27140860/count-occurrences-of-number-by-column-in-pandas-data-frame?rq=1
return (df['<DEAL_RESULT>'] > 0).sum() | 1f3ef9b9e0f7924d45d5ce84a77938f19386b6bc | 709,069 |
def can_write(obj, user):
"""
Takes article or related to article model.
Check if user can write article.
"""
return obj.can_write(user) | 9cb7cc046b63fb82670c4667abe169d6a1a279e4 | 709,070 |
import re
def is_C2D(lname):
"""
"""
pattns = ['Conv2D']
return any([bool(re.match(t,lname)) for t in pattns]) | a12bfd9857543e568148659f782615b3f2de4b83 | 709,071 |
def encounter_media(instance, filename):
"""Return an upload file path for an encounter media attachment."""
if not instance.encounter.id:
instance.encounter.save()
return 'encounter/{0}/{1}'.format(instance.encounter.source_id, filename) | 79e4d8fae1d41edf362e99e6da11442a71565aa0 | 709,072 |
def remove_head_id(ref, hyp):
"""Assumes that the ID is the begin token of the string which is common
in Kaldi but not in Sphinx."""
ref_id = ref[0]
hyp_id = hyp[0]
if ref_id != hyp_id:
print('Reference and hypothesis IDs do not match! '
'ref="{}" hyp="{}"\n'
'File lines in hyp file should match those in the ref file.'.format(ref_id, hyp_id))
exit(-1)
ref = ref[1:]
hyp = hyp[1:]
return ref, hyp | 210798e8a02f555f70a1d9f2de9ce098dd0669fb | 709,073 |
def get_vaccinated_model(model, area=None):
"""Get all states that can be vaccinated or recovered (by area).
Parameters
----------
model : amici.model
Amici model which should be evaluated.
    area : str or None
        Name of a single area as a string; if None, all areas are considered.
Returns
-------
states : list
List of states that can be vaccinated.
"""
if area is None:
states = [
x
for x in model.getStateNames()
if not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
]
else:
states = [
x
for x in model.getStateNames()
if (
not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
)
and (area in x)
]
return states | c03a9d048abb08561463b1975ffec663f24267b3 | 709,074 |
from datetime import datetime
def MicrosecondsToDatetime(microseconds):
"""Returns a datetime given the number of microseconds, or None."""
if microseconds:
return datetime.utcfromtimestamp(float(microseconds) / 1000000)
return None | 69fd3dc3b8d1a97e7a64037cabe988365b2c6e63 | 709,075 |
def dynamic_import(import_string):
"""
Dynamically import a module or object.
"""
# Use rfind rather than rsplit for Python 2.3 compatibility.
lastdot = import_string.rfind('.')
if lastdot == -1:
return __import__(import_string, {}, {}, [])
module_name, attr = import_string[:lastdot], import_string[lastdot + 1:]
parent_module = __import__(module_name, {}, {}, [attr])
return getattr(parent_module, attr) | f6418ff17f3d480b22abac1146d946a5f990cb3c | 709,076 |
import requests
def getExternalIP():
""" Returns external ip of system """
ip = requests.get("http://ipv4.myexternalip.com/raw").text.strip()
if ip == None or ip == "":
ip = requests.get("http://ipv4.icanhazip.com").text.strip()
return ip | 77847063a2da7c6484dd6e569786a012b3a0a62f | 709,077 |
def intersection_indices(a, b):
"""
:param list a, b: two lists of variables from different factors.
returns a tuple of
(indices in a of the variables that are in both a and b,
indices of those same variables within the list b)
For example, intersection_indices([1,2,5,4,6],[3,5,1,2]) returns
([0, 1, 2], [2, 3, 1]).
"""
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
mapA = []
mapB = []
for i, itm in enumerate(a):
if itm in bind:
mapA.append(i)
mapB.append(bind.get(itm))
return mapA, mapB | 55264faaa4fd5e6dc5365b675ebd3b7f6a1e1280 | 709,078 |
import re
def validate_password(password, password_repeat=None):
"""
Validate user password.
:param password: password as string
:param password_repeat: repeat password
:return: False - valid password
"""
if password_repeat:
if password != password_repeat:
return "Passwords did not match."
flag = False
if len(password) < 8:
flag = True
elif not re.search("[a-z]", password):
flag = True
elif not re.search("[A-Z]", password):
flag = True
elif not re.search("[0-9]", password):
flag = True
elif re.search("\s", password):
flag = True
if flag:
return (
"Password must contain at least a lower case, an upper case, a number, no spaces "
"and be at least 9 characters."
)
return False | 2987a1bec151e173156ab6a72345864c84dcb61c | 709,079 |
def dict_depth(d):
"""
    Recursively compute the depth of a dict.
    d = {'a':1, 'b': {'c':{}}} --> depth(d) == 3
"""
if isinstance(d, dict):
return 1 + (max(map(dict_depth, d.values())) if d else 0)
return 0 | 16f4164fdea08af9d5846a5866428c81848726b9 | 709,080 |
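A few illustrative checks for dict_depth, assuming the function above is in scope:
assert dict_depth({}) == 1
assert dict_depth({'a': 1, 'b': {'c': {}}}) == 3
assert dict_depth("not a dict") == 0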
def calculate_recall(tp, n):
"""
:param tp: int
Number of True Positives
:param n: int
Number of total instances
:return: float
Recall
"""
if n == 0:
return 0
return tp / n | b8a36488af59e036acdb50821716ae34287e6b8f | 709,081 |
def date_to_num(date):
"""Convert datetime to days since 1901"""
num = (date.year - 1901) * 365.25
num += [
0, 31, 59.25, 90.25, 120.25,
151.25, 181.25, 212.25, 243.25,
273.25, 304.25, 334.25
][date.month - 1]
num += date.day
return int(num) | 88e342e0fc80a5998df8e5f1ab0002e0f7fe808e | 709,082 |
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg) | e0171c3b0eee18c7fcc44cbdfe007949feabba9a | 709,083 |
import torch
def ltria2skew(L):
"""
assume L has already passed the assertion check
:param L: lower triangle matrix, shape [N, 3]
:return: skew sym A [N, 3, 3]
"""
if len(L.shape) == 2:
N = L.shape[0]
# construct the skew-sym matrix
A = torch.zeros(N, 3, 3).cuda() # [N, 3, 3]
A[:, 1, 0] = L[:, 0]
A[:, 2, 0] = L[:, 1]
A[:, 2, 1] = L[:, 2]
A[:, 0, 1] = -L[:, 0]
A[:, 0, 2] = -L[:, 1]
A[:, 1, 2] = -L[:, 2]
return A
elif len(L.shape) == 1:
A = torch.zeros(3, 3).cuda()
A[1, 0] = L[0]
A[2, 0] = L[1]
A[2, 1] = L[2]
A[0, 1] = -L[0]
A[0, 2] = -L[1]
A[1, 2] = -L[2]
return A
else:
raise NotImplementedError | 6e74c181fc8efcdc28ba35578f31fb6f2a7fa1bb | 709,084 |
def _converge(helper, rcs, group):
"""
Function to be passed to :func:`_oob_disable_then` as the ``then``
parameter that triggers convergence.
"""
return group.trigger_convergence(rcs) | 8aab701dc7e29d83d6c8ab8b71c37837feb72847 | 709,085 |
import math
def calc_obstacle_map(ox, oy, resolution, vr):
"""
Build obstacle map according to the distance of a
certain grid to obstacles. Treat the area near the
obstacle within the turning radius of the vehicle
as the obstacle blocking area and mark it as TRUE.
"""
min_x = round(min(ox))
min_y = round(min(oy))
max_x = round(max(ox))
max_y = round(max(oy))
x_width = round(max_x - min_x)
y_width = round(max_y - min_y)
# obstacle map generation
obstacle_map = [[False for _ in range(y_width)] for _ in range(x_width)]
for ix in range(x_width):
x = ix + min_x
for iy in range(y_width):
y = iy + min_y
# print(x, y)
for iox, ioy in zip(ox, oy):
d = math.sqrt((iox - x)**2 + (ioy - y)**2)
if d * resolution <= vr:
obstacle_map[ix][iy] = True
break
return obstacle_map, min_x, min_y, max_x, max_y, x_width, y_width | 87d44c5eb799bf3b2ea64ac0717b8d7f260a4a37 | 709,086 |
def _emit_params_file_action(ctx, path, mnemonic, cmds):
"""Helper function that writes a potentially long command list to a file.
Args:
ctx (struct): The ctx object.
path (string): the file path where the params file should be written.
        mnemonic (string): the action mnemonic.
cmds (list<string>): the command list.
Returns:
(File): an executable file that runs the command set.
"""
filename = "%s.%sFile.params" % (path, mnemonic)
f = ctx.new_file(ctx.configuration.bin_dir, filename)
ctx.file_action(output = f,
content = "\n".join(["set -e"] + cmds),
executable = True)
return f | adafb75e24b2023ad2926e4248e8b2e1e6966b8e | 709,087 |
import textwrap
def ignore_firstline_dedent(text: str) -> str:
"""Like textwrap.dedent(), but ignore first empty lines
Args:
text: The text the be dedented
Returns:
The dedented text
"""
out = []
started = False
for line in text.splitlines():
if not started and not line.strip():
continue
if not started:
started = True
out.append(line)
return textwrap.dedent("\n".join(out)) | 04bde49e72e07552f2f88e9112546d00b85a2879 | 709,088 |
def read_file(filename):
"""
Read a file and return its binary content. \n
@param filename : filename as string. \n
@return data as bytes
"""
with open(filename, mode='rb') as file:
file_content = file.read()
return file_content | 2417aa5cfa0d43303f9f6103e8b1fee9e8d652e2 | 709,089 |
def get_character(data, index):
"""Return one byte from data as a signed char.
Args:
data (list): raw data from sensor
index (int): index entry from which to read data
Returns:
int: extracted signed char value
"""
result = data[index]
if result > 127:
result -= 256
return result | 5a08102cb9dc8ae7e2adcab9b5653b77ee2c6ae3 | 709,091 |
def colour_from_loadings(loadings, maxLoading=None, baseColor="#FF0000"):
"""Computes colors given loading values.
Given an array of loading values (loadings), returns an array of
colors that graphviz can understand that can be used to colour the
nodes. The node with the greatest loading uses baseColor, and a node
with zero loading uses white (#FFFFFF).
This is achieved through clever sneaky use of the alpha channel."""
if maxLoading is None:
maxLoading = max(loadings)
    # Zero-pad to two hex digits so the appended alpha channel is always valid.
    return [baseColor + format(int(loading / maxLoading * 255), '02x')
            for loading in loadings] | 8bd65e5b4aa54558d3710a8518bbbe6400559046 | 709,092 |
def determineDocument(pdf):
""" Scans the pdf document for certain text lines and determines the type of investment vehicle traded"""
if 'turbop' in pdf or 'turboc' in pdf:
return 'certificate'
elif 'minil' in pdf:
return 'certificate'
elif 'call' in pdf or 'put' in pdf:
return 'warrant'
else:
return 'stock' | e6c5adc10168321fd6a534dd8e9fbf2e8ccb1615 | 709,093 |
import subprocess
def tmux_session_detection(session_name: str) -> bool:
"""
Function checks if session already exists.
"""
cmd = ['tmux', 'has-session', '-t', session_name]
result = subprocess.call(cmd, stderr=subprocess.DEVNULL)
if result == 0:
return True
else:
return False | 275d85e087fa271c76fe44f2a67ea4c719e0c031 | 709,094 |
def has_read_perm(user, group, is_member, is_private):
""" Return True if the user has permission to *read*
Articles, False otherwise.
"""
if (group is None) or (is_member is None) or is_member(user, group):
return True
if (is_private is not None) and is_private(group):
return False
return True | 6c1bc51abd50a5af76e16e7723957c758822c988 | 709,095 |
def dot_to_underscore(instring):
"""Replace dots with underscores"""
return instring.replace(".", "_") | cf9441702ffb128678a031eabb4fa48be881cae5 | 709,096 |
def rstrip_tuple(t: tuple):
"""Remove trailing zeroes in `t`."""
if not t or t[-1]:
return t
right = len(t) - 1
while right > 0 and t[right - 1] == 0:
right -= 1
return t[:right] | a10e74ea4a305d588fbd1555f32dda1d4b95266e | 709,098 |
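A short illustrative check for rstrip_tuple, assuming the function above is importable:
assert rstrip_tuple((1, 2, 0, 0)) == (1, 2)
assert rstrip_tuple((0, 0)) == ()
assert rstrip_tuple((1, 0, 2)) == (1, 0, 2)  # last element non-zero, returned unchanged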
def perfect_score(student_info):
"""
:param student_info: list of [<student name>, <score>] lists
:return: first `[<student name>, 100]` or `[]` if no student score of 100 is found.
"""
    # Return the first [name, score] entry whose score equals 100.
    for student in student_info:
        if int(student[1]) == 100:
            return student
    return [] | ac7580cce134627e08764031ef2812e1b70ba00f | 709,099 |
import argparse
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--cl_kernel_dir",
type=str,
default="./mace/ops/opencl/cl/",
help="The cl kernels directory.")
parser.add_argument(
"--output_path",
type=str,
default="./mace/examples/codegen/opencl/opencl_encrypted_program.cc",
help="The path of encrypted opencl kernels.")
return parser.parse_known_args() | 86b45bfeb0ebfbc4e3e4864b55736b6c5bb42954 | 709,100 |
def clamp(min_v, max_v, value):
"""
Clamps a value between a min and max value
Args:
min_v: Minimum value
max_v: Maximum value
value: Value to be clamped
Returns:
Returns the clamped value
"""
return min_v if value < min_v else max_v if value > max_v else value | 1a9aaf3790b233f535fb864215444b0426c17ad8 | 709,101 |
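A brief illustrative check for clamp, assuming the function above is in scope:
assert clamp(0, 10, -5) == 0   # below the range -> min
assert clamp(0, 10, 5) == 5    # inside the range -> unchanged
assert clamp(0, 10, 15) == 10  # above the range -> max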
def collatz(n):
"""Sequence generation."""
l = []
while n > 1:
l.append(n)
if n % 2 == 0:
            n = n // 2  # integer division keeps the sequence integral
else:
n = (3 * n) + 1
l.append(n)
return l | 69d993147604889fe6b03770efbfa6fb7f034258 | 709,102 |
import re
def number_format(number_string, fill=2):
"""
add padding zeros to make alinged numbers
ex.
>>> number_format('2')
'02'
>>> number_format('1-2')
'01-02'
"""
output = []
digits_spliter = r'(?P<digit>\d+)|(?P<nondigit>.)'
for token in [m.groups() for m in re.finditer(digits_spliter, number_string)]:
if token[0] is None:
output.append(token[1])
else:
            output.append(token[0].zfill(fill))
return ''.join(output) | ee44167b4597fbe7c9f01fa5b26e02d7608c3677 | 709,103 |
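A short sketch of number_format, including the fill parameter as wired up in the zfill(fill) fix above; the inputs are illustrative.
assert number_format('1-2') == '01-02'
assert number_format('1-2', fill=3) == '001-002'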
def _convert_paths_to_flask(transmute_paths):
"""flask has it's own route syntax, so we convert it."""
paths = []
for p in transmute_paths:
paths.append(p.replace("{", "<").replace("}", ">"))
return paths | f8ea95e66c68481f0eb5a6d83cf61d098806f6be | 709,104 |
def percent_uppercase(text):
"""Calculates percentage of alphabetical characters that are uppercase, out of total alphabetical characters.
Based on findings from spam.csv that spam texts have higher uppercase alphabetical characters
(see: avg_uppercase_letters())"""
alpha_count = 0
uppercase_count = 0
for char in text:
if char.isalpha():
alpha_count += 1
if char.isupper():
uppercase_count += 1
# calculate percentage - make sure not to divide by 0
try:
perc_uppercase = float(uppercase_count) / float(alpha_count)
return str(perc_uppercase)
except ZeroDivisionError:
return "0" | 61ccf42d06ffbae846e98d1d68a48de21f52c299 | 709,105 |
def _check_stack_axis(axis, dims, default='unnamed'):
""" check or get new axis name when stacking array or datasets
(just to have that in one place)
"""
if axis is None:
axis = default
if axis in dims:
i = 1
while default+"_{}".format(i) in dims:
i+=1
axis = default+"_{}".format(i)
if type(axis) is int:
raise TypeError("axis must be a str (new axis name)")
if axis in dims:
raise ValueError("please provide an axis name which does not \
already exist, or use `concatenate`")
return axis | 4dc74da450d6be4872a5f03e61ec16700b197d94 | 709,106 |
def file_name_to_title_name(file_name):
"""
    #Arguments
        file_name (str): name of the Python file; the '.py' suffix is stripped and a Markdown title line '<Title>: <file_name>.md' is returned
#Examples
file_name_to_title_name('activate_mode')
output = 'Activate Mode: activate_mode.md'
"""
file_name_list = file_name.split('.py')
file_name = file_name_list[0]
title = file_name.replace('_', ' ').title()
filename2 = ': ' + file_name + '.md'
return title + filename2 | 330eae5c34cd55f01aaf520ea9df467ea4042b1e | 709,107 |
def asynchronous_prod_milp_constraint_rule(backend_model, loc_tech, timestep):
"""
BigM limit set on `carrier_prod`, forcing it to either be zero or non-zero,
depending on whether `prod` is zero or one, respectively.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{carrier_prod}[loc::tech::carrier, timestep] \\leq
\\text{bigM} \\times \\boldsymbol{prod_con_switch}[loc::tech, timestep]
\\forall loc::tech \\in loc::techs_{asynchronous_prod_con},
\\forall timestep \\in timesteps
"""
model_dict = backend_model.__calliope_model_data
loc_tech_carrier = model_dict["data"]["lookup_loc_techs"][loc_tech]
return (
backend_model.carrier_prod[loc_tech_carrier, timestep]
<= backend_model.prod_con_switch[loc_tech, timestep] * backend_model.bigM
) | 049454e9a3aafecc8531225bc5f09b666d892fcb | 709,108 |
import pickle
def read_pickle(filename, protocol=-1, **kwargs):
"""
read grid saved in PICKLE format into a GridData object
:param filename: full path to the filename
:type filename: str
:rtype: ~uquake.core.data.grid.Grid
"""
    with open(filename, 'rb') as f:
        return pickle.load(f) | 8115b5a91698cc508ea05c3097d8d69b0bb77561 | 709,109 |
import requests
def correct_doi(file_name: str):
"""Attempt extract a DOI from a filename which contains a DOI."""
if file_name.startswith("acs.jced") or file_name.startswith("je"):
doi = f"10.1021/{file_name}"
elif file_name.startswith("j.jct"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("j.fluid"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("j.tca"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("s"):
doi = f"10.1007/{file_name}"
else:
raise NotImplementedError()
doi = doi.replace(".xml", "")
doi_request = requests.get(
f"https://doi.org/{doi}", headers={"Accept": "application/x-bibtex"}
)
doi_request.raise_for_status()
return doi | e01ddf648660e0fd126720042cc16b16ffe078d3 | 709,110 |
def read_shear_catalog_type(stage):
"""
Determine the type of shear catalog a stage is using as input.
Returns a string, e.g. metacal, lensfit.
Also sets shear_catalog_type in the stage's configuration
so that it is available later and is saved in output.
"""
with stage.open_input('shear_catalog', wrapper=True) as f:
shear_catalog_type = f.catalog_type
stage.config['shear_catalog_type'] = shear_catalog_type
return shear_catalog_type | 26dd03f3a2ef66acab47741df044ac8f2a92bbfb | 709,112 |
def __slicer(my_str, sub):
"""
Remove everything in a string before a specified substring is found.
Throw exception if substring is not found in string
https://stackoverflow.com/questions/33141595/how-can-i-remove-everything-in-a-string-until-a-characters-are-seen-in-python
Args:
my_str (string): the string to slice.
sub (string): the substring to stop slicing at.
Returns:
str: substring of my_str, without everything before sub.
Raises:
Exception: Sub string specified is not found in my_str.
"""
index = my_str.find(sub)
if index != -1:
return my_str[index:]
else:
# raise Exception('Sub string not found!')
return my_str | 50f9ef952ee2f9319c39948505852a209e434690 | 709,113 |
def set_diff(seq0, seq1):
"""Return the set difference between 2 sequences as a list."""
return list(set(seq0) - set(seq1)) | ff10464acc65b60e9355e8971c45fbca8025fda6 | 709,114 |
import re
def get_page_likes(response):
"""Scan a page and create a dictionary of the image filenames
and displayed like count for each image. Return the
dictionary."""
# find all flowtow divs
flowtows = response.html.find_all('div', class_='flowtow')
result = dict()
for div in flowtows:
# get the filename from the form hidden input
input = div.find("input", attrs={'name': "filename"})
filename = input['value']
# find the likes element
likesel = div.find(class_='likes')
# grab the integer from this element
        m = re.search(r'\d+', likesel.text)
if m:
likes = int(m.group())
else:
likes = 0
result[filename] = likes
return result | e956e54d18d6540d1a8fd07250a5c758b696bcc5 | 709,115 |
def str_product(string):
""" Calculate the product of all digits in a string """
product = 1
for i in string:
product *= int(i)
return product | c0c7442ac53aaf49760feffa7d08408d7520d9b4 | 709,117 |
import csv
def ConvertCSVStringToList(csv_string):
"""Helper to convert a csv string to a list."""
reader = csv.reader([csv_string])
return list(reader)[0] | fa244d2a1c8c50b2b097883f964f1b5bb7ccf393 | 709,119 |
def find_object_with_matching_attr(iterable, attr_name, value):
"""
Finds the first item in an iterable that has an attribute with the given name and value. Returns
None otherwise.
Returns:
Matching item or None
"""
for item in iterable:
try:
if getattr(item, attr_name) == value:
return item
except AttributeError:
pass
return None | e37b7620bf484ce887e6a75f31592951ed93ac74 | 709,121 |
def contains_rep_info(line):
"""
    Checks whether the line contains a link to the GitHub repo (pretty simple 'algorithm' at the moment)
    :param line: string from a readme file
:return: true if it has link to the github repository
:type line:string
:rtype: boolean
"""
return True if line.find("https://github.com/") != -1 else False | 335e10a654510a4eda7d28d8df71030f31f98ff1 | 709,122 |
def common(list1, list2):
"""
This function is passed two lists and returns a new list containing
those elements that appear in both of the lists passed in.
"""
common_list = []
temp_list = list1.copy()
temp_list.extend(list2)
temp_list = list(set(temp_list))
temp_list.sort()
for i in temp_list:
if (i in list1) and (i in list2):
            common_list.append(i)
    return common_list | 021605a2aad6c939155a9a35b8845992870100f0 | 709,123 |
return common_list | 021605a2aad6c939155a9a35b8845992870100f0 | 709,123 |