def word_flipper(our_string):
"""
Flip the individual words in a sentence
Args:
        our_string(string): String whose individual words will be flipped
Returns:
string: String with words flipped
"""
word_list = our_string.split(" ")
for idx in range(len(word_list)):
word_list[idx] = word_list[idx][::-1] # [index1:index2:step]
return " ".join(word_list) | fd484079407342925fc13583fb1fbee9ee472b14 | 708,675 |
def create_table_string(data, highlight=(True, False, False, False), table_class='wikitable', style=''):
"""
Takes a list and returns a wikitable.
@param data: The list that is converted to a wikitable.
@type data: List (Nested)
@param highlight: Tuple of rows and columns that should be highlighted.
(first row, last row, left column, right column)
@type highlight: Tuple
@param table_class: A string containing the class description.
See wikitable help.
@type table_class: String
@param style: A string containing the style description.
See wikitable help.
@type style: String
"""
last_row = len(data) - 1
last_cell = len(data[0]) - 1
table = '{{| class="{}" style="{}"\n'.format(table_class, style)
for key, row in enumerate(data):
        if (key == 0 and highlight[0]) or (key == last_row and highlight[1]):
row_string = '|-\n! ' + '\n! '.join(cell for cell in row)
else:
row_string = '|-'
cells = ''
for ckey, cell in enumerate(row):
if ckey == 0 and highlight[2]:
cells += '\n! ' + cell
elif ckey == last_cell and highlight[3]:
cells += '\n! ' + cell
else:
cells += '\n| ' + cell
row_string += cells
table += row_string + '\n'
table += '|}'
    return table
from pathlib import Path
import os
def path_to_dnd(path: Path) -> str:
"""Converts a `Path` into an acceptable value for `tkinterdnd2.`"""
# tkinterdnd2 will only accept fs paths with forward slashes, even on Windows.
wants_sep = '/'
if os.path.sep == wants_sep:
return str(path)
else:
        return wants_sep.join(str(path).split(os.path.sep))
def get_logging_format():
"""return the format string for the logger"""
formt = "[%(asctime)s] %(levelname)s:%(message)s"
    return formt
import torch
def list2tensors(some_list):
"""
:math:``
Description:
Implemented:
[True/False]
Args:
(:):
(:):
Default:
Shape:
- Input: list
- Output: list of tensors
Examples::
"""
t_list=[]
for i in some_list:
t_list.append(torch.tensor(i))
    return t_list
import six
from google.protobuf.any_pb2 import Any
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
    return kind
def complement_angle(angle):
""" 90 minus angle, in degrees"""
    return 90 - angle
def parse_custom_commands(command, separator=";"):
"""Parse run custom command string into the commands list
:param str command: run custom [config] command(s)
:param str separator: commands separator in the string
:rtype: list[str]
"""
if not command:
return []
    return command.strip(separator).split(separator)
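
# Illustrative usage (input format assumed): leading/trailing separators are
# stripped before splitting, so a trailing ";" yields no empty entry.
assert parse_custom_commands("ls;pwd;") == ["ls", "pwd"]
assert parse_custom_commands("") == []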
def lower_volatility_band(c, dev_target, band_target, center_target):
"""
| Calculates the lower volatility band
    | Name: lower\_volatility\_band\_\ **c**\ \_times\_\ **band_target.name**\ &\ **dev_target.name**\ \_under\_\ **center_target.name**
:param c: Multiplier constant
:type c: float
:param dev_target: Used for band displacement. Can be a constant or a function
:type dev_target: function or float
:param band_target: Used for band displacement. Can be a constant or a function
:type band_target: function or float
:param center_target: Data column for the band center
:type center_target: str
"""
def return_function(data):
        if hasattr(band_target, "name") and hasattr(dev_target, "name"):
column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target.name}_under_{center_target.name}"
elif hasattr(band_target, "name"):
column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target}_under_{center_target.name}"
else:
column_name = f"lower_volatility_band_{c}_times_{band_target}&{dev_target}_under_{center_target.name}"
if column_name not in data.columns:
            data[column_name] = data[center_target] - c * dev_target * band_target
return data[column_name].copy()
    return return_function
def get_limits(data):
""" Get the x, y ranges of the ST data.
"""
y_min = 1e6
y_max = -1e6
x_min = 1e6
x_max = -1e6
for doc in data:
x = doc["x"]
y = doc["y"]
y_min = y if y < y_min else y_min
y_max = y if y > y_max else y_max
x_min = x if x < x_min else x_min
x_max = x if x > x_max else x_max
    return x_min, x_max, y_min, y_max
def proj_helsinki(x, y):
"""Project Helsinki coordinates into ETRS-GK25 (EPSG:3879).
https://www.hel.fi/helsinki/fi/kartat-ja-liikenne/kartat-ja-paikkatieto/paikkatiedot+ja+-aineistot/koordinaatistot_ja+_korkeudet/koordinaatti_ja_korkeusjarjestelmat # pylint: disable=line-too-long
"""
# pylint: disable=invalid-name
output_epsg = "EPSG:3879"
a = 6654650.14636
b = 25447166.49457
c = 0.99998725362
d = -0.00120230340
e = 0.00120230340
f = 0.99998725362
x, y = a + c * x + d * y, b + e * x + f * y
    return x, y, output_epsg
def pattern_classifier(data, pattern_threshold):
"""Return an array mask passing our selection."""
return data["key_pattern"] > pattern_threshold | 116a7f84a18b57188fb2ce24fa7ecacd1b61c3da | 708,690 |
def is_scalar(a) -> bool:
"""
Tests if a python object is a scalar (instead of an array)
Parameters
----------
a : object
Any object to be checked
Returns
-------
bool
Whether the input object is a scalar
"""
if isinstance(a, (list, tuple)):
return False
if hasattr(a, "__array__") and hasattr(a, "__len__"): # np.array(1) is scalar
return False
    return True
import os
def extract_strings_from_file(filename):
"""
extracts strings from a provided filename
Returns the a list of extracted strings found in a provided filename.
Entries are stripped when processing and lines leading with a comment are
ignored.
Args:
filename: the filename
Returns:
the list of strings
"""
filelist = []
if os.path.isfile(filename):
with open(filename) as f:
for raw_line in f:
line = raw_line.strip()
if not line or line.startswith('#'):
continue
filelist.append(line)
    return filelist
def get_cred_fh(library: str) -> str:
"""
Determines correct SimplyE credential file
"""
if library == "BPL":
return ".simplyE/bpl_simply_e.yaml"
elif library == "NYPL":
return ".simplyE/nyp_simply_e.yaml"
else:
raise ValueError("Invalid library code passsed") | aefea283c171963778bdc34ddf2f2aeb18fd126d | 708,693 |
import json
def payload_from_api_post_event(event):
"""Maps an API event to the expected payload"""
# event = {
# 'timeserie1': [(1, 100), (2, 100)],
# 'timeserie2': [(3, 100), (4, 100)],
# }
body = json.loads(event['body'])
    return body
def format_link_header(link_header_data):
"""Return a string ready to be used in a Link: header."""
links = ['<{0}>; rel="{1}"'.format(data['link'], data['rel'])
for data in link_header_data]
    return ', '.join(links)
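
# Illustrative usage with a hypothetical pagination link:
assert format_link_header(
    [{'link': 'https://api.example.com/items?page=2', 'rel': 'next'}]
) == '<https://api.example.com/items?page=2>; rel="next"'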
import re
def prune_string(string):
"""Prune a string.
- Replace multiple consecutive spaces with a single space.
- Remove spaces after open brackets.
- Remove spaces before close brackets.
"""
return re.sub(
r" +(?=[\)\]\}])",
"",
re.sub(r"(?<=[\(\[\{]) +", "", re.sub(r" +", " ", string)),
    )
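
# Illustrative usage: inner runs of spaces collapse, and the padding inside
# brackets is removed.
assert prune_string("a  ( b )   c") == "a (b) c"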
import json
import requests
def search_full_text(text, ipstreet_api_key):
"""sends input text to /full_text semantic search endpoint. returns json results"""
endpoint = 'https://api.ipstreet.com/v2/full_text'
headers = {'x-api-key': ipstreet_api_key}
payload = json.dumps({'raw_text': str(text),
'q': {
'start_date': '1976-01-01',
'start_date_type': 'application_date',
'end_date': '2017-03-10',
'end_date_type': 'application_date',
'applied': True,
'granted': True,
'expired': True,
'max_expected_results': 500,
'page_size': 500,
}
})
r = requests.post(endpoint, headers=headers, data=payload)
    return r.json()
def _get_go2parents(go2parents, goid, goterm):
"""Add the parent GO IDs for one GO term and their parents."""
if goid in go2parents:
return go2parents[goid]
parent_goids = set()
for parent_goterm in goterm.parents:
parent_goid = parent_goterm.id
parent_goids.add(parent_goid)
parent_goids |= _get_go2parents(go2parents, parent_goid, parent_goterm)
go2parents[goid] = parent_goids
    return parent_goids
def solve_capcha(capcha_str):
"""Function which calculates the solution to part 1
Arguments
---------
capcha_str : str, a string of numbers
Returns
-------
total : int, the sum of adjacent matches
"""
capcha = [int(cc) for cc in list(capcha_str)]
total = 0
for ii in range(len(capcha)):
if capcha[ii] == capcha[ii - 1]:
total += capcha[ii]
    return total
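
# The comparison at index 0 wraps around to the last digit, so the sequence is
# treated as circular (illustrative checks, not from the original source):
assert solve_capcha("1122") == 3
assert solve_capcha("1111") == 4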
import functools
def keyword_decorator(deco):
"""Wrap a decorator to optionally takes keyword arguments."""
@functools.wraps(deco)
def new_deco(fn=None, **kwargs):
if fn is None:
@functools.wraps(deco)
def newer_deco(fn):
return deco(fn, **kwargs)
return newer_deco
else:
return deco(fn, **kwargs)
    return new_deco
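
# Illustrative usage with a hypothetical decorator: after wrapping, it can be
# applied both bare and with keyword arguments.
@keyword_decorator
def tag(fn, label="generic"):
    fn.label = label
    return fn

@tag
def plain():
    pass

@tag(label="special")
def fancy():
    pass

assert plain.label == "generic" and fancy.label == "special"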
def benchmark_summary(benchmark_snapshot_df):
"""Creates summary table for a benchmark snapshot with columns:
|fuzzer|time||count|mean|std|min|25%|median|75%|max|
"""
groups = benchmark_snapshot_df.groupby(['fuzzer', 'time'])
summary = groups['edges_covered'].describe()
summary.rename(columns={'50%': 'median'}, inplace=True)
    return summary.sort_values(('median'), ascending=False)
def org_repos(info):
"""
    Process an organization's repositories.
    :param info: dict
    :return: two lists: the first contains dicts (id, full_name, url, language), the second the languages used
"""
repo_info = []
languages = []
if info:
for repo in info:
temp = {"id": repo["id"], "full_name": repo["full_name"], "url": repo["url"], "language": repo["language"]}
repo_info.append(temp)
languages.append(repo["language"])
    return repo_info, languages
import re
def normalize_spaces(s: str) -> str:
"""
    Return a new string with consecutive whitespace collapsed to single spaces and leading/trailing whitespace stripped.
"""
    return re.sub(r'\s+', ' ', s).strip()
def create_abstract_insert(table_name, row_json, return_field=None):
"""Create an abstracted raw insert psql statement for inserting a single
row of data
:param table_name: String of a table_name
:param row_json: dictionary of ingestion data
:param return_field: String of the column name to RETURNING in statement
:return: String of an insert statement
"""
columns = []
for key, value in row_json.items():
if key in columns:
continue
else:
columns.append(key)
values = [':' + item for item in columns]
values = ', '.join(map(str, values))
list_columns = ', '.join(map(str, columns))
if return_field is not None:
statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \
+ ' VALUES (' + values + ') RETURNING ' + str(return_field)
else:
statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \
+ ' VALUES (' + values + ')'
    return statement
def combine_index(df, n1, n2):
"""將dataframe df中的股票代號與股票名稱合併
Keyword arguments:
Args:
df (pandas.DataFrame): 此dataframe含有column n1, n2
n1 (str): 股票代號
n2 (str): 股票名稱
Returns:
df (pandas.DataFrame): 此dataframe的index為「股票代號+股票名稱」
"""
return df.set_index(df[n1].str.replace(' ', '') + \
        ' ' + df[n2].str.replace(' ', '')).drop([n1, n2], axis=1)
import os
def join(*args):
"""Join multiple path - join('c:', 'pp', 'c.txt') -> 'c:\pp\c.txt'"""
assert len(args) >= 2
ret_arg = args[0]
for arg in args[1:]:
ret_arg = os.path.join(ret_arg, arg)
    return ret_arg
def _str_or_none(value):
"""Helper: serialize value to JSON string."""
if value is not None:
        return str(value)
def convert(secs):
"""Takes a time in seconds and converts to min:sec:msec"""
mins = int(secs // 60)
secs %= 60
msecs = int(round(((secs - int(secs)) * 1000)))
secs = int(secs)
    return f'{mins} mins, {secs} secs, {msecs} msecs'
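
# Illustrative usage: the fractional part of the seconds becomes milliseconds.
assert convert(61.5) == '1 mins, 1 secs, 500 msecs'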
def outer2D(v1, v2):
"""Calculates the magnitude of the outer product of two 2D vectors, v1 and v2"""
    return v1[0]*v2[1] - v1[1]*v2[0]
def bytes_filesize_to_readable_str(bytes_filesize: int) -> str:
"""Convert bytes integer to kilobyte/megabyte/gigabyte/terabyte equivalent string"""
if bytes_filesize < 1024:
return "{} B"
num = float(bytes_filesize)
for unit in ["B", "KB", "MB", "GB"]:
if abs(num) < 1024.0:
return "{:.1f} {}".format(num, unit)
num /= 1024.0
return "{:.1f} {}".format(num, "TB") | cdeb228de80422f541c5fa682422d77a44d19ca2 | 708,712 |
def import_class(path):
"""
    Import a class from a dot-delimited module path. Accepts both dot and
    colon separators for the class portion of the path.
ex::
import_class('package.module.ClassName')
or
import_class('package.module:ClassName')
"""
if ':' in path:
module_path, class_name = path.split(':')
else:
module_path, class_name = path.rsplit('.', 1)
module = __import__(module_path, fromlist=[class_name], level=0)
    return getattr(module, class_name)
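
# Illustrative usage with a standard-library class; both separator styles
# resolve the same object.
assert import_class('collections.OrderedDict') is import_class('collections:OrderedDict')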
def fixture_make_bucket(request):
"""
Return a factory function that can be used to make a bucket for testing.
:param request: The Pytest request object that contains configuration data.
:return: The factory function to make a test bucket.
"""
def _make_bucket(s3_stub, wrapper, bucket_name, region_name=None):
"""
Make a bucket that can be used for testing. When stubbing is used, a stubbed
bucket is created. When AWS services are used, the bucket is deleted after
the test completes.
:param s3_stub: The S3Stubber object, configured for stubbing or AWS.
:param wrapper: The bucket wrapper object, used to create the bucket.
:param bucket_name: The unique name for the bucket.
:param region_name: The Region in which to create the bucket.
:return: The test bucket.
"""
if not region_name:
region_name = s3_stub.region_name
s3_stub.stub_create_bucket(bucket_name, region_name)
# Bucket.wait_until_exists calls head_bucket on a timer until it returns 200.
s3_stub.stub_head_bucket(bucket_name)
bucket = wrapper.create_bucket(bucket_name, region_name)
def fin():
if not s3_stub.use_stubs and wrapper.bucket_exists(bucket_name):
bucket.delete()
request.addfinalizer(fin)
return bucket
    return _make_bucket
def _get_remote_user():
"""
Get the remote username.
Returns
-------
str: the username.
"""
    return input('\nRemote User Name: ')
def num_range(num):
"""
    Use in template language to loop through a numeric range
"""
    return range(num)
def filter_background(bbox, bg_data):
"""
Takes bounding box and background geojson file assumed to be the US states, and outputs a geojson-like dictionary
containing only those features with at least one point within the bounding box, or any state that completely
contains the bounding box.
This tests if a feature contains the bounding box by drawing the box that contains the feature and checking if that
box also contains the bounding box. Because features are odd shapes, this may find that more than one feature
completely contains the bounding box. E.g., if you draw a box around Maryland it will also contain a chunk of West
Virginia. To deal with this, we are allowed to find that multiple states contain the bounding box.
:param bbox: The coordinates of the bounding box as [lon, lat, lon, lat]
:param bg_data: a geojson-like dict describing the background
:return: the features from bg_filename whose borders intersect bbox OR the feature which completely contains bbox
"""
box_lon = [bbox[0], bbox[2]]
box_lat = [bbox[1], bbox[3]]
features = bg_data['features']
in_box = []
for f in features:
starting_len = len(in_box)
# Define points for bounding box around the feature.
feature_max_lat = -90
feature_max_lon = -180
feature_min_lat = 90
feature_min_lon = 180
coordinates = f['geometry']['coordinates']
for group in coordinates:
if len(in_box) > starting_len:
# This feature has already been added
break
# actual points for MultiPolygons are nested one layer deeper than those for polygons
if f['geometry']['type'] == 'MultiPolygon':
geom = group[0]
else:
geom = group
for lon, lat in geom:
# check if any point along the state's borders falls within the bounding box.
if min(box_lon) <= lon <= max(box_lon) and min(box_lat) <= lat <= max(box_lat):
in_box.append(f)
break
# If any point of a feature falls within the bounding box, then the feature cannot contain the box,
# so this only needs to be run if the above if statement is not executed
feature_min_lon = min(feature_min_lon, lon)
feature_min_lat = min(feature_min_lat, lat)
feature_max_lon = max(feature_max_lon, lon)
feature_max_lat = max(feature_max_lat, lat)
# If the box containing a feature also contains the bounding box, keep this feature
# Allow adding more than one because otherwise MD contains boxes in WV, and CA would contain most of NV.
if feature_min_lat < min(box_lat) and feature_max_lat > max(box_lat) and \
feature_min_lon < min(box_lon) and feature_max_lon > max(box_lon):
in_box.append(f)
keepers = {
'type': 'FeatureCollection',
'features': in_box
}
    return keepers
def rename_columns(table, mapper):
""" Renames the table headings to conform with the ketos naming convention.
Args:
table: pandas DataFrame
Annotation table.
mapper: dict
Dictionary mapping the headings of the input table to the
standard ketos headings.
Returns:
: pandas DataFrame
Table with new headings
"""
    return table.rename(columns=mapper)
from typing import Union
import json
def parse_tuple(s: Union[str, tuple]) -> tuple:
"""Helper for load_detections_csv, to parse string column into column of Tuples."""
if isinstance(s, str):
result = s.replace("(", "[").replace(")", "]")
result = result.replace("'", '"').strip()
result = result.replace(",]", "]")
if result:
# print(result)
return tuple(sorted((json.loads(result))))
else:
return tuple()
else:
        return s
def pair_sorter(aln):
"""Get the alignment name and attributes for sorting."""
return (
aln.name,
not aln.first_in_pair,
aln.unmapped,
aln.supplementary_alignment,
        aln.secondary_alignment)
def is_private_bool(script_dict):
""" Returns is_private boolean value from user dictionary object """
    return script_dict['entry_data']['ProfilePage'][0]['graphql']['user']['is_private']
def parse_tuple(tuple_string):
"""
    strip any whitespace then outer characters.
"""
    return tuple_string.strip().strip("\"[]")
def autofs():
"""Fixture data from /proc/mounts."""
data = "flux-support -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 flux-support.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/f/flux-support\numms-remills -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 umms-remills.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/u/umms-remills"
    return data
import shutil
import os
def where(cmd, path=None):
"""
A function to wrap shutil.which for universal usage
"""
raw_result = shutil.which(cmd, os.X_OK, path)
if raw_result:
return os.path.abspath(raw_result)
else:
raise ValueError("Could not find '{}' in the path".format(cmd)) | bbac46386ef955190898e52ee3efa57aac3fa264 | 708,729 |
def score(self, features):
""" return score from ML models"""
assert len(self._models) > 0, 'No valid prediction model'
scores = list()
for feature in features:
# when feature list extraction fails
if not feature:
scores.append(-float('inf'))
continue
item = list()
for ins in self._models:
item.append(ins.inference(feature))
pred = [i for i in item if i]
scores.append(float(sum(pred)/len(pred)))
    return scores
def fromRGB(rgb):
"""Convert tuple or list to red, green and blue values that can be accessed as follows:
a = fromRGB((255, 255, 255))
a["red"]
a["green"]
a["blue"]
"""
return {"red":rgb[0], "green":rgb[1], "blue":rgb[2]} | 205a8f189d177e7af5cdc686e7c52fd2053a3c87 | 708,731 |
import math
def computeTelescopeTransmission(pars, offAxis):
"""
Compute tel. transmission (0 < T < 1) for a given set of parameters
as defined by the MC model and for a given off-axis angle.
Parameters
----------
pars: list of float
        Parameters of the telescope transmission. Len(pars) should be 5.
offAxis: float
Off-axis angle in deg.
Returns
-------
float
Telescope transmission.
"""
_degToRad = math.pi / 180.0
if pars[1] == 0:
return pars[0]
else:
t = math.sin(offAxis * _degToRad) / (pars[3] * _degToRad)
        return pars[0] / (1.0 + pars[2] * t ** pars[4])
def guarantee_trailing_slash(directory_name: str) -> str:
"""Adds a trailling slash when missing
Params:
:directory_name: str, required
A directory name to add trailling slash if missing
Returns:
A post processed directory name with trailling slash
"""
if not directory_name.endswith('/'):
return directory_name + '/'
    return directory_name
from pathlib import Path
def output_file_path(status_id, phase):
"""
"""
BASE_DIR = Path(__file__).resolve().parent.parent
return f"%s/logs/stage/{status_id}-{phase}.txt" %str(BASE_DIR) | 3bcbd80ad95389b9cf37fa66923bacb819ede710 | 708,734 |
def clean(some_string, uppercase=False):
"""
helper to clean up an input string
"""
if uppercase:
return some_string.strip().upper()
else:
        return some_string.strip().lower()
def pretty_print_large_number(number):
"""Given a large number, it returns a string of the sort: '10.5 Thousand' or '12.3 Billion'. """
s = str(number).ljust(12)
if number > 0 and number < 1e3:
pass
elif number >= 1e3 and number < 1e6:
s = s + " (%3.1f Thousand)" % (number * 1.0 / 1e3)
elif number >= 1e6 and number < 1e9:
s = s + " (%3.1f Million)" % (number * 1.0 / 1e6)
elif number >= 1e9 and number < 1e12:
s = s + " (%3.1f Billion)" % (number * 1.0 / 1e9)
elif number >= 1e12 and number < 1e15:
s = s + " (%3.1f Trillion)" % (number * 1.0 / 1e12)
    return s
def find_all_visit(tx):
"""
Method that queries the database to find all VISIT relationships
:param tx: session
:return: nodes of Person , Location
"""
query = (
"""
MATCH (p:Person)-[r:VISIT]->(l:Location)
RETURN p , ID(p) , r , r.start_hour , r.end_hour , r.date , l , ID(l)
"""
)
result = tx.run(query).data()
    return result
def get_defense_type(action: int, game_config) -> int:
"""
Utility method for getting the defense type of action-id
:param action: action-id
:param game_config: game configuration
:return: action type
"""
defense_type = action % (game_config.num_attack_types+1) # +1 for detection
    return defense_type
from typing import Optional
import os
def _get_eula_date(extract_path: str) -> Optional[str]:
"""Get any EULA accept date in the install script, if any.
:param extract_path: The path to the extracted archive.
:return: The EULA date, if any.
"""
install_script = os.path.join(extract_path, "houdini.install")
if not os.path.exists(install_script):
return None
with open(install_script) as handle:
for line in handle:
if line.startswith("LICENSE_DATE"):
return line.split("=")[1].strip()
    return None
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array('2000-01-01', dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
    return x
def return_json():
"""
Sample function that has been given a different name
"""
print("Tooler should render out the JSON value returned")
return {"one": 1, "deep": {"structure": ["example"]}} | bf28fab61cabfc3a4f30736e58490d5df6702dc2 | 708,743 |
def reg2deg(reg):
"""
Converts phase register values into degrees.
    :param reg: Phase register value
    :type reg: int
:return: Number of degrees
:rtype: float
"""
    return reg*360/2**32
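
# Illustrative usage: half of the 32-bit register range is half a turn.
assert reg2deg(2**31) == 180.0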
def plot3dOnFigure(ax, pixels, colors_rgb,axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))):
"""Plot pixels in 3D."""
# Set axis limits
ax.set_xlim(*axis_limits[0])
ax.set_ylim(*axis_limits[1])
ax.set_zlim(*axis_limits[2])
# Set axis labels and sizes
ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
# Plot pixel values with colors given in colors_rgb
ax.scatter(
pixels[:, :, 0].ravel(),
pixels[:, :, 1].ravel(),
pixels[:, :, 2].ravel(),
c=colors_rgb.reshape((-1, 3)), edgecolors='none')
    return ax
import numpy
def ellipse(a, b, center=(0.0, 0.0), num=50):
"""Return the coordinates of an ellipse.
Parameters
----------
a : float
The semi-major axis of the ellipse.
b : float
The semi-minor axis of the ellipse.
center : 2-tuple of floats, optional
The position of the center of the ellipse;
default: (0.0, 0.0)
num : integer, optional
The number of points on the upper side of the ellipse.
The number includes the leading and trailing edges.
Thus, the total number of points will be 2 * (num - 1);
default: 50.
Returns
-------
x : numpy.ndarray
The x-coordinates of the ellipse as a 1D array of floats.
y: numpy.ndarray
The y-coordinates of the ellipse as a 1D array of floats.
"""
xc, yc = center
x_upper = numpy.linspace(xc + a, xc - a, num=num)
y_upper = b / a * numpy.sqrt(a**2 - x_upper**2)
x_lower = numpy.linspace(xc - a, xc + a, num=num)[1:-1]
y_lower = -b / a * numpy.sqrt(a**2 - x_lower**2)
x = numpy.concatenate((x_upper, x_lower))
y = numpy.concatenate((y_upper, y_lower))
    return x, y
import os
def pinghost(host):
"""
Ping target with a 1-second timeout limit
:param str host: Destination to reach. IP address or domain name
:returns: True if reached, otherwise False
"""
host = str(host).split(':')[0] # leave off the port if exists
# print "Pinging"
if os.name == 'posix':
target = "ping -W1 -c 1 " + host + " > /dev/null 2>&1 "
else:
target = "ping " + host + " -w 1000 -n 1 > nul 2>&1"
response = os.system(target)
# Note:original response is 1 for fail; 0 for success; so we flip it
    return not response
def server_hello(cmd, response):
"""Test command
"""
    return response
def _get_controller_of(pod):
"""Get a pod's controller's reference.
This uses the pod's metadata, so there is no guarantee that
the controller object reference returned actually corresponds to a
controller object in the Kubernetes API.
Args:
- pod: kubernetes pod object
Returns: the reference to a controller object
"""
if pod["metadata"].get("ownerReferences"):
for owner_ref in pod["metadata"]["ownerReferences"]:
if owner_ref.get("controller"):
return owner_ref
    return None
import subprocess
def get_cluster_cids():
"""return list of CIDs with pin types"""
output = subprocess.check_output([
'docker-compose', 'exec', '-T', 'cluster', 'ipfs-cluster-ctl', 'pin',
'ls'
])
return [
'-'.join([l.split()[0], l.split()[-1].lower()])
for l in output.decode('utf-8').splitlines()
    ]
def parse_record1(raw_record):
"""Parse raw record and return it as a set of unique symbols without \n"""
    return set(raw_record) - {"\n"}
import math
def colorDistance(col1, col2):
"""Returns a number between 0 and root(3) stating how similar
    two colours are - distance in r,g,b space. Only used to find
names for things."""
return math.sqrt(
(col1.red - col2.red)**2 +
(col1.green - col2.green)**2 +
(col1.blue - col2.blue)**2
    )
def combine(arr):
""" makes overlapping sequences 1 sequence """
def first(item):
return item[0]
def second(item):
return item[1]
if len(arr) == 0 or len(arr) == 1:
return arr
sarr = []
for c, val in enumerate(arr):
sarr.append((val[0], val[1], c))
sarr = sorted(sarr, key = second)
sarr = sorted(sarr, key = first)
chains = [[sarr[0][0], sarr[0][1], [sarr[0][2]]]]
for s, e, c in sarr[1:]: #start, end, counter
if s <= chains[-1][1] +1:
chains[-1][1] = max(e, chains[-1][1])
chains[-1][2].append(c)
else:
chains.append([s, e, [c]])
    return chains
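
# Illustrative usage: (1, 3) and (4, 6) merge because they are adjacent
# (4 <= 3 + 1); each chain also records the indices of its source sequences.
assert combine([(1, 3), (10, 12), (4, 6)]) == [[1, 6, [0, 2]], [10, 12, [1]]]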
def sub_fft(f_fft, g_fft):
    """Subtraction of two polynomials (FFT representation)."""
    # Coefficient-wise subtraction of the two FFT representations.
    return [f - g for f, g in zip(f_fft, g_fft)]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy_demo.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
    return output
import uuid
def generate_code() -> str:
"""Generates password reset code
:return: Password reset code
:rtype: str
"""
    return str(uuid.uuid4())
import pickle
import os
def cached(path: str, validate: bool = False):
"""Similar to ``define``, but cache to a file.
:param path:
the path of the cache file to use
:param validate:
if `True`, always execute the function. The loaded result will be
passed to the function, when the cache exists. In that case the
function should return the value to use. If the returned value is
not identical to the loaded value, the cache is updated with the
new value.
Usage::
@cached('./cache/result')
def dataset():
...
return result
or::
@cached('./cache/result', validate=True)
def model(result=None):
if result is not None:
# running to validate ...
return result
"""
def update_cache(result):
print("save cache", path)
with open(path, "wb") as fobj:
pickle.dump(result, fobj)
def load_cache():
print("load cache", path)
with open(path, "rb") as fobj:
return pickle.load(fobj)
def decorator(func):
if os.path.exists(path):
result = load_cache()
if not validate:
return result
else:
print("validate")
new_result = func(result)
if new_result is not result:
update_cache(new_result)
return new_result
else:
print("compute")
result = func()
update_cache(result)
return result
    return decorator
def play(p1:list[int], p2:list[int]) -> list[int]:
"""Gets the final hand of the winning player"""
while p1 and p2:
a = p1.pop(0)
b = p2.pop(0)
if a > b:
p1 += [a, b]
else:
p2 += [b, a]
    return p1 + p2
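
# Illustrative trace (not from the original source): the higher card wins each
# round, and both cards go to the bottom of the winner's hand, winner's first.
assert play([1, 3], [2, 4]) == [2, 1, 4, 3]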
def marks(family, glyph):
"""
:param family:
:param glyph:
:return: True when glyph has at least one anchor
"""
has_mark_anchor = False
for anchor in glyph.anchors:
if anchor.name:
if anchor.name.startswith("_"):
has_mark_anchor = True
break
    return has_mark_anchor
import os
def check_file(filename):
"""Check if "filename" exists and is a file.
Returns:
True if file exists and is a file.
False if filename==None or is not a file.
"""
file_ok = True
error_mssg = ""
    if filename is None:
error_mssg = "Error: file is missing."
file_ok = False
else:
if not os.path.isfile(filename):
error_mssg = "Error: '"+str(filename)+"' is not a file."
file_ok = False
    return file_ok, error_mssg
import hashlib
def calculate_file_sha256(file_path):
"""calculate file sha256 hash code."""
with open(file_path, 'rb') as fp:
sha256_cal = hashlib.sha256()
sha256_cal.update(fp.read())
        return sha256_cal.hexdigest()
def prod(*args: int) -> int:
"""
This function is wrapped and documented in `_polymorphic.prod()`.
"""
prod_ = 1
for arg in args:
prod_ *= arg
    return prod_
import numpy
def shuffle_and_split_data(data_frame):
"""
Shuffle and split the data into 2 sets: training and validation.
Args:
data_frame (pandas.DataFrame): the data to shuffle and split
Returns:
2 numpy.ndarray objects -> (train_indices, validation_indices)
Each hold the index positions for data in the pandas.DataFrame
"""
shuffled_indices = numpy.random.permutation(len(data_frame))
train_up_to = int(len(data_frame) * 0.7)
train_indices = shuffled_indices[:train_up_to]
validation_indices = shuffled_indices[train_up_to:]
    return train_indices, validation_indices
def Maj(x, y, z):
""" Majority function: False when majority are False
Maj(x, y, z) = (x ∧ y) ⊕ (x ∧ z) ⊕ (y ∧ z)
"""
    return (x & y) ^ (x & z) ^ (y & z)
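
# Operates bitwise, so it also works on whole words: at each bit position the
# output is the majority vote of the three inputs (illustrative check).
assert Maj(0b1100, 0b1010, 0b1001) == 0b1000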
def create_dictionary(timestamp, original_sentence, sequence_switched, err_message, suggestion_list):
"""Create Dictionary Function
Generates and exports a dictionary object with relevant data for website interaction to take place.
"""
if len(suggestion_list) != 0:
err_message_str = "Possible error: " + err_message + "\n \n"
new_dictionary = {
"timestamp": timestamp,
"original_sentence": original_sentence,
"masked_sentence": sequence_switched,
"err_message": err_message,
"possible_corrections": suggestion_list
}
return new_dictionary
else:
        return {}
def rigidBlades(blds, hub=None, r_O=[0,0,0]):
""" return a rigid body for the three blades
All bodies should be in a similar frame
"""
blades = blds[0].toRigidBody()
for B in blds[1:]:
B_rigid = B.toRigidBody()
blades = blades.combine(B_rigid, r_O=r_O)
blades.name='blades'
    return blades
def response_json(status, message, response):
"""
Helper method that converts the given data in json format
:param success: status of the APIs either true or false
:param data: data returned by the APIs
:param message: user-friendly message
:return: json response
"""
data = {
"status": status,
"message": message,
"response": response,
}
    return data
def NDVI(R, NIR):
""" Compute the NDVI
INPUT : R (np.array) -> the Red band images as a numpy array of float
NIR (np.array) -> the Near Infrared images as a numpy array of float
OUTPUT : NDVI (np.array) -> the NDVI
"""
NDVI = (NIR - R) / (NIR + R + 1e-12)
    return NDVI
import re
def getPredictedAnchor(title: str) -> str:
"""Return predicted anchor for given title, usually first letter."""
title = title.lower()
if title.startswith('npj '):
return 'npj series'
title = re.sub(r'^(the|a|an|der|die|das|den|dem|le|la|les|el|il)\s+', '',
title)
    return title[0].upper()
import re
def convert_check_filter(tok):
"""Convert an input string into a filter function.
The filter function accepts a qualified python identifier string
and returns a bool.
The input can be a regexp or a simple string. A simple string must
match a component of the qualified name exactly. A regexp is
matched against the entire qualified name.
Matches are case-insensitive.
Examples::
convert_check_filter('foo')('a.foo.b') == True
convert_check_filter('foo')('a.foobar') == False
convert_check_filter('foo.*')('a.foobar') == False
convert_check_filter('foo.*')('foobar') == True
"""
tok = tok.lower()
if '+' in tok or '*' in tok:
return re.compile(tok, re.I).match
else:
toklist = tok.split('.')
def func(name):
chunks = name.lower().split('.')
if len(toklist) > len(chunks):
return False
for i in range(len(chunks)):
if chunks[i:i + len(toklist)] == toklist:
return True
return False
    return func
def _get_operations(rescale=0.003921, normalize_weight=0.48):
"""Get operations."""
operation_0 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'RandomCrop',
'weight': [32, 32, 4, 4, 4, 4],
'padding_mode': "constant",
'pad_if_needed': False,
'fill_value': 0
}
operation_1 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'Rescale',
'rescale': rescale,
'shift': 0,
'num_classes': 10
}
operation_2 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'Normalize',
'weights': [normalize_weight]
}
    return [operation_0, operation_1, operation_2]
def project(name, param):
"""a tilemill project description, including a basic countries-of-the-world layer."""
return {
"bounds": [-180, -85.05112877980659, 180, 85.05112877980659],
"center": [0, 0, 2],
"format": "png",
"interactivity": False,
"minzoom": 0,
"maxzoom": 22,
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"Stylesheet": ["style.mss"],
"Layer": [
{
"id": "countries",
"name": "countries",
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"geometry": "polygon",
"Datasource": {
"file": "http://mapbox-geodata.s3.amazonaws.com/natural-earth-1.4.0/"
"cultural/10m-admin-0-countries.zip",
"type": "shape"
}
},
],
"scale": 1,
"metatile": 2,
"name": name,
"description": param['properties']['name'],
    }
import torch
def ppg_acoustics_collate(batch):
"""Zero-pad the PPG and acoustic sequences in a mini-batch.
Also creates the stop token mini-batch.
Args:
batch: An array with B elements, each is a tuple (PPG, acoustic).
Consider this is the return value of [val for val in dataset], where
dataset is an instance of PPGSpeechLoader.
Returns:
ppg_padded: A (batch_size, feature_dim_1, num_frames_1) tensor.
input_lengths: A batch_size array, each containing the actual length
of the input sequence.
acoustic_padded: A (batch_size, feature_dim_2, num_frames_2) tensor.
gate_padded: A (batch_size, num_frames_2) tensor. If "1" means reaching
stop token. Currently assign "1" at the last frame and the padding.
output_lengths: A batch_size array, each containing the actual length
of the output sequence.
"""
# Right zero-pad all PPG sequences to max input length.
# x is (PPG, acoustic), x[0] is PPG, which is an (L(varied), D) tensor.
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([x[0].shape[0] for x in batch]), dim=0,
descending=True)
max_input_len = input_lengths[0]
ppg_dim = batch[0][0].shape[1]
ppg_padded = torch.FloatTensor(len(batch), max_input_len, ppg_dim)
ppg_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
curr_ppg = batch[ids_sorted_decreasing[i]][0]
ppg_padded[i, :curr_ppg.shape[0], :] = curr_ppg
# Right zero-pad acoustic features.
feat_dim = batch[0][1].shape[1]
max_target_len = max([x[1].shape[0] for x in batch])
# Create acoustic padded and gate padded
acoustic_padded = torch.FloatTensor(len(batch), max_target_len, feat_dim)
acoustic_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
curr_acoustic = batch[ids_sorted_decreasing[i]][1]
acoustic_padded[i, :curr_acoustic.shape[0], :] = curr_acoustic
gate_padded[i, curr_acoustic.shape[0] - 1:] = 1
output_lengths[i] = curr_acoustic.shape[0]
ppg_padded = ppg_padded.transpose(1, 2)
acoustic_padded = acoustic_padded.transpose(1, 2)
return ppg_padded, input_lengths, acoustic_padded, gate_padded,\
        output_lengths
import torch
def compute_batch_jacobian(input, output, retain_graph=False):
"""
Compute the Jacobian matrix of a batch of outputs with respect to
some input (normally, the activations of a hidden layer).
Returned Jacobian has dimensions Batch x SizeOutput x SizeInput
Args:
input (list or torch.Tensor): Tensor or sequence of tensors
with the parameters to which the Jacobian should be
computed. Important: the requires_grad attribute of input needs to
be True while computing output in the forward pass.
output (torch.Tensor): Tensor with the values of which the Jacobian is
computed
Returns (torch.Tensor): 3D tensor containing the Jacobian of output with
respect to input: batch_size x output_size x input_size.
"""
batch_jacobian = torch.Tensor(output.shape[0], output.shape[1], input.shape[1])
assert output.shape[0] == input.shape[0], \
"Batch size needs to be the same for both input and output"
for batch_idx in range(output.shape[0]):
for i, output_elem in enumerate(output[batch_idx]):
            # Retain the graph for every gradient call except the last one.
            if i < output.shape[1] - 1:
                rg = True
            else:
                rg = retain_graph
gradients = torch.autograd.grad(output_elem, input, retain_graph=rg)[0][batch_idx].detach()
batch_jacobian[batch_idx, i, :] = gradients
    return batch_jacobian
def is_associative(value):
"""Checks if `value` is an associative object meaning that it can be
accessed via an index or key
Args:
value (mixed): Value to check.
Returns:
bool: Whether `value` is associative.
Example:
>>> is_associative([])
True
>>> is_associative({})
True
>>> is_associative(1)
False
>>> is_associative(True)
False
.. versionadded:: 2.0.0
"""
    return hasattr(value, '__getitem__')
def join_audio(audio1, audio2):
"""
>>> join_audio(([1], [4]), ([2, 3], [5, 6]))
([1, 2, 3], [4, 5, 6])
"""
(left1, right1) = audio1
(left2, right2) = audio2
left = left1 + left2
right = right1 + right2
audio = (left, right)
    return audio
def csi_from_sr_and_pod(success_ratio_array, pod_array):
"""Computes CSI (critical success index) from success ratio and POD.
POD = probability of detection
:param success_ratio_array: np array (any shape) of success ratios.
:param pod_array: np array (same shape) of POD values.
:return: csi_array: np array (same shape) of CSI values.
"""
    return (success_ratio_array ** -1 + pod_array ** -1 - 1.) ** -1
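
# Illustrative scalar check via numpy arrays (import assumed): with success
# ratio 0.5 and POD 0.5, CSI = 1 / (2 + 2 - 1) = 1/3.
import numpy as np
assert np.allclose(csi_from_sr_and_pod(np.array([0.5]), np.array([0.5])), 1 / 3)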
def get_account_number(arn):
"""
Extract the account number from an arn.
:param arn: IAM SSL arn
:return: account number associated with ARN
"""
return arn.split(":")[4] | 3d0fe552691ae98cf0dc70bc2055297f01a5d800 | 708,783 |
def first_index_k_zeros_left(qstr, k, P):
"""
For a binary string qstr, return the first index of q with k (mod P) zeros to the left.
Return: index in [0, qstr.length]
"""
num_zeros_left = 0
for j in range(qstr.length+1):
if (num_zeros_left - k) % P == 0:
return j
if j == qstr.length:
raise Exception("No valid position found")
if qstr[j] == 0:
            num_zeros_left += 1
def all_equal(values: list):
"""Check that all values in given list are equal"""
    return all(values[0] == v for v in values)
def _tester(func, *args):
"""
Tests function ``func`` on arguments and returns first positive.
>>> _tester(lambda x: x%3 == 0, 1, 2, 3, 4, 5, 6)
3
>>> _tester(lambda x: x%3 == 0, 1, 2)
None
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
    return None
import math
def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
if len(data) == 0:
return None
n = sum(data)
_op = lambda f: f * math.log(f)
    return - sum(_op(float(i) / n) for i in data)
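
# Uses the natural logarithm, so the result is in nats, not bits: a uniform
# two-outcome distribution gives ln(2) (illustrative check).
assert abs(entropy([1, 1]) - math.log(2)) < 1e-12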
def indexate(points):
"""
Create an array of unique points and indexes into this array.
Arguments:
points: A sequence of 3-tuples
Returns:
An array of indices and a sequence of unique 3-tuples.
"""
pd = {}
indices = tuple(pd.setdefault(tuple(p), len(pd)) for p in points)
pt = sorted([(v, k) for k, v in pd.items()], key=lambda x: x[0])
unique = tuple(i[1] for i in pt)
    return indices, unique
import re
def install_package_family(pkg):
"""
:param: pkg ie asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin
:return: device_type of the installed image ie asr900
"""
img_dev = None
m = re.search(r'(asr\d+)\w*', pkg)
if m:
img_dev = m.group(1)
    return img_dev
from typing import List
import json
import os
def _ignored_jenkins_node_names() -> List[str]:
"""
Ignore nodes with these names
:return: Config list
"""
    return json.loads(os.environ['IGNORED_JENKINS_NODE_NAMES'])
def recipe_clone_message(recipe):
"""
Renders the recipe clone message.
"""
    return dict(recipe=recipe)
def get_drawdowns(cum_returns):
"""
Computes the drawdowns of the cumulative returns.
Parameters
----------
cum_returns : Series or DataFrame, required
a Series or DataFrame of cumulative returns
Returns
-------
Series or DataFrame
"""
cum_returns = cum_returns[cum_returns.notnull()]
highwater_marks = cum_returns.expanding().max()
drawdowns = cum_returns/highwater_marks - 1
    return drawdowns