content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def cube(x):
    """Return x raised to the third power."""
    squared = x * x
    return squared * x
import os
from pathlib import Path
def is_root() -> bool:
    """
    Checks whether the current user is root (or, on Windows, an administrator).
    """
    if os.name == 'nt':
        # Windows heuristic: listing %SystemRoot%\Temp typically requires
        # elevated privileges, so a successful listing is treated as "admin".
        # NOTE(review): this is a permission probe, not an authoritative
        # token check — confirm it is acceptable for the callers.
        try:
            _dummy = list((Path(os.environ.get('SystemRoot', 'C:\\Windows')) / 'Temp').iterdir())
            return True
        except OSError:
            return False
    else:
        # POSIX: effective UID 0 means root.
        return os.geteuid() == 0 | b6fb87fd0a8daab882e506aa5e289d83eb61c242 | 708,007 |
from typing import Tuple
def load_forcings_gauge_metadata(path: str) -> Tuple[float, float, float]:
    """
    Load gauge metadata from the three-line header of a CAMELS forcings file.

    Parameters
    ----------
    path: str
        Path to the forcings file.

    Returns
    -------
    tuple
        (gauge latitude, gauge elevation, basin area [m²])
    """
    with open(path, 'r') as header:
        # The first three lines hold latitude, elevation and basin area.
        latitude, elevation, area = (float(header.readline()) for _ in range(3))
    return latitude, elevation, area
def append_artist(songs, artist):
    """
    Prefix each song title with the artist name using a hyphen
    ("artist - song"). Useful when a description only lists titles,
    which usually means the list belongs to a single artist's album.

    :param list songs: List of song titles (only song title)
    :param str artist: Artist to search for with the song names
    :return list: song titles along with the artist
    """
    return [f'{artist} - {title}' for title in songs]
def get_missing_columns(missing_data):
    """
    Return the column names (index entries) that contain missing data.

    :param missing_data: return of missing_data(df) — a frame with a
        'percent' column indexed by column name
    :return list: columns whose missing-data percentage is above zero
    """
    has_missing = missing_data['percent'] > 0
    return missing_data[has_missing].index.tolist()
def get_function_name(fcn):
    """Return the fully-qualified name of *fcn*.

    Args:
        fcn: a function

    Returns:
        the fully-qualified function name string, such as
        "eta.core.utils.function_name"
    """
    return f"{fcn.__module__}.{fcn.__name__}"
def clean_cases(text):
    """
    Convert *text* to all lowercase.

    :param text: the text to be converted to all lowercase.
    :type: str
    :return: lowercase text
    :type: str
    """
    lowered = text.lower()
    return lowered
def filter_0_alleles(allele_df, allele_num=2):
    """Drop (in place) allele columns seen in fewer than ``allele_num`` strains.

    Returns the same dataframe object for convenience.
    """
    rare_columns = [col for col in allele_df.columns
                    if allele_df[col].sum() < allele_num]
    allele_df.drop(rare_columns, inplace=True, axis=1)
    return allele_df
def gradient_of_rmse(y_hat, y, Xn):
    """
    Return the gradient of the root-mean-square error with respect to the
    parameters of the linear model that produced ``y_hat`` (i.e. a prediction
    of the form Xn.T.dot(theta)).

    Args:
        y_hat (np.array of shape N,): The predictions of the linear model
        y (np.array of shape N,): The "ground-truth" values.
        Xn: design matrix whose transpose maps residuals to parameter space.

    Returns:
        The gradient vector of the RMSE between y_hat and y.
    """
    n_samples = y.shape[0]
    assert n_samples > 0, ('At least one sample is required in order to compute the '
                           'RMSE loss')
    residuals = y - y_hat
    return -2 * Xn.T.dot(residuals) / n_samples
def align_down(x: int, align: int) -> int:
    """
    Round ``x`` down to the nearest multiple of ``align``.

    :return:
        ``y`` such that ``y % align == 0`` and ``y <= x`` and ``(x - y) < align``
    """
    return (x // align) * align
import glob
import csv
def write_colocated_data_time_avg(coloc_data, fname):
    """
    Writes the time averaged data of gates colocated with two radars.

    The two original branches (create vs. append) duplicated the whole
    row-writing loop; they are merged into a single code path that only
    differs in whether the comment lines and CSV header are emitted.

    Parameters
    ----------
    coloc_data : dict
        dictionary containing the colocated data parameters
    fname : str
        file name where to store the data

    Returns
    -------
    fname : str
        the name of the file where data has written
    """
    field_names = [
        'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
        'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
        'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
        'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
        'rad2_PhiDPavg', 'rad2_Flagavg']

    def _make_row(ind, rad1_time):
        # Build one CSV row from the ind-th colocated gate; the two time
        # fields are formatted, all other fields are copied as-is.
        row = {
            'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
            'rad2_time': coloc_data['rad2_time'][ind].strftime('%Y%m%d%H%M%S'),
        }
        for key in field_names:
            if key not in ('rad1_time', 'rad2_time'):
                row[key] = coloc_data[key][ind]
        return row

    # glob (rather than os.path.exists) preserved from the original code.
    new_file = not glob.glob(fname)
    with open(fname, 'w' if new_file else 'a', newline='') as csvfile:
        if new_file:
            csvfile.write('# Colocated radar gates data file\n')
            csvfile.write('# Comment lines are preceded by "#"\n')
            csvfile.write('#\n')
        writer = csv.DictWriter(csvfile, field_names)
        if new_file:
            writer.writeheader()
        for i, rad1_time in enumerate(coloc_data['rad1_time']):
            writer.writerow(_make_row(i, rad1_time))
    return fname
def augment_note_matrix(nmat, length, shift):
    """Pitch-shift the first ``length`` rows of a note matrix in R_base format.

    Column 1 holds the pitch; the input matrix is left untouched.
    """
    shifted = nmat.copy()
    shifted[:length, 1] += shift
    return shifted
def breed_list(request):
    """Fixture: returns the list of all dog breeds."""
    # NOTE(review): reads `request.param`, so this is presumably a
    # parametrized pytest fixture — confirm the @pytest.fixture(params=...)
    # decorator at the definition site.
    return request.param | 29394d8a97444680acc3a0b7ff0f1b2949a5609d | 708,021 |
def tail(file, n=1, bs=1024):
    """Return the last ``n`` lines of ``file`` (as a list, newlines kept).

    Scans backwards in blocks of ``bs`` bytes until enough newlines have
    been seen, then reads the remaining lines forward.

    Fix: the original leaked the file handle on any exception; a context
    manager now guarantees it is closed.

    credit:
    https://www.roytuts.com/read-last-n-lines-from-file-using-python/
    https://github.com/roytuts/python/blob/master/read-lines-from-last/last_lines_file.py
    """
    with open(file) as f:
        f.seek(0, 2)  # jump to end of file
        # At EOF read(1) returns '', so the line count starts at 1.
        lines_found = 1 - f.read(1).count('\n')
        pos = f.tell()
        # Walk backwards block by block until enough newlines are counted.
        while n >= lines_found and pos > 0:
            block = min(bs, pos)
            pos -= block
            f.seek(pos, 0)
            lines_found += f.read(block).count('\n')
        f.seek(pos, 0)
        lines_found = min(lines_found, n)
        return f.readlines()[-lines_found:]
def null_count(df):
    """
    Return the total number of missing (null) values in dataframe ``df``.
    """
    per_column = df.isnull().sum()
    return per_column.sum()
import subprocess
def currentGUIusers():
    """Gets a list of GUI users by parsing the output of /usr/bin/who"""
    gui_users = []
    # Run `who` and capture its stdout; on macOS, lines mentioning
    # 'console' correspond to GUI login sessions.
    proc = subprocess.Popen('/usr/bin/who', shell=False,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[0].decode("UTF-8")
    lines = output.splitlines()
    for line in lines:
        if 'console' in line:
            # The first whitespace-separated field is the user name.
            parts = line.split()
            gui_users.append(parts[0])
    # 10.11 sometimes has a phantom '_mbsetupuser' user. Filter it out.
    users_to_ignore = ['_mbsetupuser']
    gui_users = [user for user in gui_users if user not in users_to_ignore]
    return gui_users | 69c7b62ea3b3759b5efc64c68fe0110f59fda9db | 708,024 |
import glob
import os
import csv
def extract(func):
    """
    Decorator function. Open and extract data from CSV files. Return list of dictionaries.
    :param func: Wrapped function with *args and **kwargs arguments.
    """
    def _wrapper(*args):
        out = []
        # Expects to wrap a method called as (instance, prefix):
        # `instance.directory` is the base folder and `prefix` is an
        # iterable of path components joined onto it as a glob pattern.
        # TODO(review): confirm this calling convention at the call sites.
        instance, prefix = args
        for fname in glob.glob(os.path.join(getattr(instance, 'directory'), *prefix)):
            with open(fname) as g:
                # The wrapped function receives a csv.DictReader via the
                # `data` keyword and must return an iterable of dicts.
                out.extend(func(instance, data=csv.DictReader(g)))
        return out
    return _wrapper | 2a369596b6b26edc7259f27e7f594bd997d04f48 | 708,025 |
import os
def list_all_projects(path):
    """List project names in *path*.

    A project name is a regular file's name truncated at the first dot;
    sub-directories are ignored.
    """
    entries = os.listdir(path)
    return [entry.split('.')[0] for entry in entries
            if os.path.isfile(os.path.join(path, entry))]
def get_trail_max(self, rz_array=None):
    """
    Return the position of the blob maximum. Either in pixel or in (R,Z)
    coordinates if rz_array is passed.
    """
    if rz_array is None:
        return self.xymax
    # Column 1 of xymax is the radial (X) index, which corresponds to R.
    row_idx = self.xymax[:, 0].astype('int')
    col_idx = self.xymax[:, 1].astype('int')
    return rz_array[row_idx, col_idx, :]
from typing import Tuple
from typing import List
def create_annotation(annotation_id: int, image_id: int, category_id: int, is_crowd: int, area: int,
                      bounding_box: Tuple[int, int, int, int], segmentation: List[Tuple[int, int]]) -> dict:
    """
    Pack the given fields into COCO's annotation storage format.

    :param int annotation_id: unique identifier of the annotation
    :param int image_id: identifier of the related image
    :param int category_id: identifier of the related category (annotation class)
    :param int is_crowd:
        "iscrowd": 0 if your segmentation based on polygon (object instance)
        "iscrowd": 1 if your segmentation based uncompressed RLE (crowd)
    :param float area: area occupied by segmentation in pixels
    :param Tuple[float, float, float, float] bounding_box:
        coordinates of bbox in format (x,y,w,h)
    :param list segmentation: polygon coordinates
    :return: dict of the annotation information in COCO format
    """
    annotation = dict()
    annotation["id"] = annotation_id
    annotation["image_id"] = image_id
    annotation["category_id"] = category_id
    annotation["iscrowd"] = is_crowd
    annotation["area"] = area  # float, pixels
    annotation["bbox"] = bounding_box  # [x, y, width, height]
    annotation["segmentation"] = segmentation  # [polygon]
    return annotation
def asset_name(aoi_model, model, fnf=False):
    """Return the standard name of the asset/file for the given AOI and model.

    Name parts are appended in a fixed order depending on the model flags.
    """
    prefix = "kc_fnf" if fnf else "alos_mosaic"
    parts = [f"{prefix}_{aoi_model.name}_{model.year}"]
    if model.filter != "NONE":
        parts.append(model.filter.lower())
    for enabled, tag in ((model.rfdi, "rfdi"), (model.ls_mask, "masked"),
                         (model.dB, "dB"), (model.texture, "texture"),
                         (model.aux, "aux")):
        if enabled:
            parts.append(tag)
    return "_".join(parts)
def rgb2hex(rgb_color):
    """ 'rgb(180, 251, 184)' => '#b4fbb8' (lowercase hex) """
    channels = [int(part) for part in rgb_color.strip('rgb()').split(',')]
    return '#{:02x}{:02x}{:02x}'.format(channels[0], channels[1], channels[2])
def execute(cursor, query):
    """Secure execute for slow nodes"""
    # Retries the query until it succeeds, logging each failure.
    # NOTE(review): the broad `except Exception` plus the unbounded loop can
    # spin forever on a permanently failing query — confirm this is intended
    # (e.g. only transient "node busy" errors are expected here).
    while True:
        try:
            cursor.execute(query)
            break
        except Exception as e:
            print("Database query: {} {}".format(cursor, query))
            print("Database retry reason: {}".format(e))
    return cursor | b46338ab7304737d3b12cb1bd4d4dff9665d0f60 | 708,033 |
import zipfile
def zip_to_gdal_path(filepath):
    """
    If ``filepath`` is a zip archive, return '/vsizip/'-prefixed paths for
    every '.asc' member so GDAL can open them without extraction.
    Returns an empty list for non-zip or unreadable archives.
    """
    gdal_paths = []
    if zipfile.is_zipfile(filepath):
        try:
            with zipfile.ZipFile(filepath) as archive:
                for member in archive.filelist:
                    if member.filename.endswith('.asc'):
                        gdal_paths.append('/vsizip/{0}/{1}'.format(filepath, member.filename))
        except zipfile.BadZipfile:
            pass
    return gdal_paths
def is_eval_epoch(cfg, cur_epoch):
    """
    Determine if the model should be evaluated at the current epoch:
    either the (1-based) next epoch is a multiple of the eval period,
    or it is the final epoch.

    Args:
        cfg (CfgNode): configs. Details can be found in
            sgs/config/defaults.py
        cur_epoch (int): current epoch.
    """
    next_epoch = cur_epoch + 1
    at_period_boundary = next_epoch % cfg.TRAIN.EVAL_PERIOD == 0
    is_last_epoch = next_epoch == cfg.SOLVER.MAX_EPOCH
    return at_period_boundary or is_last_epoch
def _getMissingResidues(lines):
    """Returns the missing residues, if applicable."""
    # `lines` is assumed to map a PDB record name to (index, text) pairs,
    # e.g. lines['REMARK 465'] — TODO(review): confirm against the caller.
    try:
        missing_residues = []
        for i, line in lines['REMARK 465']:
            # Data rows of REMARK 465 have 5 whitespace-separated fields;
            # field 2 is the residue name, 3 the chain, 4 the sequence number.
            if len(line.split()) == 5 and int(line.split()[4]) > 0:
                missing_residues.append("{0:<3s} {1}{2:>4d}".format(line.split()[2], line.split()[3], int(line.split()[4])))
        return missing_residues
    except:
        # NOTE(review): bare except returns a str while the success path
        # returns a list — callers must handle both types.
        return "no missing residue information" | 071c6d792bc703d0379774eb19c09d9599f17c66 | 708,036 |
def get_season(msg, info_fields):
    """Find a season (year) mention in *msg*.

    On the first alias match (years checked in ascending order), record the
    season in info_fields and strip the alias from the message.
    Returns (possibly shortened msg, info_fields).
    """
    seasonDICT = {'2016':['二零一六球季', '二零一六賽季', '2016球季', '2016賽季', '2016年', '2016'],
                  '2017':['二零一七球季', '二零一七賽季', '2017球季', '2017賽季', '2017年', '2017'],
                  '2018':['二零一八球季', '二零一八賽季', '2018球季', '2018賽季', '2018年', '2018'],
                  '2019':['二零一九球季', '二零一九賽季', '2019球季', '2019賽季', '2019年', '2019'],
                  '2020':['二零二零球季', '二零二零賽季', '2020球季', '2020賽季', '2020年', '2020']}
    for season, aliases in seasonDICT.items():
        for alias in aliases:
            if alias in msg:
                info_fields['season'] = season
                return msg.replace(alias, '').strip(), info_fields
    return msg, info_fields
def lsst_exposure_time(bands=''):
    """
    Sample from the LSST exposure time distribution.

    :param bands: comma-separated band letters, e.g. 'u,g,r'
    :return: list of exposure times (one per band); empty list for no bands

    Fix: the default ``bands=''`` previously raised KeyError because
    ``''.split(',')`` yields ``['']`` — an empty string now returns [].
    """
    dist = {'u': 15.0, 'g': 15.0, 'r': 15.0, 'i': 15.0, 'z': 15.0, 'Y': 15.0}
    if not bands:
        return []
    return [dist[b] for b in bands.split(',')]
def bin2hexstring(bin_str):
    """
    Convert a binary digit string to a hex string, one hex digit per
    4-bit chunk (4:1 ratio).

    :param bin_str: binary digit string
    :return: hex string (lowercase, no '0x' prefix)
    """
    nibbles = [bin_str[i:i + 4] for i in range(0, len(bin_str), 4)]
    return ''.join(hex(int(chunk, 2))[2:] for chunk in nibbles)
def check_dict_word(word, target):
    """
    Check whether a dictionary word could match the searching word:
    same length, and every character of ``word`` occurs in ``target``.

    :param word: str, word in dictionary.txt.
    :param target: str, the searching word
    :return: bool — True when lengths match and all characters are in target.

    Fix: the original returned True as soon as it met a character equal to
    the word's LAST character, skipping the remaining checks (e.g.
    'bxb' vs 'abb' wrongly passed); it also fell through returning None
    on a length mismatch.
    """
    # Level one: lengths must match.
    if len(word) != len(target):
        return False
    # Level two: every character must appear somewhere in target.
    return all(ch in target for ch in word)
def get_memory_in_GB(memory_str):
    """Convert a memory string in kB (e.g. '4000000kB') to an 'X GB' string.

    Returns '' when the value cannot be parsed.
    """
    try:
        # Strip the trailing unit ('kB') and parse the number.
        kilobytes = int(memory_str[:-2])
        return '{0} GB'.format(kilobytes / 1000000)
    except (ValueError, TypeError):
        return ''
def filehash(thisfile, filesha):
    """
    Feed the contents of *thisfile* into the hash object *filesha*.

    :param thisfile: filename to hash
    :param filesha: a hashlib hash object (e.g. hashlib.sha1())
    :return: hex digest string, or "" if the file cannot be opened

    Fixes: bare except narrowed to OSError; file closed via context
    manager; file read in fixed-size chunks instead of loading it whole.
    """
    try:
        filehandle = open(thisfile, "rb")
    except OSError:
        return ""
    with filehandle:
        # iter(callable, sentinel) yields chunks until read() returns b"".
        for chunk in iter(lambda: filehandle.read(65536), b""):
            filesha.update(chunk)
    return filesha.hexdigest()
from sys import getsizeof
def object_size(o):
    """
    Thin wrapper around `getsizeof <https://docs.python.org/3/library/sys.html#sys.getsizeof>`_.
    @param o object
    @return shallow size of the object in bytes (referenced objects excluded).
    """
    size_in_bytes = getsizeof(o)
    return size_in_bytes
import time
def current_time():
    """ current_time() -> str

    Return the current local time in the 24-hour clock format
    produced by strftime's '%X' directive, e.g. '14:28:04'.
    """
    now = time.localtime()
    return time.strftime('%X', now)
def prepare_hex_string(number, base=10):
    """
    Convert *number* (a string in the given base) to an uppercase hex
    representation, left-padded with '0' to an even number of digits.
    """
    value = int(number, base)
    digits = format(value, 'X')
    # Pad one zero on the left when the digit count is odd.
    if len(digits) % 2:
        digits = '0' + digits
    return digits
def plotly_figure(figure, id: str):
    """
    Render a plotly figure as an embeddable HTML snippet.

    :param figure: plotly graph object or px figure
    :param id: unique id string of format 'id_xxx' with x representing a number
    :return: html style string containing a plotly figure
    """
    figure_json = figure.to_json()
    template = """
        <div id="""+id+"""></div>
        <script>
            var plotly_data = {}
            Plotly.react("""+id+""", plotly_data.data, plotly_data.layout);
        </script>
        """
    # The single '{}' placeholder in the template receives the figure JSON.
    return template.format(figure_json)
def _add_resources_to_vault_obj(obj, data, columns):
"""Add associated resources to column and data tuples
"""
i = 0
for s in obj.resources:
if obj.resources[i].id:
name = 'resource_id_' + str(i + 1)
data += (obj.resources[i].id,)
columns = columns + (name,)
name = 'resource_type_' + str(i + 1)
data += (obj.resources[i].type,)
columns = columns + (name,)
i += 1
return data, columns | 3a6dd7541ac853a7c62b638abf4d0eeb21bb6cb2 | 708,048 |
def classify_helmet_belt_worn(x):
    """
    Map the integer helmet/seatbelt-worn code to its string description.
    The codes follow the Road Crash Statistics Victoria, 2013 Edition
    document.

    :param x: int value representing the classify helmet belt worn field
    :return: string representation of the integer value
        ('Not Known' for any unrecognised code)
    """
    descriptions = {
        1: 'Seatbelt Worn',
        2: 'Seatbelt Not Worn',
        3: 'Child Restraint Worn',
        4: 'Child Restraint Not Worn',
        5: 'Seatbelt/restraint Not fitted',
        6: 'Crash Helmet Worn',
        7: 'Crash Helmet Not Worn',
        8: 'Not Appropriate',
    }
    return descriptions.get(x, 'Not Known')
import collections
def sort_dataset_by_len(dataset):
    """
    Group dataset items by len(item[1]).

    Returns an OrderedDict mapping length -> list of items of that length,
    with keys ordered from smallest to largest length.
    """
    grouped = collections.OrderedDict()
    for length in sorted({len(item[1]) for item in dataset}):
        grouped[length] = []
    for item in dataset:
        grouped[len(item[1])].append(item)
    return grouped
import copy
import random
def select_random_user_goals(user_goals_no_req_slots, user_goals_with_req_slots, cardinality_no_req, cardinality_req):
    """
    Randomly pick user goals: ``cardinality_no_req`` goals without request
    slots followed by ``cardinality_req`` goals with request slots, each
    deep-copied so the originals cannot be mutated through the result.
    """
    chosen = []
    # sample without request slots first, then with request slots
    chosen.extend(copy.deepcopy(random.sample(user_goals_no_req_slots, cardinality_no_req)))
    chosen.extend(copy.deepcopy(random.sample(user_goals_with_req_slots, cardinality_req)))
    return {'all': chosen}
import click
def optional_tools_or_packages_arg(multiple=False):
    """ Decorate click method as optionally taking in the path to a tool
    or directory of tools or a Conda package. If no such argument is given
    the current working directory will be treated as a directory of tools.
    """
    # click semantics: nargs=-1 accepts any number of TARGET values
    # (exposed as "paths"); nargs=1 accepts a single "path".
    name = "paths" if multiple else "path"
    nargs = -1 if multiple else 1
    return click.argument(
        name,
        metavar="TARGET",
        nargs=nargs,
    ) | 4a34da51b4a644df70c5ce3ea8afb8b86ae2281d | 708,052 |
import numpy
def linear_interpolate_cdf(base_cdf):
    """Linear interpolate regions of straight lines in the CDF.
    Parameters:
        base_cdf (list): n elements of non-decreasing order.
    Returns:
        list of length base_cdf where consecutive elements of straight lines
        are linearly interpolated between the left and right sides.
    """
    target_cdf = list(base_cdf)
    index = 0
    left_val = 0
    while index < len(base_cdf)-1:
        # A run of equal consecutive values marks a flat (plateau) region.
        if base_cdf[index] == base_cdf[index+1]:
            # search for where it ends
            offset = index+1
            while (offset < len(base_cdf)-1 and
                   base_cdf[offset] == base_cdf[offset+1]):
                offset += 1
            # linearly interpolate between index and offset
            right_val = base_cdf[offset]
            # Endpoints are the value just left of the plateau (index-1)
            # and the plateau's right end, so the interpolated values climb
            # strictly between left_val and right_val.
            interp_val = numpy.interp(
                list(range(index, offset+1, 1)),
                [index-1, offset],
                [float(left_val), float(right_val)])
            target_cdf[index:offset+1] = interp_val
            left_val = right_val
            index = offset+1
        else:
            # Rising segment: just remember the value and advance.
            left_val = base_cdf[index]
            index += 1
    return target_cdf | 8f119d1698a44e90253920decf1b3253db9171be | 708,053 |
def hash_str(string: str) -> int:
    """
    Compute a toy hash of *string*: the sum of its character code points.
    """
    return sum(map(ord, string))
def compare_files(file_name1, file_name2):
    """
    Compare two files, line by line, for equality.

    Arguments:
        file_name1 (str or unicode): file name.
        file_name2 (str or unicode): file name.
    Returns:
        bool: True if files are equal, False otherwise.

    Fixes: the original used zip(), which stops at the shorter file, so a
    file that was a strict prefix of the other compared as equal; it also
    called close() redundantly inside the with-block.
    """
    with open(file_name1) as file1, open(file_name2) as file2:
        while True:
            line1 = file1.readline()
            line2 = file2.readline()
            if line1 != line2:
                return False
            # readline() returns '' only at EOF; both exhausted means equal.
            if not line1 and not line2:
                return True
import os
def _get_template_dirs(type="plugin"):
"""Return a list of directories where templates may be located.
"""
template_dirs = [
os.path.expanduser(os.path.join("~", ".rapport", "templates", type)),
os.path.join("rapport", "templates", type) # Local dev tree
]
return template_dirs | b0c6351d0e346310f5d36c6d010332cc1e1b54ea | 708,056 |
def push_gitlab_event_dict():
    """
    Cleared version of the push gitlab webhook content.

    Returns a dict mimicking GitLab's "push" webhook payload (sensitive
    values removed) for use as a test fixture.
    """
    return {
        "object_kind": "push",
        "event_name": "push",
        # Commit range of the push: SHAs before and after.
        "before": "0e27f070efa4bef2a7c0168f07a0ac36ef90d8cb",
        "after": "cb2859505e101785097e082529dced35bbee0c8f",
        "ref": "refs/heads/build-branch",
        "checkout_sha": "cb2859505e101785097e082529dced35bbee0c8f",
        "user_id": 5647360,
        "user_name": "Shreyas Papinwar",
        "user_username": "shreyaspapi",
        "user_email": "",
        "user_avatar": "https://assets.gitlab-static.net/uploads/-"
        "/system/user/avatar/5647360/avatar.png",
        "project_id": 18032222,
        # Project metadata as GitLab sends it.
        "project": {
            "id": 18032222,
            "name": "Hello there",
            "description": "Hehehehe",
            "web_url": "https://gitlab.com/the-namespace/repo-name",
            "git_ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
            "git_http_url": "https://gitlab.com/the-namespace/repo-name.git",
            "namespace": "Testing packit",
            "visibility_level": 20,
            "path_with_namespace": "the-namespace/repo-name",
            "default_branch": "master",
            "homepage": "https://gitlab.com/the-namespace/repo-name",
            "url": "git@gitlab.com:the-namespace/repo-name.git",
            "ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
            "http_url": "https://gitlab.com/the-namespace/repo-name.git",
        },
        # The commits contained in the push (single commit here).
        "commits": [
            {
                "id": "cb2859505e101785097e082529dced35bbee0c8f",
                "message": "Update README.md",
                "title": "Update README.md",
                "timestamp": "2020-06-04T23:14:57+00:00",
                "url": "https://gitlab.com/the-namespace/repo-name/-/commit/"
                "cb2859505e101785097e082529dced35bbee0c8f",
                "author": {"name": "Shreyas Papinwar", "email": "spapinwar@gmail.com"},
                "added": [],
                "modified": ["README.md"],
                "removed": [],
            }
        ],
        "total_commits_count": 1,
        "push_options": {},
        "repository": {
            "name": "Hello there",
            "url": "git@gitlab.com:the-namespace/repo-name.git",
            "description": "Hehehehe",
            "homepage": "https://gitlab.com/the-namespace/repo-name",
            "git_http_url": "https://gitlab.com/the-namespace/repo-name.git",
            "git_ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
            "visibility_level": 20,
        },
    } | 3a0134774f828e233c8b1e3fd2d6b94d6fae699f | 708,057 |
import collections
def sort_dict(d, key=None, reverse=False):
    """
    Sort a dict by value.

    Args:
        d: Input dictionary
        key: Function which takes an tuple (key, object) and returns a value to
            compare and sort by. By default, the function compares the values
            of the dict i.e. key = lambda t : t[1]
        reverse: Allows to reverse sort order.

    Returns:
        OrderedDict object whose keys are ordered according to their value.
    """
    items = list(d.items())
    # Default comparison key: the dict value.
    sort_key = (lambda kv: kv[1]) if key is None else key
    items.sort(key=sort_key, reverse=reverse)
    return collections.OrderedDict(items)
def getReviewRedirect(entity, params):
    """Return the redirect URL used to review the specified entity."""
    entity_id = entity.key().id_or_name()
    return '/{0}/review/{1}'.format(params['url_name'], entity_id)
import sympy
def __sympyToC_Grad(exprs: list, doOpts: bool = False) -> str:
    """ creates C code from a list of sympy functions (somewhat optimized).
    source: https://stackoverflow.com/questions/22665990/optimize-code-generated-by-sympy
    and modified """
    # Common-subexpression elimination: shared subterms become tmp0, tmp1, ...
    tmpsyms = sympy.numbered_symbols("tmp")
    if doOpts:
        # 'basic' optimizations plus unordered output; cheaper ccode later.
        symbols, simple = sympy.cse(exprs, symbols=tmpsyms, optimizations="basic", order='none')
    else:
        symbols, simple = sympy.cse(exprs, symbols=tmpsyms)
    c_code = ""
    # Emit one `double tmpN = ...;` line per eliminated subexpression ...
    for s in symbols:
        c_code += " double " +sympy.ccode(s[0]) + " = " + sympy.ccode(s[1]) + ";\n"
    # ... then assign each reduced expression to out(i).
    for i,s in enumerate(simple):
        c_code += f" out({i}) = " + sympy.ccode(s) + ";\n"
    return c_code | 33a95d99b19458ac7b8dd8d8e4272485b0f5f206 | 708,060 |
def ger(self, y):
    """Compute the outer product of two 1-D tensors (self ⊗ y)."""
    assert self.dim() == 1 and y.dim() == 1, "Outer product must be on 1D tensors"
    column = self.view((-1, 1))
    row = y.view((1, -1))
    return column.matmul(row)
def loadSource(path):
    """Load a list of transport reactions, one per line, stripped. Format:
    R("Macgamb_Transp")
    R("Madnb_Transp")
    R("MalaDb_Transp")...

    Fix: the file is now opened via a context manager so the handle is
    closed even if reading raises.
    """
    with open(path, 'r') as source_file:
        return [line.strip() for line in source_file]
import ast
def transpose_dict(data, data_key):
    """Function: transpose_dict

    Description: Convert the values of selected keys in a list of
        dictionaries to the requested data types, or to None for
        empty/'None' values.

    Arguments:
        (input) data -> Initial list of dictionaries.
        (input) data_key -> Dictionary of keys and data types
            ('int', 'bool' or 'list').
        (output) converted -> Modified list of dictionaries.
    """
    data = list(data)
    data_key = dict(data_key)
    literal_kinds = ("bool", "list")
    converted = []
    for entry in data:
        entry = dict(entry)
        # Only touch keys present in both the entry and the type map.
        for key in set(entry.keys()) & set(data_key.keys()):
            value = entry[key]
            if not value or value == "None":
                entry[key] = None
            elif data_key[key] == "int":
                entry[key] = int(value)
            elif data_key[key] in literal_kinds:
                entry[key] = ast.literal_eval(value)
        converted.append(entry)
    return converted
def aligner_to_symbol(calls):
    """
    Assign symbols to different aligners in the input file
    Set the attribute of the class instances
    return a list of indices for which each aligner is found uniquely and all aligners
    sorted by aligners
    """
    # Marker shapes handed out one per distinct aligner name.
    symbols = ['o', '+', 'x', 'v', '*', 'D', 's', 'p', '8', 'X']
    aligners = sorted(set([c.aligner for c in calls]), reverse=True)
    aligner_to_symbol_dict = {a: s for a, s in zip(aligners, symbols)}
    for c in calls:
        # Side effect: every call object gets a `shape` attribute for plotting.
        c.shape = aligner_to_symbol_dict[c.aligner]
    # Pair each aligner with the index of its first occurrence in `calls`.
    index_and_aligners = zip([[c.aligner for c in calls].index(i) for i in aligners], aligners)
    # Returns a zip of (first-occurrence indices, aligner names), sorted by name.
    return zip(*sorted(index_and_aligners, key=lambda x: x[1])) | b9cef3ae33b6ce84daf78a8bc8ce528f97d7a8a6 | 708,064 |
def convertCRS(powerplants, substations, towers, crs, grid):
    """
    Re-project the towers layer to *crs* and pass the other layers through.

    :param powerplants: returned unchanged (conversion is commented out)
    :param substations: see NOTE below — effectively returned unchanged
    :param towers: re-projected to *crs*
    :param crs: target coordinate reference system
    :return: (substations, powerplants, towers, grid)
    """
    # NOTE(review): the result of this to_crs() call is discarded —
    # GeoDataFrame.to_crs returns a new frame, so `substations` is returned
    # unchanged. Confirm whether `substations = substations.to_crs(crs)`
    # was intended.
    substations.to_crs(crs)
    # powerplants = powerplants.set_crs(crs)
    # powerplants = powerplants.to_crs(crs)
    # print(powerplants.crs)
    towers = towers.to_crs(crs)
    return(substations, powerplants, towers, grid) | 9fcb8c51323c00935ba2c882502a273f2bf532ff | 708,065 |
def read_number(dtype, prompt='', floor=None, ceil=None, repeat=False):
    """Read a number of type *dtype* from stdin within the given bounds.

    Invalid or out-of-range input prints the error; when *repeat* is True
    the prompt is re-issued, otherwise None is returned.
    """
    while True:
        parsed = None
        try:
            candidate = dtype(input(prompt))
            if floor is not None and candidate < floor:
                raise ValueError(f'Number must be no less than {floor}.')
            if ceil is not None and candidate > ceil:
                raise ValueError(f'Number must be no greater than {ceil}.')
            parsed = candidate
        except ValueError as err:
            print(err)
        if parsed is not None or not repeat:
            return parsed
from copy import copy
from numpy import zeros, unique
from itertools import product
def trainModel(label,bestModel,obs,trainSet,testSet,modelgrid,cv,optMetric='auc'):
    """ Train a message classification model """
    # Out-of-fold predictions: pred holds the positive-class probability,
    # fullpred the probabilities for every class.
    pred = zeros(len(obs))
    fullpred = zeros((len(obs),len(unique(obs))))
    model = copy(bestModel.model)
    #find the best model via tuning grid
    for tune in [dict(list(zip(modelgrid, v))) for v in product(*list(modelgrid.values()))]:
        for k in list(tune.keys()):
            setattr(model,k,tune[k])
        i = 0
        for tr, vl in cv:
            # NOTE(review): DataFrame.ix was removed in pandas 1.0 —
            # this needs .loc/.iloc on modern pandas; confirm the pinned
            # pandas version before reuse.
            model.fit(trainSet.ix[tr].values,obs[tr])
            pred[vl] = model.predict_proba(trainSet.ix[vl].values)[:,1]
            fullpred[vl,:] = model.predict_proba(trainSet.ix[vl].values)
            i += 1
        # Let bestModel decide whether this tuning beats the current best.
        bestModel.updateModel(pred,fullpred,obs,model,trainSet.columns.values,tune,optMetric=optMetric)
    #re-train with all training data
    bestModel.model.fit(trainSet.values,obs)
    print(bestModel)
    return {label: {'pred': pred, 'test_pred':bestModel.model.predict_proba(testSet)[:,1]}} | fdf60d23894bfd997cdf7fa82cb59257ad7b2954 | 708,067 |
import sys
def get_cpuinfo():
    """Returns the flags of the processor."""
    # Dispatch to the platform-specific implementation; unknown platforms
    # yield an empty dict.
    # NOTE(review): 'linux2' is the Python 2 value of sys.platform; Python 3
    # reports 'linux', so the Linux branch never matches there — confirm the
    # targeted interpreter version.
    if sys.platform == 'darwin':
        return platforms.osx.get_cpuinfo()
    if sys.platform == 'win32':
        return platforms.win.get_cpuinfo()
    if sys.platform == 'linux2':
        return platforms.linux.get_cpuinfo()
    return {} | 2ac223337d54426d36c9fda8d88f3545c6d4c30a | 708,068 |
def default_thread_index (value, threads):
    """
    Return the index of *value* within the *threads* list.

    :param value: element to locate
    :param threads: list to search
    :return: zero-based index (list.index raises ValueError if absent)
    """
    return threads.index(value)
def isID(value):
    """Heuristically check whether *value* looks like a Ulysses ID,
    i.e. is exactly 22 characters long. Not an exact science, but good
    enough to prevent most mistakes.
    """
    return len(value) == 22
def _extract_codes_from_element_text(dataset, parent_el_xpath, condition=None): # pylint: disable=invalid-name
"""Extract codes for checking from a Dataset. The codes are being extracted from element text.
Args:
dataset (iati.data.Dataset): The Dataset to check Codelist values within.
parent_el_xpath (str): An XPath to locate the element(s) with the attribute of interest.
condition (str): An optional XPath expression to limit the scope of what is extracted.
Returns:
list of tuple: A tuple in the format: `(str, int)` - The `str` is a matching code from within the Dataset; The `int` is the sourceline at which the parent element is located.
"""
# include the condition
if condition:
parent_el_xpath = parent_el_xpath + '[' + condition + ']'
parents_to_check = dataset.xml_tree.xpath(parent_el_xpath)
located_codes = list()
for parent in parents_to_check:
located_codes.append((parent.text, parent.sourceline))
return located_codes | 45e4ec2a61dc38066ad9a71d41e63a48c6ccde23 | 708,071 |
def _do_ecf_reference_data_import(
    import_method,
    widget,
    logwidget=None,
    specification_items=None,
    ecfdate=None,
    datecontrol=None,
):
    """Submit an ECF reference-data import task to the results database.

    import_method -- callable run by the database task runner
    widget -- the manager object for the ecf data import tab
    logwidget -- optional log destination forwarded to the task runner
    specification_items -- optional specification subset for the task
    ecfdate -- date of the ECF file; the import aborts when this is falsy
    datecontrol -- date-control value forwarded to import_method

    Returns True when the task was submitted, False when a prerequisite
    (ECF file, date, or results database) is missing.
    """
    ecffile = widget.datagrid.get_data_source().dbhome
    # The commented code fails if tkinter is compiled without --enable-threads
    # as in OpenBSD 5.7 i386 packages.  The standard build from FreeBSD ports
    # until early 2015 at least, when this change was introduced, is compiled
    # with --enable-threads so the commented code worked.  Not sure if the
    # change in compiler on FreeBSD from gcc to clang made a difference.  The
    # Microsoft Windows' Pythons seem to be compiled with --enable-threads
    # because the commented code works in that environment.  The situation on
    # OS X, and any GNU-Linux distribution, is not known.
    # Comparison with the validate_and_copy_ecf_ogd_players_post_2006_rules()
    # method in the sibling module sqlite3ecfogddataimport, which worked on
    # OpenBSD 5.7 as it stood, highlighted the changes needed.
    # ecfdate = widget.get_ecf_date()
    # Abort early unless both the ECF file and its date are available.
    if not ecffile:
        return False
    if not ecfdate:
        return False
    results = widget.get_appsys().get_results_database()
    if not results:
        return False
    # Delegate the actual import to the database task runner.
    results.do_database_task(
        import_method,
        logwidget=logwidget,
        taskmethodargs=dict(
            ecffile=ecffile,
            ecfdate=ecfdate,
            parent=widget.get_widget(),
            # datecontrol=widget.ecfdatecontrol.get(),
            datecontrol=datecontrol,  # See --enable-threads comment just above.
        ),
        use_specification_items=specification_items,
    )
    return True
def decrement(x):
    """Given a number x, return x - 1, clamped so the result is never
    less than zero."""
    return max(x - 1, 0)
def solver_problem1(digits_list):
    """Count how many output digits are a 1, 4, 7 or 8.

    Those four digits use a unique number of segments (2, 4, 3 and 7
    respectively), so they can be identified by string length alone.
    """
    unique_lengths = {2, 3, 4, 7}
    return sum(
        1
        for digits in digits_list
        for d in digits
        if len(d) in unique_lengths
    )
import decimal
def round_decimal(x, digits=0):
    """Round ``x`` half-up to ``digits`` decimal places.

    Builds the ``Decimal`` from ``str(x)`` so binary float representation
    error does not skew the rounding (e.g. 2.675 -> 2.68, not 2.67).

    Parameters
    ----------
    x : float
        Value to round.
    digits : int
        Number of decimal places to keep; 0 returns an ``int``.

    Returns
    -------
    int or float
        ``int`` when ``digits`` is 0, otherwise a rounded ``float``.
    """
    value = decimal.Decimal(str(x))
    if digits == 0:
        return int(value.quantize(decimal.Decimal("1"), rounding='ROUND_HALF_UP'))
    # The original special-cased digits > 1 with a branch identical to the
    # else branch; the exponent string is the same for any non-zero digits.
    exponent = '1e' + str(-1 * digits)
    return float(value.quantize(decimal.Decimal(exponent), rounding='ROUND_HALF_UP'))
def getPrefix(routetbl, peer_logical):
    """Return the name of the first route whose ``via`` matches
    ``peer_logical``, or None when no route matches."""
    for entry in routetbl:
        if entry.via == peer_logical:
            return entry.name
    return None
def dump_tuple(tup):
    """
    Dump a tuple to a comma-separated string of fg,bg,attr (optional)
    """
    return ','.join(map(str, tup))
def gen_check_box_idx():
    """Generate the pixel coordinates of the three QR-code finder patterns.

    Args:
        None
    Returns:
        list of (row, col) tuples covering each pixel of the three finder
        patterns (top-left, top-right and bottom-left corners).
    """
    coords = []
    # Horizontal edges of the three 7x7 finder squares.
    for col in range(7):
        coords.extend([
            (0, col), (6, col),
            (30, col), (36, col),
            (0, 30 + col), (6, 30 + col),
        ])
    # Vertical edges (corners already covered above).
    for row in range(1, 6):
        coords.extend([
            (row, 0), (row, 6),
            (row, 30), (row, 36),
            (30 + row, 0), (30 + row, 6),
        ])
    # The solid 3x3 centers of each finder square.
    for r in range(3):
        for c in range(3):
            coords.extend([(2 + r, 2 + c), (32 + r, 2 + c), (2 + r, 32 + c)])
    return coords
import json
def json_formatter(result, verbose=False, indent=4, offset=0):
    """Format ``result`` as pretty-printed JSON.

    ``offset`` extra spaces are inserted after every newline so the block
    can be embedded at an indentation level; ``verbose`` is accepted for
    interface compatibility but currently unused.
    """
    padding = "\n" + " " * offset
    return json.dumps(result, indent=indent).replace("\n", padding)
from typing import Any
from typing import Counter
def calc_proportion_identical(lst: Any) -> float:
    """
    Return a value between 0 and 1 describing how uniform the values in
    LST are - higher when more of them are identical.
    """
    # Counter(...).most_common(1) yields [(value, count)] for the modal value.
    top_count = Counter(lst).most_common(1)[0][1]
    # A modal count of 1 means every element is unique.
    if top_count == 1:
        return 0
    return top_count / len(lst)
import os
def getDirectoriesInDir(directory):
    """
    Return a mapping of subdirectory name -> full path for every directory
    directly inside ``directory``.
    """
    return {
        entry: os.path.join(directory, entry)
        for entry in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, entry))
    }
import numpy
def csr_matrix_multiply(S, x):  # noqa
    """Multiplies a :class:`scipy.sparse.csr_matrix` S by an object-array vector x.
    """
    num_rows, _ = S.shape
    result = numpy.empty_like(x)
    for row in range(num_rows):
        # indptr[row]:indptr[row+1] spans this row's stored entries.
        start, end = S.indptr[row], S.indptr[row + 1]
        result[row] = sum(  # noqa pylint:disable=unsupported-assignment-operation
            S.data[nnz] * x[S.indices[nnz]] for nnz in range(start, end)
        )
    return result
def _singleton(name):
"""Returns a singleton object which represents itself as `name` when printed,
but is only comparable (via identity) to itself."""
return type(name, (), {'__repr__': lambda self: name})() | b07003e1716115864bf1914d4b523b36d0f0471f | 708,084 |
import pickle
def fetch_pickle(filename):
    """
    Load and return whatever object was pickled into the given file.

    Parameters:
        filename (str): filename of the pickle file

    Returns:
        variable (any pickle compatible type): the unpickled object.
    """
    with open(filename, 'rb') as picklefile:
        return pickle.load(picklefile)
def check_clockwise(poly):
    """Return True when the sequence of (x, y) polygon vertices is ordered
    clockwise.

    NOTE: counter-clockwise (False) vertex order is reserved for inner-ring
    polygons."""
    # Shoelace formula: a negative signed area means clockwise winding.
    edges = zip(poly, poly[1:] + [poly[0]])
    signed_area = sum(x0 * y1 - x1 * y0 for (x0, y0), (x1, y1) in edges)
    return signed_area < 0
def format_stats(stats):
    """Format statistics for printing to a table"""
    return '\n'.join(f'{key} - {value}' for key, value in stats.items())
def timestamp2str(ts):
    """Convert a Timestamp object to a 'YYYY-MM-DD HH:MM:SS' string."""
    date_part = ts.date().strftime("%Y-%m-%d")
    time_part = ts.time().strftime("%H:%M:%S")
    return f"{date_part} {time_part}"
import os
def compute_file_path(data_path, path, command):
    """Return the computed file path for mocked data.

    Keyword arguments:
    data_path -- the path of the folder that contains the stubbed data
    path -- the URL path (its leading slash is stripped)
    command -- the HTTP verb
    """
    relative = '{0}.{1}.json'.format(path[1:], command)
    return os.path.realpath(os.path.join(data_path, relative))
import time
def getToday(format=3):
    """Return today's date/time as a string in the selected format.

    Unknown format codes fall back to '%Y-%m-%d %H:%M:%S'.
    """
    patterns = {
        1: "%Y%m%d",
        2: "%H:%M",
        3: "%Y/%m/%d",
        4: "%Y/%m/%d %H:%M",
        5: "%y%m%d",
        6: "%Y-%m-%d",
        7: "%Y/%m/%d %H:%M:%S",
        8: "%Y-%m-%d %H:%M",
        9: "%Y-%m-%d %H:%M:%S",
        10: "%Y年%m月%d日 %H:%M",
    }
    pattern = patterns.get(format, "%Y-%m-%d %H:%M:%S")
    now = time.localtime(time.time())
    return time.strftime(pattern, now)
def parse_test_config(doc):
    """Return the root <configuration> element of *doc*.

    Raises:
        RuntimeError: when the document root is not a configuration tag.
    """
    root = doc.documentElement
    if root.tagName != 'configuration':
        raise RuntimeError('expected configuration tag at root')
    return root
import textwrap
def _get_control_vars(control_vars):
    """
    Create the section of control variables

    Parameters
    ----------
    control_vars: str
        Functions to define control variables.

    Returns
    -------
    text: str
        Control variables section and header of model variables section.
    """
    # Control-variables banner plus the boilerplate helpers every generated
    # model needs: _init_outer_references() wires external data into __data,
    # and time() exposes the current simulation time.
    text = textwrap.dedent("""
    ##########################################################################
    #                            CONTROL VARIABLES                           #
    ##########################################################################
    def _init_outer_references(data):
        for key in data:
            __data[key] = data[key]
    def time():
        return __data['time']()
    """)
    # User-supplied control-variable definitions go between the two banners.
    text += control_vars
    # Banner opening the model-variables section for the caller to fill in.
    text += textwrap.dedent("""
    ##########################################################################
    #                            MODEL VARIABLES                             #
    ##########################################################################
    """)
    return text
def get_paybc_transaction_request():
    """Return a stub payment transaction request."""
    return dict(
        clientSystemUrl='http://localhost:8080/abcd',
        payReturnUrl='http://localhost:8081/xyz',
    )
def have_same_items(list1, list2):
    """Check whether ``list1`` and ``list2`` consist of the same number of
    the same elements.

    Parameters
    ----------
    list1 : list[int]
        Ascending-sorted list of unique elements.
    list2 : list[int]
        List of arbitrary length and arbitrary numbers.

    Returns
    -------
    bool
        True when both lists hold exactly the same elements.
    """
    # The original body was a stub that always returned True.  Since list1
    # is already sorted and unique, sorting list2 and comparing catches both
    # differing elements and duplicates (via the length difference).
    return sorted(list2) == list1
def rgb2hex(rgb):
    """Converts an RGB 3-tuple to a hexadecimal color string.

    EXAMPLE
    -------
    >>> rgb2hex((0,0,255))
    '#0000FF'
    """
    return '#' + ''.join('%02X' % channel for channel in rgb)
def create_table(p, table_name, schema):
    """Create a new Prism table.

    Parameters
    ----------
    p : Prism
        Instantiated Prism class from prism.Prism()
    table_name : str
        The name of the table to create.
    schema : list
        A list of dictionaries containing the schema

    Returns
    -------
    If the request is successful, a dictionary containing information about
    the table is returned.
    """
    # A fresh bearer token is required before making the API call.
    p.create_bearer_token()
    return p.create_table(table_name, schema=schema)
def valid_chapter_name(chapter_name):
    """
    Check whether a chapter name is reasonable, i.e. it is a real chapter
    rather than a table-of-contents entry.

    Args:
        chapter_name: candidate chapter title
    Returns:
        bool: False when the name contains a blacklisted token.
    """
    blacklist = ("目录",)  # "table of contents"
    return all(token not in chapter_name for token in blacklist)
def subfield(string, delim, occurrence):
    """
    Extract the specified occurrence of a subfield from ``string`` using
    the given field delimiter.

    eg select subfield('abc/123/xyz','/',0) returns 'abc'
    eg select subfield('abc/123/xyz','/',1) returns '123'
    eg select subfield('abc/123/xyz','/',2) returns 'xyz'
    eg select subfield('abc/123/xyz','/',3) returns ''
    """
    # Matches the behaviour of the equivalent msql/psql helper functions:
    # a string without the delimiter, or an out-of-range occurrence, yields
    # the empty string rather than the whole input.
    if delim not in string:
        return ''
    pieces = string.split(delim)
    try:
        return pieces[occurrence]
    except IndexError:
        return ''
def extract_info(spec):
    """Extract instance information from the instance type SPEC dict."""
    info = {
        'name': spec.get('InstanceTypeId'),
        'cpu': spec.get('CpuCoreCount'),
        'memory': spec.get('MemorySize'),
        'nic_count': spec.get('EniQuantity'),
        'disk_quantity': spec.get('DiskQuantity'),
    }
    # Local-storage details are only present for instance types that have
    # a non-zero local storage amount.
    if spec.get('LocalStorageAmount'):
        info['disk_count'] = spec.get('LocalStorageAmount')
        info['disk_size'] = spec.get('LocalStorageCapacity')
        info['disk_type'] = spec.get('LocalStorageCategory')
    family = spec.get('InstanceTypeFamily')
    # Some special families use NVMe driver for local disks
    info['local_disk_driver'] = 'nvme' if family in ('ecs.i3', 'ecs.i3g') else 'virtio_blk'
    # Some special families use NVMe driver for cloud disks
    info['cloud_disk_driver'] = 'nvme' if family in ('ecs.g7se',) else 'virtio_blk'
    # Some security-enhanced instance families have 50% encrypted memory
    if family in ('ecs.c7t', 'ecs.g7t', 'ecs.r7t'):
        info['memory'] = int(info['memory'] * 0.5)
    return info
def parse_title(title):
    """Parse strings from lineageos json

    :param title: format should be `code - brand phone`
    """
    parts = title.split(' - ')
    device = parts[0]
    # First word is the brand; the rest (possibly multi-word) is the model.
    brand, _, name = parts[1].partition(' ')
    return [brand, name, device, device]
def config_data() -> dict:
    """Dummy config data."""
    rabbit_connection = {
        "user": "guest",
        "passwd": "guest",
        "host": "localhost",
        "port": 5672,
        "vhost": "/",
    }
    queues = {"my_queue": {"settings": {"durable": True}, "limit": 0}}
    queue_limits = {0: ["my_queue"], 1: ["my_other_queue"]}
    smtp = {
        "host": "localhost",
        "user": None,
        "passwd": None,
        "from_addr": "test@test.com",
        "to_addr": ["test@yourtest.com"],
        "subject": "AMQPeek - RMQ Monitor",
    }
    slack = {"api_key": "apikey", "username": "ampeek", "channel": "#general"}
    return {
        "rabbit_connection": rabbit_connection,
        "queues": queues,
        "queue_limits": queue_limits,
        "notifiers": {"smtp": smtp, "slack": slack},
    }
def copy_linear(net, net_old_dict):
    """
    Copy the first linear layer's weight and bias stored within
    net_old_dict into net, and return net.
    """
    source_weight = net_old_dict["linears.0.weight"]
    source_bias = net_old_dict["linears.0.bias"]
    net.linear.weight.data = source_weight.data
    net.linear.bias.data = source_bias.data
    return net
import sys
import logging
import os
def to_relative(path, root, relative):
    """Convert an absolute path to one relative to `relative`, but only
    when the path lives under `root`; other paths are returned unchanged."""
    if sys.platform == 'win32':
        # Windows paths are case-insensitive; normalize before comparing.
        path, root, relative = path.lower(), root.lower(), relative.lower()
    if not path.startswith(root):
        logging.info('%s not under %s' % (path, root))
        return path
    logging.info('%s starts with %s' % (path, root))
    return os.path.relpath(path, relative)
def bdev_nvme_add_error_injection(client, name, opc, cmd_type, do_not_submit, timeout_in_us,
                                  err_count, sct, sc):
    """Add error injection

    Args:
        name: Name of the operating NVMe controller
        opc: Opcode of the NVMe command
        cmd_type: Type of NVMe command. Valid values are: admin, io
        do_not_submit: Do not submit commands to the controller
        timeout_in_us: Wait specified microseconds when do_not_submit is true
        err_count: Number of matching NVMe commands to inject errors
        sct: NVMe status code type
        sc: NVMe status code

    Returns:
        True on success, RPC error otherwise
    """
    params = {'name': name, 'opc': opc, 'cmd_type': cmd_type}
    # Optional arguments are only sent when truthy, matching RPC defaults.
    optional = (
        ('do_not_submit', do_not_submit),
        ('timeout_in_us', timeout_in_us),
        ('err_count', err_count),
        ('sct', sct),
        ('sc', sc),
    )
    for key, value in optional:
        if value:
            params[key] = value
    return client.call('bdev_nvme_add_error_injection', params)
def prune_deg_one_nodes(sampled_graph):
    """ prune out degree one nodes from graph """
    # Collect first so removals don't shift degrees mid-iteration; nodes
    # whose degree only drops to one because of these removals are kept.
    to_remove = [v for v in sampled_graph.nodes() if sampled_graph.degree(v) == 1]
    for v in to_remove:
        sampled_graph.remove_node(v)
    return sampled_graph
def run_sgd(model, epochs):
    """
    Run SGD for a predefined number of epochs, returning the trained model
    weights together with the randomly-initialised starting weights.
    """
    print("Training full network")
    weights_rand_init = model.optimize(epochs=epochs)
    print("Model optimized!!!")
    trained_weights = model.get_model_weights()
    return [trained_weights, weights_rand_init]
def drift_var():
    """
    Concept drift settings, as tuples of:
    1. n_drifts
    2. concept_sigmoid_spacing (None for sudden)
    3. incremental [True] or gradual [False]
    4. recurring [True] or non-recurring [False]
    """
    sudden = (10, None, False, False)
    gradual = (10, 5, False, False)
    incremental = (10, 5, True, False)
    return [sudden, gradual, incremental]
def processed_transcript(df):
    """
    Clean the transcript table: expand the 'value' dict column into
    offer_id / reward / amount columns, drop the extras and replace NaNs.

    PARAMETERS:
        df: transcript dataframe (must contain a 'value' column of dicts)

    RETURNS:
        Cleaned transcript dataframe (modified in place and returned)
    """
    # Pull the individual keys out of the 'value' dictionaries.
    df['offer_id'] = df['value'].apply(lambda v: v.get('offer_id'))
    df['offer id'] = df['value'].apply(lambda v: v.get('offer id'))
    df['reward'] = df['value'].apply(lambda v: v.get('reward'))
    df['amount'] = df['value'].apply(lambda v: v.get('amount'))
    # Some records use 'offer id' instead of 'offer_id'; merge into one column.
    df['offer_id'] = df.apply(
        lambda row: row['offer id'] if row['offer_id'] is None else row['offer_id'],
        axis=1,
    )
    # Drop the now-redundant columns and replace missing values with 0.
    df.drop(['offer id', 'value'], axis=1, inplace=True)
    df.fillna(0, inplace=True)
    return df
def clean_data(list_in):
    """
    Inputs:
        list_in - filtered list of ticket orders; each row is an HTML-ish
                  string containing 'Purchaser Name: ', optional
                  'Special Instructions:', and '<br/>'-separated orders
    Outputs:
        Return list of lists sorted by (last name, first name); each entry
        contains [last name, first name, note, [tickets]]
    """
    notes_list = []
    data_out = []
    for row in list_in:
        # 16 == len('Purchaser Name: '); keep everything after the label.
        trimmed_row = row[row.index('Purchaser Name: ')+16:]
        name = trimmed_row[:trimmed_row.index('<br/>')].strip().title()
        first_name = name[:name.rindex(' ')] #get first name
        last_name = name[name.rindex(' '):] #get last name
        # Skip past the name and its '<br/>' separator.
        trimmed_row = trimmed_row[len(name+'<br/>')+1:]
        if 'Special Instructions:' in row: #get notes
            # 22 chars presumably covers the 'Special Instructions: ' label
            # — TODO confirm against the raw order format.
            note = trimmed_row[22:trimmed_row.index('<br/>')]
            trimmed_row = trimmed_row[trimmed_row.index('<br/>')+5:]
            notes_list.append((last_name,first_name,note))
        else:
            note = ''
        orders = trimmed_row.split('<br/>')
        tickets = []
        for order in orders: #get ticket orders
            # Dues and donations are not tickets; skip them.
            if ('Membership Dues' in order) or ('Donation' in order):
                continue
            else:
                tickets.append(order)
        data_out.append([last_name, first_name, note, tickets])
        # print(last_name, first_name,note,tickets)
        # print()
    # Two stable sorts: first-name pass breaks ties for the last-name pass.
    data_out.sort(key=lambda item: item[1]) #sort by first name (to break last name ties)
    data_out.sort(key=lambda item: item[0]) #sort by last name
    # for idx, note in enumerate(notes_list): #optional print of all notes
    # print(idx,note)
    return data_out
import json
def embed_terms(args, classes, dest, use_cache=True, path_to_json='ebd_cache.json'):
    """
    Embed class strings into word representations.

    :param args: unused; kept for interface compatibility
    :param classes: (list of str) topic classes
    :param dest: (str) path to the per-run cache file (read when
        ``use_cache`` is True, written otherwise)
    :param use_cache: (bool) load embeddings from ``dest`` instead of
        rebuilding them from ``path_to_json``
    :param path_to_json: (str) path to json file containing word embeddings
    :return: tuple of (classes, dict mapping topic -> embedded vector)
    """
    if use_cache:
        with open(dest) as json_file:
            return classes, json.load(json_file)
    # Not using cache: extract this run's vectors from the global set.
    with open(path_to_json) as json_file:
        mappings = json.load(json_file)
    # Fixes vs. the original: removed two stray interactive input() debug
    # calls that blocked execution, and the freshly built cache is now
    # returned so this path matches the use_cache path's return value.
    cache = dict(zip(classes, [mappings[topic] for topic in classes]))
    with open(dest, 'w') as fp:
        json.dump(cache, fp)
    return classes, cache
def matrixmult(A, B):
    """Matrix multiplication function

    This function returns the product of a matrix multiplication given two matrices.
    Let the dimension of the matrix A be: m by n,
    let the dimension of the matrix B be: p by q,
    multiplication will only possible if n = p,
    thus creating a matrix of m by q size.

    Parameters
    ----------
    A : list
        First matrix, in a 2D array format.
    B : list
        Second matrix, in a 2D array format.

    Returns
    -------
    C : list
        The product of the matrix multiplication.

    Examples
    --------
    >>> from .pycgmStatic import matrixmult
    >>> A = [[11,12,13],[14,15,16]]
    >>> B = [[1,2],[3,4],[5,6]]
    >>> matrixmult(A, B)
    [[112, 148], [139, 184]]
    """
    # The result must be m x q: one row per row of A, one column per column
    # of B.  The original allocated the transposed shape (len(B[0]) rows of
    # len(A) columns), which only worked by coincidence when m == q.
    C = [[0 for _ in range(len(B[0]))] for _ in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B[0])):
            for k in range(len(B)):
                C[i][j] += A[i][k]*B[k][j]
    return C
def next_code(value: int, mul: int = 252533, div: int = 33554393) -> int:
    """
    Return the code that follows *value* in the sequence.

    The first code is `20151125`; each subsequent code is the previous one
    multiplied by `252533`, reduced modulo `33554393`.
    """
    product = value * mul
    return product % div