content (string, lengths 35 to 416k) · sha1 (string, length 40) · id (int64, 0 to 710k)
import sys import subprocess def _input_password() -> str: """ Get password input with echo disabled. Similar to getpass() but works with cygwin. """ sys.stdout.write("Password:\n") sys.stdout.flush() subprocess.check_call(["stty", "-echo"]) try: password = input() finally: subprocess.check_call(["stty", "echo"]) return password
8d3dbc3f6221f3a2558dab5617227b2f6e4940ca
1,626
def solution2(arr): """Improved over solution1, but still O(n^2) and flagged #TLE on large inputs.""" if len(arr) == 1: return arr[0] max_sum = float('-inf') l = len(arr) for i in range(l): local_sum = arr[i] local_min = arr[i] max_sum = max(max_sum, local_sum) for j in range(i + 1, l): local_sum += arr[j] local_min = min(local_min, arr[j]) max_sum = max([max_sum, local_sum, local_sum - local_min]) return max_sum
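The nested loop above enumerates every window and optionally drops its minimum element, i.e. the "maximum subarray sum with at most one deletion" problem, and the #TLE note flags it as too slow on large inputs. A linear-time sketch (my naming and structure, not part of the dataset row) that tracks the best run with and without a deletion:

def solution_linear(arr):
    # keep: best sum of a subarray ending here with no deletion
    # dropped: best sum of a subarray ending here with exactly one deletion
    keep, dropped = arr[0], float('-inf')
    best = arr[0]
    for x in arr[1:]:
        dropped = max(dropped + x, keep)  # drop x now, or extend a run that already dropped one
        keep = max(keep + x, x)
        best = max(best, keep, dropped)
    return best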
835240bb4f70e5b6425a6ac0d2a4210e2c8a0ad0
1,628
import os def df_to_hdf5(df, key, dir_path): """ Save the DataFrame object as an HDF5 file. The file is stored in the directory specified and uses the key for the filename and 'h5' as the extension. :param df: DataFrame to save as a file :param key: ID for storage and retrieval :param dir_path: Directory to store the HDF5 data file """ file_path = os.path.join(dir_path, key + '.h5') df.to_hdf(file_path, key, complevel=9, complib='zlib') return file_path
aaf7e2d9f64da6f53b2fc908cc59d9331172c614
1,629
def _get_connection_params(resource): """Extract connection and params from `resource`.""" args = resource.split(";") if len(args) > 1: return args[0], args[1:] else: return args[0], []
87cdb607027774d58d1c3bf97ac164c48c32395c
1,630
import subprocess def download_archive(url, out_path): """Downloads a file from the specified URL to the specified path on disk.""" return subprocess.call(['curl', url, '-o', out_path])
e3c59f542a8fa662169d74428ed98dbf79d3d705
1,631
def import_class(class_object): """ Import a class given a string with its name in the format module.module.classname """ d = class_object.rfind(".") class_name = class_object[d + 1:len(class_object)] m = __import__(class_object[0:d], globals(), locals(), [class_name]) return getattr(m, class_name)
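A quick usage sketch (the dotted path here is just an illustration):

OrderedDict = import_class("collections.OrderedDict")
od = OrderedDict(a=1)
assert list(od) == ['a']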
82df3ed7d646bd423ccefacc00493e917f13c430
1,633
def get_examples(mode='train'): """ dataset[0][0] examples """ examples = { 'train': ({'id': '0a25cb4bc1ab6f474c699884e04601e4', 'title': '', 'context': '第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,' '也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,' '清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,' '仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,' '抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。', 'question': '仙剑奇侠传3第几集上天界', 'answers': ['第35集'], 'answer_starts': [0]}), } return examples[mode]
0b5fb45bcac847cd3f7e7b3e5b264e350c891211
1,634
def prepare_default_result_dict(key, done, nodes): """Prepares the default result `dict` using common values returned by any operation on the DHT. Returns: dict: with keys `(k, d, n)` for the key, done and nodes; `n` is a list of `dict` with keys `(i, a, x)` for id, address, and expiration. """ d = { "k": key, "d": done, } nb = [] for n in nodes: _node = n.getNode() nb.append({ "i": n.getId().toString(), "a": _node.getAddr(), "x": _node.isExpired() }) d["n"] = nb return d
420beb66352fee7b4d38f6b4cf628cbaa86a03df
1,635
def MatchScorer(match, mismatch): """Factory function that returns a score function set to match and mismatch. match and mismatch should both be numbers. Typically, match should be positive and mismatch should be negative. Resulting function has signature f(x,y) -> number. """ def scorer(x, y): if x == y: return match else: return mismatch return scorer
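For example, a conventional +1/-1 scorer:

scorer = MatchScorer(1, -1)
assert scorer('A', 'A') == 1
assert scorer('A', 'G') == -1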
fe3829efc64cb4d9785e52b8af6949c147481902
1,636
def _compute_paddings(height_pad_amt, width_pad_amt, patch_axes): """Convert the total pad amounts to the format needed by tf.pad().""" top_pad = height_pad_amt // 2 bottom_pad = height_pad_amt - top_pad left_pad = width_pad_amt // 2 right_pad = width_pad_amt - left_pad paddings = [[0, 0] for _ in range(4)] paddings[patch_axes[0]] = [top_pad, bottom_pad] paddings[patch_axes[1]] = [left_pad, right_pad] return paddings
3a5154ba0fa6808bc6dc8e20fcb4203324762ba9
1,637
def get_first(somelist, function): """ Returns the first item of somelist for which function(item) is True """ for item in somelist: if function(item): return item return None
81976910c46102d3b15803d215f3bf5a554f9beb
1,638
import sys import locale def get_process_output(process, encoding=None): """Get the output from the process.""" output = process.communicate() returncode = process.returncode if not encoding: try: encoding = sys.stdout.encoding except Exception: encoding = locale.getpreferredencoding() if returncode != 0: raise RuntimeError("Runtime Error: %s" % (output[0].rstrip().decode(encoding, errors='replace'))) return output[0].decode(encoding, errors='replace')
84622c05e84627d0651d21194391c672fb111b6f
1,639
import itertools def remove_duplicates(llist): """ Removes any and all duplicate entries in the specified list. This function is intended to be used during dataset merging and therefore must be able to handle list-of-lists. :param llist: The list to prune. :return: A list of unique elements only. """ if not llist: return [] llist.sort() return [x for x, _ in itertools.groupby(llist)]
cbdf1a4db99a7a5fac37f25776cc1387ed8c54e0
1,640
def nonseq(): """ Return non sequence """ return 1
7c8f4a616a6761153226d961be02f6cf5b0cc54a
1,642
def kubernetes_node_label_to_dict(node_label): """Load Kubernetes node label to Python dict.""" if node_label: label_name, value = node_label.split("=", 1) return {label_name: value} return {}
c856d4e6d1f2169f7028ce842edc881cbca4e783
1,643
import os def get_datafiles(datadir, prefix = ""): """ Recursively scan a directory for all CSV files; prefix accumulates the relative path across recursive calls. """ datafiles = [] for fname in os.listdir(datadir): fpath = os.path.join(datadir, fname) datafile = os.path.join(prefix, fname) if os.path.isdir(fpath): datafiles += get_datafiles(fpath, datafile) elif fname.endswith(".csv"): datafiles.append(datafile) return datafiles
9d975985a7b16af75436f8941881982f8a39d5d7
1,645
import os def load_fonts(folder="fonts/latin"): """Load all fonts in the fonts directories """ fonts = [] if folder is not None: if os.path.isdir(folder): # the folder exists whether it is relative or absolute path for font in os.listdir(folder): if font.split(".")[-1].lower() in ["ttf", "otf"]: fonts.append(os.path.join(folder, font)) return fonts elif os.path.isdir(os.path.join(os.path.dirname(__file__), folder)): # we are working with base folder of this library for font in os.listdir(os.path.join(os.path.dirname(__file__), folder)): if font.split(".")[-1].lower() in ["ttf", "otf"]: fonts.append(os.path.join(os.path.dirname(__file__), folder, font)) return fonts raise Exception("No font folder specified/found!")
87e15b826e99b3d350fcb4ad8e58ac968644a4d0
1,647
def smallest_subarray_with_given_sum(arr, s): """Find the length of the smallest subarray whose sum is >= s. Time: O(n) Space: O(1) >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 3, 2], 7) 2 >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 8], 7) 1 >>> smallest_subarray_with_given_sum([3, 4, 1, 1, 6], 8) 3 """ win_sum = 0 win_start = 0 min_len = 0 for win_end in range(len(arr)): win_sum += arr[win_end] while win_sum >= s: cur_len = win_end - win_start + 1 if min_len == 0 or cur_len < min_len: min_len = cur_len win_sum -= arr[win_start] win_start += 1 return min_len
4a1d63619fc200c32ffae80dc7d404f486efcdd1
1,648
def teraflops_for_accelerator(accel): """ Returns the number of TFLOPs available on a few accelerators, including driver handicaps. Args: accel (str): A string descriptor of which accelerator to use. Must be either "3090" or "V100". Returns: accel_flops (int): an integer of how many TFLOPs are in the accelerator. """ accel_flops = {"3090": 71, "V100": 125} return accel_flops[accel]
a491beb06baf73325e2e7b5f0876e98ea312e2aa
1,650
import numpy as np import copy def copy_ffn(model): """Copy feed forward network model. Args: model: A previously created ffn model Returns: A copy of the model """ #init model as list holding data for each layer start with input layer newmodel = [] newmodel.append({ "layer":0, "n": copy.copy(model[0]['n']), "activation": copy.copy(model[0]["activation"]), "lreg": copy.copy(model[0]["lreg"]), "regval": copy.copy(model[0]["regval"]), "desc": copy.copy(model[0]["desc"]) }) # init weights and biases for hidden layers and declare activation function for layer in range(1, len(model)): newmodel.append({ "layer":layer, "n": copy.copy(model[layer]['n']), "activation": copy.copy(model[layer]["activation"]), "lreg": copy.copy(model[layer]["lreg"]), "regval": copy.copy(model[layer]["regval"]), "desc": copy.copy(model[layer]["desc"]), "weight": np.copy(model[layer]["weight"]), "bias": np.copy(model[layer]["bias"]), "weightdot": np.copy(model[layer]["weightdot"]), "biasdot": np.copy(model[layer]["biasdot"]) }) return newmodel
5bde1163d5d53a75839b15aaa38a28ecc54b195c
1,651
def is_big(label: str) -> bool: """Returns whether or not a cave is large based on its label""" return label.isupper()
7abdb0c5687e7870c96b767dc498e1f3c4ed21fe
1,652
def _name_cleaner(agent_name): """Renames agent_name to prettier string for plots.""" rename_dict = {'correct_ts': 'Correct TS', 'kl_ucb': 'KL UCB', 'misspecified_ts': 'Misspecified TS', 'ucb1': 'UCB1', 'ucb-best': 'UCB-best', 'nonstationary_ts': 'Nonstationary TS', 'stationary_ts': 'Stationary TS', 'greedy': 'greedy', 'ts': 'TS', 'action_0': 'Action 0', 'action_1': 'Action 1', 'action_2': 'Action 2', 'bootstrap': 'bootstrap TS', 'laplace': 'Laplace TS', 'thoughtful': 'Thoughtful TS', 'gibbs': 'Gibbs TS'} if agent_name in rename_dict: return rename_dict[agent_name] else: return agent_name
e874745e804e07e385b377ec0ecd4247640ef6ce
1,653
def add_training_args(parser): """Training arguments.""" group = parser.add_argument_group('train', 'training configurations') group.add_argument('--experiment-name', type=str, default="gpt-345M", help="The experiment name for summary and checkpoint") group.add_argument('--batch-size', type=int, default=4, help='Data Loader batch size') group.add_argument('--gradient-accumulation-steps', type=int, default=1, help='Number of gradient accumulation steps') group.add_argument('--weight-decay', type=float, default=0.01, help='weight decay coefficient for L2 regularization') group.add_argument('--checkpoint-activations', action='store_true', help='checkpoint activation to allow for training ' 'with larger models and sequences') group.add_argument('--checkpoint-num-layers', type=int, default=1, help='chunk size (number of layers) for checkpointing') group.add_argument('--deepspeed-activation-checkpointing', action='store_true', help='uses activation checkpointing from deepspeed') group.add_argument('--epochs', type=int, default=None, help='Number of fine-tuning epochs. Zero results in evaluation only.') group.add_argument('--clip-grad', type=float, default=1.0, help='gradient clipping') group.add_argument('--train-iters', type=int, default=0, help='total number of iterations to train over all training runs') group.add_argument('--label-smoothing', type=float, default=0.0) group.add_argument('--log-interval', type=int, default=100, help='report interval') group.add_argument('--summary-dir', type=str, default="", help="The directory to store the summary") group.add_argument('--seed', type=int, default=1234, help='random seed') # Batch producer arguments group.add_argument('--reset-position-ids', action='store_true', help='Reset position ids after end-of-document token.') group.add_argument('--reset-attention-mask', action='store_true', help='Reset self attention mask after ' 'end-of-document token.') # Learning rate. group.add_argument('--lr-decay-iters', type=int, default=None, help='number of iterations to decay LR over; ' 'if None, defaults to `--train-iters`*`--epochs`') group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine', 'exponential'], help='learning rate decay function') group.add_argument('--lr-decay-ratio', type=float, default=0.1) group.add_argument('--lr', type=float, default=1.0e-4, help='initial learning rate') group.add_argument('--warmup', type=float, default=0.01, help='percentage of data to warmup on (.01 = 1% of all ' 'training iters). Default 0.01') group.add_argument('--switch-linear', action='store_true', help="Switch to linear decay for cosine decay") # model checkpointing group.add_argument('--save', type=str, default=None, help='Output directory to save checkpoints to.') group.add_argument('--new-save-directory', action='store_true') group.add_argument('--save-epoch', type=int, default=1, help='number of epochs between saves') group.add_argument('--save-interval', type=int, default=5000, help='number of iterations between saves') group.add_argument('--no-save-optim', action='store_true', help='Do not save current optimizer.') group.add_argument('--no-save-rng', action='store_true', help='Do not save current rng state.') group.add_argument('--load', type=str, default=None, help='Path to a directory containing a model checkpoint.') group.add_argument('--no-load-optim', action='store_true', help='Do not load optimizer when loading checkpoint.') group.add_argument('--no-load-rng', action='store_true', help='Do not load rng state when loading checkpoint.') group.add_argument('--no-load-lr-scheduler', action='store_true', help='Do not load lr scheduler when loading checkpoint.') group.add_argument('--no-deepspeed-load', action='store_true', help='Do not use deepspeed when loading checkpoint') group.add_argument('--finetune', action='store_true', help='Load model for finetuning. Do not load optimizer ' 'or rng state from checkpoint and set iteration to 0. ' 'Assumed when loading a release checkpoint.') group.add_argument('--resume-dataloader', action='store_true', help='Resume the dataloader when resuming training. ' 'Does not apply to tfrecords dataloader, try resuming ' 'with a different seed in this case.') # distributed training args group.add_argument('--distributed-backend', default='nccl', help='which backend to use for distributed training. One of [gloo, nccl]', choices=['nccl', 'gloo']) group.add_argument('--DDP-impl', default='torch', choices=['local', 'torch', 'none'], help='which DistributedDataParallel implementation to use.') group.add_argument('--local_rank', type=int, default=None, help='local rank passed from distributed launcher') # BlockLM training args group.add_argument('--block-lm', action='store_true', help="whether to use BlockLM pre-training") group.add_argument('--masked-lm', action='store_true', help='whether to use the mlm objective') group.add_argument('--bert-prob', type=float, default=0.5) group.add_argument('--gpt-infill-prob', type=float, default=0.5) group.add_argument('--gpt-min-ratio', type=float, default=0.5) group.add_argument('--gap-sentence-prob', type=float, default=0.0) group.add_argument('--gap-sentence-ratio', type=float, default=0.15) group.add_argument('--avg-block-length', type=int, default=3) group.add_argument('--short-seq-prob', type=float, default=0.0) group.add_argument('--single-span-prob', type=float, default=0.0) group.add_argument('--task-mask', action='store_true', help="Use different mask for generation and blank filling") group.add_argument('--no-shuffle-block', action='store_true', help="do not shuffle the blocks when filling the blank") group.add_argument('--no-block-position', action='store_true', help='Use (rough) absolute positions instead of block positions') group.add_argument('--sentinel-token', action='store_true', help="Use sentinel (mask) tokens to replace 2d position encoding") group.add_argument('--block-mask-prob', type=float, default=0.0) group.add_argument('--context-mask-ratio', type=float, default=0.0) group.add_argument('--random-position', action='store_true', help="Use random start position to cover all the position embeddings") return parser
05c71d77320644fdaf00ef1638e76dbbce60ffb5
1,654
def _uniqueElements(an_iterable): """ :param iterable an_iterable: :return list: has only one occurrence of each element, preserving first-seen order """ used = [] unique = [x for x in an_iterable if x not in used and (used.append(x) or True)] return unique
8290d30e48c3ade4a547d7c3a8cf0c57b8d45b19
1,655
def _bias_scale(x, b, data_format): """The multiplication counter part of tf.nn.bias_add.""" if data_format == 'NHWC': return x * b elif data_format == 'NCHW': return x * b else: raise ValueError('invalid data_format: %s' % data_format)
19e5bb9419827f6e6976b1c5ed3cd40cdd676ad0
1,656
import re def checkTableName(tables): """ Check if table name has an underscore or not.""" bad = set() output = [] for i in tables: if re.search('.*_.*', i): bad.add(i) if bad: output.append("These tables have underscores in the name") for i in bad: output.append(i) output.append("") else: output.append("No malformed table names") output.append("") return (output, bad)
2847c20712e6ce92367772678d058a05b5d10dc3
1,657
def get_wrf_config(wrf_config, start_date=None, **kwargs): """ precedence = kwargs > wrf_config.json > constants """ if start_date is not None: wrf_config['start_date'] = start_date for key in kwargs: wrf_config[key] = kwargs[key] return wrf_config
c9e070b91ab93a7cb81a576aa799537361b7a26f
1,658
def hamming(s1, s2): """Return the hamming distance between 2 DNA sequences""" return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) + abs(len(s1) - len(s2))
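Note the trailing length-difference term: unmatched overhang counts one mismatch per character, e.g.:

assert hamming("GATTACA", "GACTACA") == 1  # one substitution
assert hamming("GAT", "GATTACA") == 4      # 3 aligned matches plus 4 leftover characters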
e3e1f3e9cc883f27d26f00c1b3c9495d29c1a139
1,659
def nasnet_dual_path_scheme_ordinal(module, x, _): """ NASNet specific scheme of dual path response for an ordinal module with dual inputs/outputs in a DualPathSequential module. Parameters: ---------- module : nn.Module A module. x : Tensor Current processed tensor. Returns ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ return module(x), x
aef487a25bc3349f14a112826ee4f8e8912dd324
1,660
import sqlite3 def get_exp_date_stats(db_file_name, Table): """Calculate exp date stats of collection""" conn = sqlite3.connect(db_file_name) c = conn.cursor() c.execute('''SELECT exp, count(exp) FROM {} GROUP BY exp'''.format(Table)) exp_dict = {} results = c.fetchall() for result in results: exp_dict[str(result[0])] = result[1] conn.commit() conn.close() return exp_dict
7641d6309939359c1d790b66a1310b5b78be99a4
1,661
def print_scientific_16(value: float) -> str: """ Prints a value in 16-character scientific notation. This is a sub-method and shouldn't typically be called .. seealso:: print_float_16 for a better method """ if value == 0.0: return '%16s' % '0.' python_value = '%16.14e' % value # -1.e-2 svalue, sexponent = python_value.strip().split('e') exponent = int(sexponent) # removes 0s if abs(value) < 1.: sign = '-' else: sign = '+' # the exponent will be added later... sexp2 = str(exponent).strip('-+') value2 = float(svalue) # the plus 1 is for the sign len_sexp = len(sexp2) + 1 leftover = 16 - len_sexp if value < 0: fmt = "%%1.%sf" % (leftover - 3) else: fmt = "%%1.%sf" % (leftover - 2) svalue3 = fmt % value2 svalue4 = svalue3.strip('0') field = "%16s" % (svalue4 + sign + sexp2) return field
18072bfb5cc51e83f1c26086558abc4019e4737e
1,663
def _interpolate_target(bin_edges, y_vals, idx, target): """Helper to identify when a function y that has been discretized hits value target. idx is the first index where y is greater than the target """ if idx == 0: y_1 = 0. else: y_1 = y_vals[idx - 1] y_2 = y_vals[idx] edge_1 = bin_edges[idx] edge_2 = bin_edges[idx + 1] frac = (target - y_1) / (y_2 - y_1) x = edge_1 + frac * (edge_2 - edge_1) return x
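A worked call: with y rising from 0.0 to 1.0 across the second bin, target 0.25 lands a quarter of the way in, x = 1 + 0.25 * (2 - 1):

assert _interpolate_target([0, 1, 2], [0.0, 1.0], 1, 0.25) == 1.25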
7a84bc846c8446aa7449732fdb60171d6f144863
1,664
def count_path_recursive(m, n): """Count number of paths with the recursive method.""" def traverse(m, n, location=[1, 1]): # return 0 if past edge if location[0] > m or location[1] > n: return 0 # return 1 if at end position if location == [m, n]: return 1 return traverse(m, n, [location[0] + 1, location[1]]) + traverse(m, n, [location[0], location[1] + 1]) return traverse(m, n)
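The bare recursion does exponential work; a memoized sketch (not part of the dataset row) keeps the same counting logic:

from functools import lru_cache

def count_path_memo(m, n):
    @lru_cache(maxsize=None)
    def traverse(i, j):
        if i > m or j > n:    # past the edge
            return 0
        if (i, j) == (m, n):  # reached the end position
            return 1
        return traverse(i + 1, j) + traverse(i, j + 1)
    return traverse(1, 1)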
ad31718d179bf46966117ecfa414807e6d356634
1,665
def likelihood(sent, ai, domain, temperature): """Computes the likelihood of a given sentence according to the given model.""" enc = ai._encode(sent, ai.model.word_dict) score, _, _ = ai.model.score_sent(enc, ai.lang_h, ai.ctx_h, temperature) return score
8332dfc8c2dba18a117768043dff67e632cc22ff
1,667
def _build_xyz_pow(name, pref, l, m, n, shift=2): """ Builds an individual row contraction line. name = pref * xc_pow[l] * yc_pow[m] * zc_pow[n] """ l = l - shift m = m - shift n = n - shift if (pref <= 0) or (l < 0) or (n < 0) or (m < 0): return None mul = " " if pref == 1: ret = name + " =" else: # Basically always an int ret = name + " = %2.1f" % float(pref) mul = " * " if l > 0: ret += mul + "xc_pow[%d]" % (l - 1) mul = " * " if m > 0: ret += mul + "yc_pow[%d]" % (m - 1) mul = " * " if n > 0: ret += mul + "zc_pow[%d]" % (n - 1) mul = " * " if mul == " ": ret += " 1" return ret
0dbae02252b27845e795a586e2e28b58c948fa1d
1,668
def sort_drugs(processed_data, alpha_sort, **kwargs): """ Sorts all drug names, as primary keys of processed data dictionary. Sorting is governed by primary criteria of decreasing cost, then secondary criteria of alphabetical order. Secondary criteria ignores unsafe characters if "alpha_sort" is True; and does not ignore unsafe characters if False. Requires sort_criteria() inner function. Args: processed_data (dictionary): contains all analyzed data. Primary key is drug name (string), and primary value is tuple containing number of prescribers (integer, index 0) and total cost (float, index 1). alpha_sort (boolean): if True, special characters are not considered during sorting. If False, special characters are considered during sorting. safe_char (list of strings): contains all characters considered safe. Returns: all_drugs_sorted (list of strings): contains all drug names in sequential list sorted by drug cost and alphanumeric name. """ def sort_criteria(drug): """ Determines mapped sorting value of cost and alphanumeric name for all drugs, as keys of processed data dictionary. Required by sort_drugs() outer function. Args: drug (string): drug name. Returns: (tuple): ordered and mapped sorting criteria of cost and name. """ # Sets first criteria of decreasing drug cost cost_criteria = - processed_data[drug][1] # Sets second criteria of alphanumeric drug name name_criteria = drug.upper() # If True, does not consider special characters in alphanumeric order if alpha_sort: # Iterates over all characters in drug name for char in drug: # If character is not in safe list, remove from name criteria if char not in safe_char: # Removes special characters name_criteria = name_criteria.replace(char,"") # Returns primary and secondary sorting criteria return (cost_criteria, name_criteria) # Sets safe characters for evaluation of name criteria safe_char = kwargs['ch'] # Sorts drug names by decreasing cost then alphanumeric order all_drugs_sorted = sorted(processed_data, key=sort_criteria) # Returns list of sorted drug names return all_drugs_sorted
aa3727dc52f0204c7c39807982a998cc03fabd2d
1,669
import math def make_axis_angle_matrix(axis, angle): """construct a matrix that rotates around axis by angle (in radians)""" #[RMS] ported from WildMagic4 fCos = math.cos(angle) fSin = math.sin(angle) fX2 = axis[0]*axis[0] fY2 = axis[1]*axis[1] fZ2 = axis[2]*axis[2] fXYM = axis[0]*axis[1]*(1-fCos) fXZM = axis[0]*axis[2]*(1-fCos) fYZM = axis[1]*axis[2]*(1-fCos) fXSin = axis[0]*fSin fYSin = axis[1]*fSin fZSin = axis[2]*fSin return ( fX2*(1-fCos)+fCos, fXYM-fZSin, fXZM+fYSin, fXYM+fZSin, fY2*(1-fCos)+fCos, fYZM-fXSin, fXZM-fYSin, fYZM+fXSin, fZ2*(1-fCos)+fCos )
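A sanity check, assuming the returned 9-tuple is row-major: a 90-degree rotation about z should map the x-axis to the y-axis:

import math
m = make_axis_angle_matrix((0, 0, 1), math.pi / 2)
x_rotated = (m[0], m[3], m[6])  # first column = image of (1, 0, 0)
assert all(abs(a - b) < 1e-12 for a, b in zip(x_rotated, (0.0, 1.0, 0.0)))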
1bef075e63b26559184025a69f47d8c1b6dccf1d
1,670
from pathlib import Path import os import json def find_latest(message_ts: str, post_dir: Path) -> str: """Retrieves the latest POST request timestamp for a given message.""" latest_ts = message_ts for postfile in os.listdir(os.fsencode(post_dir)): if (filename := os.fsdecode(postfile)).endswith('.json'): request_ts = filename[:-len('.json')] if request_ts < latest_ts: continue else: with open(os.path.join(post_dir, filename), 'r') as file: request = json.load(file) if request['container']['message_ts'] == message_ts: if request_ts > latest_ts: latest_ts = request_ts else: continue else: continue return latest_ts
5c5203cf1adc572cf7e9908dcd3c856de7c0f0da
1,671
def get_bio(x, lang='en'): """Get the one-sentence introduction""" bio = x.loc[16][lang] return bio
8c9ddabd2e6ada790af2b85a3fb656291f3ee5bd
1,673
def query_all(): """Queries all matches in Elasticsearch, to be used further for suggesting product names when a user is not aware of them. """ query_all = { "query": {"match_all": {}}, } return query_all
9d15297cf82d813ff0a0688f5c25e2ca6fa145d3
1,674
def greet(lang): """This function is for printing a greeting in some selected languages: Spanish, Swedish, and German""" if lang == 'es': return 'Hola' elif lang == 'ge': return 'Hallo' elif lang == 'sv': return 'Halla' else: return 'Hello'
dcbe0fb39e735666b36780ee8d06b457e0a9541e
1,676
import torch def hamming_dist(y_true, y_pred): """ Calculate the Hamming distance between a given predicted label and the true label. Assumes inputs are torch Variables! Args: y_true (autograd.Variable): The true label y_pred (autograd.Variable): The predicted label Returns: (float): The Hamming distance between the two vectors """ # Make sure y_pred is rounded to 0/1 y_pred = torch.round(y_pred) result = torch.mean(torch.abs(y_true - y_pred), dim=1) result = torch.mean(result, dim=0) return float(result.data.cpu().numpy())
0edda102820626b824861ac0f05d4d77f5def432
1,677
import re def rmchars(value): """Remove special characters from alphanumeric values except for period (.) and negative (-) characters. :param value: Alphanumeric value :type value: string :returns: Alphanumeric value stripped of any special characters :rtype: string >>> import utils >>> utils.rmchars(value = "*6.5_") '6.5' >>> utils.rmchars(value = "ICE") 'ICE' >>> utils.rmchars(value = "-4.2") '-4.2' >>> utils.rmchars(value = "%&!@#8.32&#*;") '8.32' """ value = re.sub("[^A-Za-z0-9.-]+", "", value) return value
63428103f7da4184c6d9f33a9d05b02ce17f2448
1,679
def _filename_pattern(ext): """Returns an re matching native or tfrecord files of format `ext`.""" return r".*\.{}(\.tfrecord)?(\.gz)?".format(ext)
6ec5a86dbba2432293451ca7dff0a0d1d5091bf0
1,681
def _extract_protocol_layers(deserialized_data): """ Removes unnecessary values from packets dictionaries. :param deserialized_data: Deserialized data from tshark. :return: List of filtered packets in dictionary format. """ packets_filtered = [] for packet in deserialized_data: packets_filtered.append(packet["_source"]["layers"]) return packets_filtered
3c3a899909c5278b29ffb402ccb4d8dde24fce3a
1,682
def has_xml_header(filepath): """ Return True if the first line of the file starts with <?xml :param filepath: :return: """ with open(filepath) as xml_file: return xml_file.readline().lstrip().startswith('<?xml')
21fdbdf36cf08ca18d8a0f0d7f7d2201b243c558
1,684
def Get_Histogram_key(qubitOperator): """ Function to obtain histogram key string for Cirq Simulator. e.g. PauliWord = QubitOperator('X0 Z2 Y3', 0.5j) returning: histogram_string = '0,2,3' Args: qubitOperator (openfermion.ops._qubit_operator.QubitOperator): QubitOperator Returns: histogram_string (str): Returns string corresponding to histogram key (required for Cirq simulator) """ qubit_No, PauliStr = zip(*list(*qubitOperator.terms.keys())) histogram_string = ','.join([str(i) for i in qubit_No]) return histogram_string
f574f7b3f6c43de7b3121d4e49240a84a4bcfdfc
1,686
def get_column_labels(): """ This function generates a list of column names for the extracted features that are returned by the get_features function. """ # list the names of the extracted features feature_labels = ["amplitude_envelope", "root_mean_square_energy", "zero_crossing_rate", "band_energy_ratio", "spectral_centroid", "spectral_bandwidth", "spectral_contrast", "spectral_flatness", "spectral_rolloff", "spectral_rolloff_99", "spectral_rolloff_01"] # list the names of the used descriptive statistics measure_suffixes = ["_mean", "_min", "_max", "_std"] # create a list to append the generated column names to columns = ["row_index"] # generate some labels and append them to the list columns.extend([l+s for l in feature_labels for s in measure_suffixes]) # append labels for the distributed AE columns.extend(["amplitude_envelope_f1", "amplitude_envelope_f2", "amplitude_envelope_f3", "amplitude_envelope_f4", "amplitude_envelope_f5"]) # append labels for the distributed RMS columns.extend(["root_mean_square_energy_f0", "root_mean_square_energy_f1", "root_mean_square_energy_f2", "root_mean_square_energy_f3", "root_mean_square_energy_f4", "root_mean_square_energy_f5", "root_mean_square_energy_f6", "root_mean_square_energy_f7", "root_mean_square_energy_f8", "root_mean_square_energy_f9", "root_mean_square_energy_f10"]) # append labels for the distributed ZCR columns.extend(["zero_crossing_rate_f0", "zero_crossing_rate_f1", "zero_crossing_rate_f2", "zero_crossing_rate_f3", "zero_crossing_rate_f4", "zero_crossing_rate_f5", "zero_crossing_rate_f6", "zero_crossing_rate_f7", "zero_crossing_rate_f8", "zero_crossing_rate_f9", "zero_crossing_rate_f10"]) return columns
c140ced9c4344bd7a4029d331d50ebe0750fac0a
1,687
def corr_finder(X, threshold): """ For each variable, find the independent variables that are equal to or more highly correlated than the threshold with the current variable Parameters ---------- X : pandas Dataframe Contains only independent variables and desired index threshold: float < 1 Minimum level of correlation to search for Returns ------- Dictionary with the keys as independent variable indices and values as a list of variables with a correlation greater than or equal to the threshold. Correlation Matrix """ corr_matrix = X.corr(method='kendall') #create the correlation matrix corr_dic = {} for row_name, ser in corr_matrix.iterrows(): #search through each row corr_list = [] #list of variables past/at the threshold for idx, val in ser.items(): #search through the entries of each row if (abs(val) > threshold) and (abs(val) != 1): #if the variable correlates past/at the threshold corr_list.append(idx) if len(corr_list) > 0: corr_dic[row_name] = corr_list return corr_dic, corr_matrix
3b32a3eacb721ff09f6b5614c0ada82df814d5fa
1,688
def base_conv(num, base): """Convert a non-negative integer to its string representation in the given base; each digit is written as a decimal number, so only bases up to 10 produce conventional strings.""" if num//base == 0: return str(num%base) else: return (base_conv(num//base, base) + str(num%base))
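Usage, illustrating the decimal-digit limitation noted in the docstring:

assert base_conv(10, 2) == '1010'
assert base_conv(255, 8) == '377'
# base 16 concatenates decimal remainders: base_conv(255, 16) == '1515', not 'ff'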
9fcc28ccfe8ba80d974cc4012aad456bfb8c9544
1,690
def origin_trial_function_call(feature_name, execution_context=None): """Returns a function call to determine if an origin trial is enabled.""" return 'RuntimeEnabledFeatures::{feature_name}Enabled({context})'.format( feature_name=feature_name, context=execution_context if execution_context else "execution_context")
201dbe8449373dbad0144633350d3e6adbb58b80
1,691
def get_bit(byteval, index) -> bool: """retrieve bit value from byte at provided index""" return (byteval & (1 << index)) != 0
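Index 0 is the least significant bit:

assert get_bit(0b1010, 1) is True
assert get_bit(0b1010, 0) is False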
1fe020449ae2ae2513073835db6f75b24e558fdb
1,692
import ast import sys def ast_parse_node(node): """ :param ast.Node node: an ast node representing an expression of variable :return ast.Node: an ast node for: _watchpoints_obj = var if <var is a local variable>: # watch(a) _watchpoints_localvar = "a" elif <var is a subscript>: # watch(a[3]) _watchpoints_parent = a _watchpoints_subscr = 3 elif <var is an attribute>: # watch(a.b) _watchpoints_parent = a _watchpoints_attr = "b" """ root = ast.Module( body=[ ast.Assign( targets=[ ast.Name(id="_watchpoints_obj", ctx=ast.Store()) ], value=node ) ], type_ignores=[] ) if type(node) is ast.Name: root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_localvar", ctx=ast.Store()) ], value=ast.Constant(value=node.id) ) ) elif type(node) is ast.Subscript: root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_parent", ctx=ast.Store()) ], value=node.value ) ) if sys.version_info.minor <= 8 and type(node.slice) is ast.Index: value_node = node.slice.value elif sys.version_info.minor >= 9 and type(node.slice) is not ast.Slice: value_node = node.slice else: raise ValueError("Slice is not supported!") root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_subscr", ctx=ast.Store()) ], value=value_node ) ) elif type(node) is ast.Attribute: root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_parent", ctx=ast.Store()) ], value=node.value ) ) root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_attr", ctx=ast.Store()) ], value=ast.Constant(value=node.attr) ) ) ast.fix_missing_locations(root) return root
22b3b6fed61e18ed6dc742040a365ebca8847fd5
1,693
def toint16(i): """ Convert a number to a hexadecimal string of length 2 """ return f'{i:02x}'
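Despite the name, this is a two-character lowercase hex formatter:

assert toint16(255) == 'ff'
assert toint16(7) == '07'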
3effd2b3f011a962beac19682ad29e930eb0f057
1,695
def get_cpu_cores(): """Get information about each CPU core. Returns: On success, a tuple: the first element is a list holding each core's model name, and the second is the length of that list, i.e. the total number of CPU cores in the machine. Returns None if nothing was collected. """ cpu_cores = [] with open('/proc/cpuinfo') as f: for line in f: info = line.strip() if info.startswith('model name'): model_name = info.split(':')[1].strip() cpu_cores.append(model_name) if cpu_cores: return cpu_cores, len(cpu_cores) return None
ad66faac3a956b1922173263415890bc543e0bba
1,696
def heuristical_lengths(items): """ heuristical_lengths tries to derive the lengths of the content of items. It always returns a list. a) If typeof(items) is a string, it'll return [len(items)] b) If typeof(items) is a dict, it'll return [len(items)] c) If typeof(items) is either list or tuple, it'll best case try to iterate over each element and record those lengths and return them all flattened. If it can't retrieve the lengths yet len(items) > 0, then it will return [len(items)] d) If items has the '__len__' attribute, it'll return [len(items)] e) Otherwise if it can't derive the type, it'll return [] """ if items is None: return [] elif isinstance(items, str): return [len(items)] elif isinstance(items, dict): return [len(items)] elif isinstance(items, tuple) or isinstance(items, list): lengths = [] for item in items: i_lengths = heuristical_lengths(item) lengths.extend(i_lengths) # In the best case, if len(lengths) == 0 # yet len(items) > 0, just use len(items) if len(lengths) == 0 and len(items) > 0: lengths = [len(items)] return lengths elif hasattr(items, '__len__'): return [len(items)] elif hasattr(items, '__iter__'): lengths = [] itr = iter(items) for it in itr: it_lengths = heuristical_lengths(it) lengths.extend(it_lengths) return lengths else: return []
94a0759bcdc2e57431e8524f164a51f2091b6e61
1,701
def next(space, w_arr): """ Advance the internal array pointer of an array """ length = w_arr.arraylen() current_idx = w_arr.current_idx + 1 if current_idx >= length: w_arr.current_idx = length return space.w_False w_arr.current_idx = current_idx return w_arr._current(space)
668fec305ed6bbe05895f317e284c7d2e4f83189
1,702
def clean_record(raw_string: str) -> str: """ Removes all unnecessary signs from a raw_string and returns it :param raw_string: folder or file name to manage :return: clean value """ for sign in ("'", '(', ')', '"'): raw_string = raw_string.replace(sign, '') return raw_string.replace(' ', '-').replace('--', '-')
ea484934dc10da879ede883287fc1d650cda74b8
1,704
import csv def read_manifest_from_csv(filename): """ Read the ballot manifest into a list in the format ['batch id , number of ballots'] from the CSV file named filename """ manifest = [] with open(filename, newline='') as csvfile: reader = csv.reader(csvfile, delimiter = ",") for row in reader: # row.remove(row[1]) batch = " , ".join(row) manifest.append(batch) return manifest[1:]
b04b6a1b20512c27bb83a7631346bc6553fdc251
1,705
def _find_data_between_ranges(data, ranges, top_k): """Finds the rows of the data that fall between each range. Args: data (pd.Series): The predicted probability values for the positive class. ranges (list): The threshold ranges defining the bins. Should include 0 and 1 as the first and last value. top_k (int): The number of row indices per bin to include as samples. Returns: list(list): Each list corresponds to the row indices that fall in the range provided. """ results = [] for i in range(1, len(ranges)): mask = data[(data >= ranges[i - 1]) & (data < ranges[i])] if top_k != -1: results.append(mask.index.tolist()[: min(len(mask), top_k)]) else: results.append(mask.index.tolist()) return results
323986cba953a724f9cb3bad8b2522fc711529e5
1,706
def path_inclusion_filter_fn(path, param, layer): """Returns whether or not layer name is contained in path.""" return layer in path
c93aa83e67c600cd83d053d50fbeaee4f7eebf94
1,709
import socket import time def is_tcp_port_open(host: str, tcp_port: int) -> bool: """Checks if the TCP host port is open.""" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(2) # 2 Second Timeout try: sock.connect((host, tcp_port)) sock.shutdown(socket.SHUT_RDWR) except ConnectionRefusedError: return False except socket.timeout: return False finally: sock.close() # Other errors are propagated as odd exceptions. # We shutdown and closed the connection, but the server may need a second # to start listening again. If the following error is seen, this timeout # should be increased. 300ms seems to be the minimum. # # Connecting to J-Link via IP...FAILED: Can not connect to J-Link via \ # TCP/IP (127.0.0.1, port 19020) time.sleep(0.5) return True
cbe4d0ae58610b863c30b4e1867b47cb1dbdfc3d
1,711
def load_replica_camera_traj(traj_file_path): """Load a Replica camera trajectory file: one whitespace-separated record per line.""" camera_traj = [] with open(traj_file_path, 'r') as traj_file_handle: for line in traj_file_handle: split = line.split() #if blank line, skip if not len(split): continue camera_traj.append(split) return camera_traj
1879c97ed5ce24834689b156ffdc971b023e67f2
1,713
def punctuation(chars=r',.\"!@#\$%\^&*(){}\[\]?/;\'`~:<>+=-'): """Finds characters in text. Useful to preprocess text. Do not forget to escape special characters. """ return rf'[{chars}]'
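Typical use with re.sub to strip punctuation:

import re
assert re.sub(punctuation(), '', 'Hello, "world"!') == 'Hello world'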
b2fd23d8485c3b6d429723a02a95c981982559b5
1,714
def shift_time(x, dt): """Shift time axis to the left by dt. Used to account for pump & lamp delay""" x -= dt return x
c93fdddea8e41221583139dcc7a2d81177ba7c17
1,715
def log2_fold_change(df, samp_grps): """ calculate fold change - fixed as samp_grps.mean_names[0] over samp_grps.mean_names[1], where the mean names are sorted alphabetically. The log has already been taken, so the L2FC is calculated as mean0 - mean1 :param df: expanded and/or filtered dataframe :param samp_grps: SampleGroups() object :return: dataframe with fold change column appended, with name as in samp_grps.fc_name """ mean1 = samp_grps.mean_names[0] mean2 = samp_grps.mean_names[1] df[samp_grps.fc_name] = df[mean1] - df[mean2] return df
07fcef6f5143095f4f8f77d0251bbd7ecd486fd9
1,716
def configure_smoothing(new_d,smoothing_scans): """ # <batchstep method="net.sf.mzmine.modules.peaklistmethods.peakpicking.smoothing.SmoothingModule"> # <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/> # <parameter name="Filename suffix">smoothed</parameter> # <parameter name="Filter width">9</parameter> # <parameter name="Remove original peak list">false</parameter> # </batchstep> """ idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'SmoothingModule' in d['@method']][0] idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filter width' in d['@name']][0] new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(smoothing_scans) return new_d
031586cf5dbb9fdf1fb6762a89a988367d172942
1,717
def clean_key(func): """Provides a clean, readable key from the funct name and module path. """ module = func.__module__.replace("formfactoryapp.", "") return "%s.%s" % (module, func.__name__)
946288cd231148eb39af5d1e7e0b957d9f2131e8
1,719
def multiply(a,b): """ multiply values Args: a ([float/int]): any value b ([float/int]): any value """ return a*b
67a85b1675da48684e9de7e9834d3daa4357699b
1,720
def check_method(adata): """Check that method output fits expected API.""" assert "labels_pred" in adata.obs return True
78c1a5181395f1675854333c30bf617c578cc1d4
1,722
def extract_traceback(notebook): """ Extracts information about an error from the notebook. Parameters ---------- notebook: :class:`nbformat.notebooknode.NotebookNode` Executed notebook to find an error traceback. Returns ------- bool Whether the executed notebook has an error traceback. int or None Number of a cell with a traceback. If None, then the notebook doesn't contain an error traceback. str Error traceback if exists. """ for cell in notebook['cells']: # Find a cell output with a traceback and extract the traceback outputs = cell.get('outputs', []) for output in outputs: traceback = output.get('traceback', []) if traceback: traceback = '\n'.join(traceback) return True, cell['execution_count'], traceback return False, None, ""
9af26f973e6810936eaa68058efcdb7bc145803b
1,723
import time def cachedmethod(timeout): """ Function decorator to enable caching for instance methods. """ def _cached(func): if not(hasattr(func, 'expires')): func.expires = {} func.cache = {} def __cached(self, *args, **kwargs): if(timeout and func.expires.get(repr(self), 0) < time.time()): if(repr(self) in func.cache): del func.cache[repr(self)] if(repr(self) in func.cache): return func.cache[repr(self)] result = func(self, *args, **kwargs) if(result): func.cache[repr(self)] = result func.expires[repr(self)] = time.time() + timeout return result return __cached try: # see if it's an int int(timeout) except TypeError: func = timeout timeout = 0 return _cached(func) return _cached
dd8999a60aa6d92e6b442c7c0661d88cd0e8590e
1,725
import argparse def parse_options(args): """ Parse commandline arguments into options for Monitor :param args: :return: """ parser = argparse.ArgumentParser() parser.add_argument( "--tcp", required=True, action="append", help="TCP/IP address to monitor, e.g. google.com:80. For best results" " use multiple addresses." ) parser.add_argument("--logfile", default="connection.log", help="Where to store the connection quality data") parser.add_argument("--interval", default=30.0, type=float, help="How many seconds between checks") parser.add_argument("--timeout", default=3.0, type=float, help="How many seconds to wait for connection") parser.add_argument("--quiet", default=False, action="store_true", help="Do not output log data to screen") return parser.parse_args(args)
adda9497c230b885887b8c21f8e1adfd8bdd2376
1,726
def prod_cart(in_list_1: list, in_list_2: list) -> list: """ Compute the cartesian product of two lists :param in_list_1: the first list to be evaluated :param in_list_2: the second list to be evaluated :return: the cartesian product result as [[x,y],..] """ _list = [] for element_1 in in_list_1: for element_2 in in_list_2: _list.append([element_1,element_2]) return _list
9fdbfc558f5ec3b11c78535b9125e0a1c293035e
1,727
from typing import Dict def _extract_assembly_information(job_context: Dict) -> Dict: """Determine the Ensembl assembly version and name used for this index. Ensembl will periodically release updated versions of the assemblies, which are where the input files for this processor come from. All divisions other than the main one have identical release versions, but we don't know which division these files came from so we can't just hit their API again. Therefore, look at the URL we used to get the files because it contains the assembly version and name. I'll admit this isn't the most elegant solution, but since the transcriptome index's only database model is the OriginalFiles until processing is complete, there's no other way to pass this information through to this processor without modifying the OriginalFile model. The URL path we're attempting follows this pattern (defined in the surveyor) ftp://ftp.{url_root}/gtf/{species_sub_dir}/{filename_species}.{assembly_name}.{assembly_version}.gtf.gz and we are attempting to extract {assembly_version} and {assembly_name}. """ original_files = job_context["original_files"] for og_file in original_files: if ".gtf.gz" in og_file.source_filename: extensionless_url = og_file.source_url[:-7] version_start_index = extensionless_url.rfind(".") + 1 job_context["assembly_version"] = extensionless_url[version_start_index:] # Decrement the index to skip the period. versionless_url = extensionless_url[:version_start_index-1] assembly_name_start_index = versionless_url.rfind(".") + 1 job_context["assembly_name"] = versionless_url[assembly_name_start_index:] return job_context
b78513b826c0a12bf87563095e33320aee328b76
1,729
async def cors_handler(request, handler): """Middleware to add CORS response headers """ response = await handler(request) response.headers['Access-Control-Allow-Origin'] = '*' return response
c9f33261b1fb2e6dc3ab3139e657106a94c5bfd1
1,730
import hmac import hashlib def get_proxy_signature(query_dict, secret): """ Calculate the signature of the given query dict as per Shopify's documentation for proxy requests. See: http://docs.shopify.com/api/tutorials/application-proxies#security """ # Sort and combine query parameters into a single string. sorted_params = '' for key in sorted(query_dict.keys()): sorted_params += "{0}={1}".format(key, ",".join(query_dict.getlist(key))) signature = hmac.new(secret.encode('utf-8'), sorted_params.encode('utf-8'), hashlib.sha256) return signature.hexdigest()
c234f18c1d44a936c4844ae2fe1b912a624eef61
1,732
import copy def __yaml_tag_test(*args, **kwargs): """YAML tag constructor for testing only""" return copy.deepcopy(args), copy.deepcopy(kwargs)
0abeb68caf32912c7b5a78dacbc89e537061a144
1,735
import hashlib def cal_md5(content): """ Compute the MD5 hex digest of the content string. :param content: :return: """ # encode the string to bytes before hashing result = hashlib.md5(content.encode()) # get the hex digest md5 = result.hexdigest() return md5
0cd26654c364e34ecc27b0a0b4d410a539e286c3
1,736
def _inv_Jacobian_2D(J, detJ): """ manually invert 2x2 jacobians J in place """ tmp = J[:, 1, 1, :] / detJ J[:, 0, 1, :] = -J[:, 0, 1, :] / detJ J[:, 1, 0, :] = -J[:, 1, 0, :] / detJ J[:, 1, 1, :] = J[:, 0, 0, :] / detJ J[:, 0, 0, :] = tmp return J
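A quick self-check of the in-place inverse (numpy assumed; arrays follow the (batch, 2, 2, points) layout implied by the indexing above):

import numpy as np

J = np.random.rand(4, 2, 2, 5)
J[:, 0, 0, :] += 2.0  # keep the matrices well conditioned
J[:, 1, 1, :] += 2.0
J_orig = J.copy()
detJ = J[:, 0, 0, :] * J[:, 1, 1, :] - J[:, 0, 1, :] * J[:, 1, 0, :]
J_inv = _inv_Jacobian_2D(J, detJ)  # overwrites J
prod = np.einsum('bijp,bjkp->bikp', J_orig, J_inv)
assert np.allclose(prod, np.eye(2)[None, :, :, None])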
23b1ff231e32f09f09dbae781f7e97354f3ca811
1,737
def to_square_feet(square_metres): """Convert metres^2 to ft^2""" return square_metres * 10.7639
50510aad230efcb47662936237a232662fef5596
1,738
import json def load_id_json_file(json_path): """ load the JSON file and get the data inside all this function does is to call json.load(f) inside a with statement Args: json_path (str): where the target JSON file is Return: ID list (list): all the data found in the file """ with open(json_path, 'r') as f: return json.load(f)
fd0f7fb73636cdf407b4de3e1aa3ae66dcc8f964
1,739
import os def get_py_path(pem_path): """Returns the .py filepath used to generate the given .pem path, which may or may not exist. Some test files (notably those in verify_certificate_chain_unittest/) have a "generate-XXX.py" script that builds the "XXX.pem" file. Build the path to the corresponding "generate-XXX.py" (which may or may not exist).""" file_name = os.path.basename(pem_path) file_name_no_extension = os.path.splitext(file_name)[0] py_file_name = 'generate-' + file_name_no_extension + '.py' return os.path.join(os.path.dirname(pem_path), py_file_name)
0bc97d23138c44e051282fdfa22517f1289ab65a
1,745
import re def parse_path_length(path): """ parse path length """ matched_tmp = re.findall(r"(S\d+)", path) return len(matched_tmp)
762e2b86fe59689800ed33aba0419f83b261305b
1,747
def check_permisions(request, allowed_groups): """ Return whether the requesting user is permitted; note allowed_groups is currently unused, so any request with a user id is allowed. """ try: profile = request.user.id print('User', profile, allowed_groups) is_allowed = True except Exception: return False else: return is_allowed
4bdb54bd1edafd7a0cf6f50196d470e0d3425c66
1,748
def ask_name(question: str = "What is your name?") -> str: """Ask for the users name.""" return input(question)
1cc9ec4d3bc48d7ae4be1b2cf8eb64a0b4f94b23
1,750
def last(*args): """Return last value from any object type - list,tuple,int,string""" if len(args) == 1: return int(''.join(map(str,args))) if isinstance(args[0],int) else args[0][-1] return args[-1]
ad8d836597dd6a5dfe059756b7d8d728f6ea35fc
1,751
def is_float(s): """ Determine if a string can be converted to a floating point number. """ try: float(s) except (TypeError, ValueError): return False return True
2df52b4f8e0835d9f169404a6cb4f003ca661fff
1,752
def process_mean_results(data, capacity, constellation, scenario, parameters): """ Process results. """ output = [] adoption_rate = scenario[1] overbooking_factor = parameters[constellation.lower()]['overbooking_factor'] constellation_capacity = capacity[constellation] max_capacity = constellation_capacity['capacity_kmsq'] number_of_satellites = constellation_capacity['number_of_satellites'] satellite_coverage_area = constellation_capacity['satellite_coverage_area'] for idx, item in data.iterrows(): users_per_km2 = item['pop_density_km2'] * (adoption_rate / 100) active_users_km2 = users_per_km2 / overbooking_factor if active_users_km2 > 0: per_user_capacity = max_capacity / active_users_km2 else: per_user_capacity = 0 output.append({ 'scenario': scenario[0], 'constellation': constellation, 'number_of_satellites': number_of_satellites, 'satellite_coverage_area': satellite_coverage_area, 'iso3': item['iso3'], 'GID_id': item['regions'], 'population': item['population'], 'area_m': item['area_m'], 'pop_density_km2': item['pop_density_km2'], 'adoption_rate': adoption_rate, 'users_per_km2': users_per_km2, 'active_users_km2': active_users_km2, 'per_user_capacity': per_user_capacity, }) return output
0619c397a21d27440988c4b23284e44700ba69eb
1,754
def identify_ossim_kwl(ossim_kwl_file): """ parse geom file to identify if it is an ossim model :param ossim_kwl_file : ossim keyword list file :type ossim_kwl_file : str :return ossim kwl info : ossimmodel or None if not an ossim kwl file :rtype str """ try: with open(ossim_kwl_file, encoding="utf-8") as ossim_file: content = ossim_file.readlines() geom_dict = {} for line in content: (key, val) = line.split(": ") geom_dict[key] = val.rstrip() if "type" in geom_dict: if geom_dict["type"].strip().startswith("ossim"): return geom_dict["type"].strip() return None except Exception: # pylint: disable=broad-except return None
9a63a8b5e7ece79b11336e71a8afa5a703e3acbc
1,755
def convert_cbaois_to_kpsois(cbaois): """Convert coordinate-based augmentables to KeypointsOnImage instances. Parameters ---------- cbaois : list of imgaug.augmentables.bbs.BoundingBoxesOnImage or list of imgaug.augmentables.bbs.PolygonsOnImage or list of imgaug.augmentables.bbs.LineStringsOnImage or imgaug.augmentables.bbs.BoundingBoxesOnImage or imgaug.augmentables.bbs.PolygonsOnImage or imgaug.augmentables.bbs.LineStringsOnImage Coordinate-based augmentables to convert, e.g. bounding boxes. Returns ------- list of imgaug.augmentables.kps.KeypointsOnImage or imgaug.augmentables.kps.KeypointsOnImage ``KeypointsOnImage`` instances containing the coordinates of input `cbaois`. """ if not isinstance(cbaois, list): return cbaois.to_keypoints_on_image() kpsois = [] for cbaoi in cbaois: kpsois.append(cbaoi.to_keypoints_on_image()) return kpsois
6eee2715de3bfc76fac9bd3c246b0d2352101be1
1,756
def get_zcl_attribute_size(code): """ Determine the number of bytes a given ZCL attribute takes up. Args: code (int): The attribute size code included in the packet. Returns: int: size of the attribute data in bytes, or -1 for error/no size. """ opts = (0x00, 0, 0x08, 1, 0x09, 2, 0x0a, 3, 0x0b, 4, 0x0c, 5, 0x0d, 6, 0x0e, 7, 0x0f, 8, 0x10, 1, 0x18, 1, 0x19, 2, 0x1a, 3, 0x1b, 4, 0x1c, 5, 0x1d, 6, 0x1e, 7, 0x1f, 8, 0x20, 1, 0x21, 2, 0x22, 3, 0x23, 4, 0x24, 5, 0x25, 6, 0x26, 7, 0x27, 8, 0x28, 1, 0x29, 2, 0x2a, 3, 0x2b, 4, 0x2c, 5, 0x2d, 6, 0x2e, 7, 0x2f, 8, 0x30, 1, 0x31, 2, 0x38, 2, 0x39, 4, 0x3a, 8, 0x41, -1, 0x42, -1, 0x43, -1, 0x44, -1, 0x48, -1, 0x4c, -1, 0x50, -1, 0x51, -1, 0xe0, 4, 0xe1, 4, 0xe2, 4, 0xe8, 2, 0xe9, 2, 0xea, 4, 0xf0, 8, 0xf1, 16, 0xff, 0) for i in range(0, len(opts), 2): if code == opts[i]: return opts[i + 1] return -1
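The pair-scan can become an O(1) lookup by building a dict once; a sketch (my naming, not from the dataset row) that takes the same flat tuple:

def make_zcl_size_table(opts):
    # opts is the flat (code, size, code, size, ...) tuple used above
    return dict(zip(opts[0::2], opts[1::2]))

# sizes = make_zcl_size_table(opts); sizes.get(code, -1) replaces the loop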
99782c86be2413410c6819a59eadf0daba326af2
1,758
def _get_function_name_and_args(str_to_split): """ Split a string of into a meta-function name and list of arguments. @param IN str_to_split String to split @return Function name and list of arguments, as a pair """ parts = [s.strip() for s in str_to_split.split(" | ")] if len(parts) < 2: raise Exception("Invalid meta function string: %s" % str_to_split) func_name = parts[0] func_args = parts[1:] return func_name, func_args
1dae51c87e727d7fa6a3a8012f9768b9ca3364e7
1,759
def list_of_paths(): """ It lists all the folders which do not contain PET images """ return ['.DS_Store', 'localizer', 'Space_3D_T2_FLAIR_sag_p2', 'AXIAL_FLAIR', 'MPRAGE_ADNI_confirmed_REPEATX2', 'Axial_PD-T2_TSE', 'Axial_PD-T2_TSE_repeat', 'MPRAGE_SAG_ISO_p2_ND', 'Axial_PD-T2_TSE_confirmed', 'MPRAGESAGISOp2ND', 'MPRAGE_ADNI_confirmed', 'MPRAGE_ADNI_confirmed_repeat', 'MPRAGE_SAG_ISO_p2', 'MPRAGE', 'MPRAGE_ADNI_confirmed_REPEAT', 'Axial_PD-T2_TSE_confirmed_repeat', 'MPRAGE_ADNI_conf_REPEAT', 'Space_3D_T2_FLAIR_sag_p2_REPEAT', 'MPRAGE_ADNI_confirmed_RPT', 'Brain_256_1.6_zoom_4_x_4_iter', 'Space_3D_T2_FLAIR_sag_REPEAT', 'Axial_PD-T2_TSE_RPTconfirmed', 'Axial_PD-T2_TSE_RPT_confirmed', 'Axial_PD-T2_TSE_confirmed_REPEAT', 'flair_t2_spc_irprep_ns_sag_p2_1mm_iso', 'localiser']
bc74024d49396f80947b3cb0a45066381b7d3af4
1,761
import torch def index_initial(n_batch, n_ch, tensor=True): """Tensor batch and channel index initialization. Args: n_batch (Int): Number of batch. n_ch (Int): Number of channel. tensor (bool): Return tensor or numpy array Returns: Tensor: Batch index Tensor: Channel index """ batch_index = [] for i in range(n_batch): batch_index.append([[i]] * n_ch) ch_index = [] for i in range(n_ch): ch_index += [[i]] ch_index = [ch_index] * n_batch if tensor: batch_index = torch.tensor(batch_index) ch_index = torch.tensor(ch_index) if torch.cuda.is_available(): batch_index = batch_index.cuda() ch_index = ch_index.cuda() return batch_index, ch_index
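The nested-list construction can be written with broadcasted arange calls (a sketch; produces the same (n_batch, n_ch, 1) index shapes):

import torch

def index_initial_arange(n_batch, n_ch):
    batch_index = torch.arange(n_batch).view(n_batch, 1, 1).expand(n_batch, n_ch, 1)
    ch_index = torch.arange(n_ch).view(1, n_ch, 1).expand(n_batch, n_ch, 1)
    return batch_index, ch_index  # expand returns views; call .contiguous() if needed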
52a16ad4afcf931ba4cda9c014d47050970995c5
1,763
def calc_floodzone(row): """Extracts the FEMAZONE of an SFHA based on each row's attributes. This function acts on individual rows of a pandas DataFrame using the apply built-in. Parameters ---------- row : Pandas Series A row of a pandas DataFrame Returns ------- str The flood zone designation for an SFHA """ if row["FLD_ZONE"] == 'AO': zone = 'AO' + str(round(row['DEPTH'])) elif row["FLD_ZONE"] == 'AH': zone = 'AH' + str(round(row["STATIC_BFE"])) else: zone = row["FLD_ZONE"] return zone
5bb6f3f7cfc1b6bce41ad7a752845287759c16ad
1,766
import re def remove_space(text): """ Collapse runs of whitespace into single spaces and trim the ends. :param str text: text to process """ return re.sub(r"\s+", " ", text).strip()
729d26bb6acbaa8da4c945d2ea6646ebb90f3122
1,767
import base64 def getFilePathBase(): """ Get the directory path of the file behind the requested URL. :return: PHP payload encoded as base64 """ code = """ @ini_set("display_errors","0"); @set_time_limit(0); @set_magic_quotes_runtime(0); header("Content-Type:application/json"); $res = array();$res["path"] = dirname(__FILE__); echo ("<ek>"); echo json_encode($res); echo ("</ek>"); die(); """ return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
afcb1a5bf2972a2b13a32edcd8a9b968742bf7f3
1,768
import logging def say_hello(name): """ Log client's name which entered our application and send message to it """ logging.info('User %s entered', name) return 'Hello {}'.format(name)
b79865cca34d1430bf47afabf7c96741d59ac560
1,770
def merge_dicts(dictionaries): """Merges multiple separate dictionaries into a single dictionary. Parameters ---------- dictionaries : An iterable container of Python dictionaries. Returns ------- merged : A single dictionary that represents the result of merging the all the dicts in ``dictionaries``. Example ------- The primary purpose of this function is to create a single dictionary by combining multiple singleton dictionaries, as shown in the following example: >>> dicts = [{'a': 1}, {'b': 2}, {'c': 3}] >>> eb.merge_dicts(dicts) {'a': 1, 'c': 3, 'b': 2} """ merged = dictionaries[0].copy() for i in range(1, len(dictionaries)): merged.update(dictionaries[i]) return merged
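The same merge as a single comprehension, with later dicts still winning on key collisions:

dicts = [{'a': 1}, {'b': 2}, {'a': 3}]
merged = {k: v for d in dicts for k, v in d.items()}
assert merged == {'a': 3, 'b': 2}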
1a2b5f3c539937e2e27a55ce3914f7368f0a7296
1,771