content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import os def certificate(cert_name): """Return the path to the PEM file with the given name.""" return os.path.join(os.path.dirname(__file__), 'lib', cert_name)
5dc02c85158ae7b020f069976a581d41f31d338c
1,772
def boolToYes(b): """Convert a Boolean input into 'yes' or 'no' Args: b (bool): The Boolean value to be converted Returns: str: 'yes' if b is True, and 'no' otherwise. """ if b: return "yes" else: return "no"
ff94b66b5a166592062bf1d5b286b425e7997304
1,773
def vint_mask_for_length(length): """ Returns the bitmask for the first byte of a variable-length integer (used for element ID and size descriptors). :arg length: the length of the variable-length integer :type length: int :returns: the bitmask for the first byte of the variable-length integer :rtype: int """ return 0b10000000 >> (length - 1)
92fe3cb0fa09713ff4b650349294a2b241bb3918
1,774
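A quick sanity check of the mask values, runnable as a standalone sketch; the expected constants follow directly from shifting 0b10000000 right by (length - 1):

# Masks for the first byte of 1- to 4-byte variable-length integers.
for length in range(1, 5):
    print(length, bin(vint_mask_for_length(length)))
# 1 0b10000000
# 2 0b1000000
# 3 0b100000
# 4 0b10000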
from itertools import tee def parse(tokens): """ S-expr ::= ( S-expr* ) | AtomSymbol | ' S-expr, where ' S-expr is shorthand for (quote S-expr) """ def _parse(tokens): while True: token = next(tokens) if token == "(": s_expr = [] while True: tokens, tokens_clone = tee(tokens) if next(tokens_clone) == ")": next(tokens) return tuple(s_expr) s_expr.append(_parse(tokens)) elif token == ")" or token == "": raise Exception("parse error") elif token == "'": return "quote", _parse(tokens) else: return token s_expr = _parse(tokens) if next(tokens) != "": raise Exception("parse error") return s_expr
90c8e3cd8482899749d30d5344390cfd5f24989f
1,775
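A minimal usage sketch, assuming the tokenizer yields one token per call and an empty string as the end-of-input sentinel (which the final next(tokens) check implies):

tokens = iter(["(", "a", "'", "b", ")", ""])
print(parse(tokens))  # ('a', ('quote', 'b'))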
def find_children(node, tag, xml_ns, ns_key): """ Finds the collection of children nodes Parameters ---------- node : ElementTree.Element tag : str xml_ns : None|dict ns_key : None|str """ if xml_ns is None: return node.findall(tag) elif ns_key is None: return node.findall('default:{}'.format(tag), xml_ns) else: return node.findall('{}:{}'.format(ns_key, tag), xml_ns)
b51d9f588661c3f609dc53adaa328f974e17d5fb
1,776
import re def normalize_string(string, ignore_spaces, ignore_punctuation): """Normalizes strings to prepare them for crashing comparison.""" string = string.upper() if ignore_punctuation: string = re.sub(r"[^0-9a-z \n\r\t]", "", string, flags=re.I) if ignore_spaces: string = re.sub(r"[ \n\r\t]+", "", string) else: string = string.strip() string = re.sub(r"[ \n\r\t]+", " ", string) return string
31de2b9644eb0943470430c6c3f2ea8a94dfb3cf
1,777
def reduce_expr(expr): """ Reduces a boolean algebraic expression based on the identity X + XY = X Args: expr (list): list of term strings representing the boolean algebraic expression Returns: A list of term strings representing the reduced algebraic expression """ reduced = True for term in expr: matches = [t for t in expr if t!=term and len(set(term).intersection(set(t))) == len(term)] if(matches): reduced = False if reduced: return expr new_expr = [] temp_expr = expr for term in expr: #find the terms that contain this term as a subset (the X and XY of the identity) matches = [t for t in expr if t!=term and len(set(term).intersection(set(t))) == len(term)] if(matches): new_expr.append(term) temp_expr.remove(term) for match in matches: temp_expr.remove(match) #if such a term is found, reduce it by the rule X + XY = X: #remove both terms from the list and add X to the new expression #if no term is found, the term passes through to the next expression expr = reduce_expr(new_expr+temp_expr) return expr
2c278e6ea6f133c51c5e98796f288f366fd10cb3
1,778
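A small worked example, treating each term as a string of variables (so 'xy' means X·Y): X + XY + XZ collapses to X under the absorption identity, while disjoint terms pass through unchanged.

print(reduce_expr(['x', 'xy', 'xz']))  # ['x']
print(reduce_expr(['ab', 'cd']))       # ['ab', 'cd'] (nothing to absorb)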
def createVskDataDict(labels,data): """Creates a dictionary of vsk file values from labels and data. Parameters ---------- labels : array List of label names for vsk file values. data : array List of subject measurement values corresponding to the label names in `labels`. Returns ------- vsk : dict Dictionary of vsk file values. Dictionary keys correspond to names in `labels` and dictionary values correspond to values in `data`. Examples -------- This example tests for dictionary equality through python instead of doctest since python does not guarantee the order in which dictionary elements are printed. >>> labels = ['MeanLegLength', 'LeftKneeWidth', 'RightAnkleWidth'] >>> data = [940.0, 105.0, 70.0] >>> res = createVskDataDict(labels, data) >>> res == {'MeanLegLength':940.0, 'LeftKneeWidth':105.0, 'RightAnkleWidth':70.0} True """ vsk={} for key,data in zip(labels,data): vsk[key]=data return vsk
a4669e4a173aaeef534d13faceaeab869eb62cb3
1,779
def h_eval(data): """ Recursively evaluate the values of a dictionary (or list/tuple/set), converting strings to the matching type (boolean/int/float/None/string). Keys whose evaluated value is None or an empty dict are dropped. """ if isinstance(data, dict): for _k in list(data.keys()): data[_k] = h_eval(data[_k]) if data[_k] is None or (isinstance(data[_k], dict) and not data[_k]): data.pop(_k) return data if isinstance(data, list) or isinstance(data, tuple) or isinstance(data, set): res = [] for _k in data: res.append(h_eval(_k)) if isinstance(data, tuple): return tuple(res) if isinstance(data, set): return set(res) return res try: if isinstance(data, str): if data.endswith("%"): data = data[:-1] if data.lower() == "false": return False if data.lower() == "true": return True if data.lower() == "n/e": return None try: return int(data) except Exception: pass try: return float(data) except Exception: pass return data except Exception: return data
28a3529283719cab321c712a9e8723d5ff314ef8
1,782
import math def fmt_bytes(size_bytes): """Return a nice 'total_size' string in the byte, KB, MB, or GB range""" units = ["Bytes", "KB", "MB", "GB"] if size_bytes == 0: return f"{0} Bytes" for unit in units: digits = int(math.log10(size_bytes)) + 1 if digits < 4: return f"{round(size_bytes, 1)} {unit}" size_bytes /= 1024 return f"{size_bytes} TB"
40613403092bdc9d8dca8b0b487d5af6c887b075
1,783
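A few sample calls; the loop divides by 1024 until the value drops under four digits, so the chosen unit tracks binary magnitudes:

print(fmt_bytes(0))           # 0 Bytes
print(fmt_bytes(512))         # 512 Bytes
print(fmt_bytes(2048))        # 2.0 KB
print(fmt_bytes(5 * 1024**3)) # 5.0 GB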
def grid_reference_to_northing_easting(grid_reference): """ Convert an Ordnance Survey grid reference (e.g. 'ST1234') into numeric coordinates. :param grid_reference: the alphanumeric grid reference string :return: an (easting, northing) tuple of integers, or (None, None) if the reference is invalid """ grid_reference = grid_reference.strip().replace(' ', '') if len(grid_reference) == 0 or len(grid_reference) % 2 == 1 or len(grid_reference) > 12: return None, None grid_reference = grid_reference.upper() if grid_reference[0] not in 'STNOH' or grid_reference[1] == 'I': return None, None e = n = 0 c = grid_reference[0] if c == 'T': e = 500000 elif c == 'N': n = 500000 elif c == 'O': e = 500000 n = 500000 elif c == 'H': n = 1000000 c = ord(grid_reference[1]) - 66 if c < 8: # J c += 1 e += (c % 5) * 100000 n += (4 - c // 5) * 100000 c = grid_reference[2:] try: s = c[:int(len(c)/2)] while len(s) < 5: s += '0' e += int(s) s = c[int(-len(c)/2):] while len(s) < 5: s += '0' n += int(s) except Exception as error: print("Caught exception during conversion. Issue: {}".format(error)) return None, None # Data is converted into integers return int(e), int(n)
3da96e36f9be1e369d0425f2b9c34e432eb5ca77
1,785
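A worked call, assuming standard OS National Grid lettering: the 'ST' square has its origin at easting 300000, northing 100000, and the digit pairs are padded out to metre precision. Note that despite the function name, the tuple comes back as (easting, northing):

print(grid_reference_to_northing_easting('ST1234'))    # (312000, 134000)
print(grid_reference_to_northing_easting('not a ref')) # (None, None)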
from typing import Dict def _remove_attribute(note_dict: Dict, attribute: str) -> Dict: """ Create a copy of the note where a single attribute is removed """ d = dict(note_dict) d[attribute] = None return d
d2659b887c1a2a7c67f6785889db2aa2039f9627
1,786
def write_site_pair_score_data_to_file(sorted_data_list, output_file_path, algorithm_used, max_iterations=None, num_threads=None): """Since site indices are starting from zero within python we add one to each of them when they are being written to output file. """ formatter = '#' + '='*100 formatter += '\n' with open(output_file_path, 'w') as fh: fh.write(formatter) fh.write('# This result is computed using {}\n'.format(algorithm_used)) if max_iterations is not None: fh.write('# maximum number of gradient descent iterations: {}\n'.format(max_iterations)) if num_threads is not None: fh.write('# Number of threads used: {}\n'.format(num_threads)) fh.write('# The first and second columns are site pairs. The third column represents interaction score\n') fh.write(formatter) for site_pair, score in sorted_data_list: i, j = site_pair[0] + 1, site_pair[1] + 1 fh.write('{}\t{}\t{}\n'.format(i, j, score)) return None
5ee4ec97fbc1b6f86e36946ee6d925604858b063
1,789
import os def check_icon_arg(src, default): """ Checks if icon arguments are valid: either a URL or an absolute path. :param src: Source of the icon :param default: default value of the icon :return: src (possibly prepended with "file://") """ if src != default: # check if URL if not src.startswith('https://') and not src.startswith('http://'): # Either a file or incorrect input if os.path.isabs(src): src = "file://" + src else: raise IOError( f"Please provide a valid URL or valid *absolute* path to icon: {src}" ) return src
33db8c760df0bdf5e26474cdc281e4d6504cca27
1,790
def only_half_radius( subsampled_radius: float, full_diameter: float, radius_constraint: float ): """ Check if radius is smaller than fraction of full radius. """ assert 0.0 <= radius_constraint <= 1.0 return subsampled_radius <= ((full_diameter / 2) * radius_constraint)
565c301932d5445e8bbb594085e65df63814663a
1,793
def sanitise_description(original: str) -> str: """ Remove newlines from ticket descriptions. :param original: the string to sanitise :return: the same string, with newlines as spaces """ return original.replace("\n", " ")
741aa7df758fb342a0d9a0fa182d24a643f5dbbc
1,794
def mockselect(r, w, x, timeout=0): # pylint: disable=W0613 """Simple mock for select() """ readable = [s for s in r if s.ready_for_read] return readable, w[:], []
920810aea2f7813885805011d646ddaabfd1901c
1,795
from typing import List def merge(input_list: List, low: int, mid: int, high: int) -> List: """ Merge the already-sorted left half input_list[low:mid] and right half input_list[mid:high+1] into a single sorted run, writing the result back into input_list. """ result = [] left, right = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0)) input_list[low : high + 1] = result + left + right return input_list
0d53b0670899b4853563c9dda0eb47a8c66bae00
1,796
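The function only merges two runs that are already sorted; a small recursive driver (a sketch, not part of the original snippet) turns it into a full merge sort:

def merge_sort(lst: List, low: int, high: int) -> List:
    # Sort lst[low:high+1] in place by splitting, recursing, then merging.
    if low < high:
        mid = (low + high) // 2
        merge_sort(lst, low, mid)
        merge_sort(lst, mid + 1, high)
        # merge() treats lst[low:mid_arg] as the left run, so pass mid + 1.
        merge(lst, low, mid + 1, high)
    return lst

print(merge_sort([5, 2, 9, 1, 7], 0, 4))  # [1, 2, 5, 7, 9]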
def splitmod(n, k): """ Split n into k lists containing the elements of n in positions i (mod k). Return the heads of the lists and the tails. """ heads = [None]*k tails = [None]*k i = 0 while n is not None: if heads[i] is None: heads[i] = n if tails[i] is not None: tails[i].next = n tails[i] = n n.next, n = None, n.next i = (i+1)%k return heads, tails
a4a1885ce0c9541c534145d0236996a511cbdd00
1,797
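splitmod expects a singly linked list whose nodes expose a next attribute; a minimal hypothetical Node class (an assumption for illustration only) makes the round-robin split concrete:

class Node:
    # Minimal linked-list node, assumed for illustration only.
    def __init__(self, value):
        self.value = value
        self.next = None

nodes = [Node(v) for v in range(6)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b

heads, tails = splitmod(nodes[0], 2)
for head in heads:
    values = []
    while head is not None:
        values.append(head.value)
        head = head.next
    print(values)
# [0, 2, 4]
# [1, 3, 5]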
import os def dirname_to_prefix(dirname): """Return filename prefix from dirname""" return os.path.basename(dirname.strip('/')).split("-", maxsplit=1)[1]
8e1124e66669dd051987c55812052d36c78bdc4a
1,798
def binary_accuracy(a,b): """ Calculate the binary accuracy. """ return ((a.argmax(dim=1) == b).sum().item()) / a.size(0)
5f9b09199b2e88169a0cbe9ee7cb4bb351c09e4a
1,799
def _generate_var_name(prefix, field_name): """ Generate the environment variable name, given a prefix and the configuration field name. Examples: >>> _generate_var_name("", "some_var") "SOME_VAR" >>> _generate_var_name("my_app", "some_var") "MY_APP_SOME_VAR" :param prefix: the prefix to be used, can be empty :param field_name: the name of the field from which the variable is derived """ return ( "_".join((prefix, field_name)).upper() if prefix else field_name.upper() )
9065d1deb76789582e68df779ec2c961a7d4aedc
1,800
def num_decodings2(enc_mes): """ :type enc_mes: str :rtype: int """ if not enc_mes or enc_mes.startswith('0'): return 0 stack = [1, 1] for i in range(1, len(enc_mes)): if enc_mes[i] == '0': if enc_mes[i-1] == '0' or enc_mes[i-1] > '2': # only '10', '20' is valid return 0 stack.append(stack[-2]) elif 9 < int(enc_mes[i-1:i+1]) < 27: # '01 - 09' is not allowed stack.append(stack[-2]+stack[-1]) else: # other cases, e.g. '01', '09', '27' stack.append(stack[-1]) return stack[-1]
ae4ff7181e34003dcc7ec264ed2727bc716708a5
1,801
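Sample counts for the classic digit-decoding problem (1→A … 26→Z), which the rolling two-entry stack computes in O(n):

print(num_decodings2('226'))  # 3  ('2 2 6', '22 6', '2 26')
print(num_decodings2('100'))  # 0  ('00' cannot be decoded)
print(num_decodings2(''))     # 0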
import decimal def truncate_decimal_places(value: decimal.Decimal, places: int = 1) -> float: """ Truncate a decimal (i.e. round towards zero) to a given number of decimal places. NB: Takes a decimal but returns a float! >>> truncate_decimal_places(decimal.Decimal("12.364"), 1) 12.3 >>> truncate_decimal_places(decimal.Decimal("-12.364"), 1) -12.3 # -12.3 is bigger than -12.4 >>> truncate_decimal_places(decimal.Decimal("12.364"), 0) 12.0 # rounding to 0 returns float with no decimal part """ if places == 0: quantize_string = "1" else: quantize_string = "0." + ((places - 1) * "0") + "1" exponent = decimal.Decimal(quantize_string) decimal_result = value.quantize(exponent, rounding=decimal.ROUND_DOWN) return float(decimal_result)
11b924a5e4f6560674b1f7378f6a4001a3265a97
1,803
import pathlib import sys def get_resource_path(relative_path): """ relative_path = "data/beach.jpg" relative_path = pathlib.Path("data") / "beach.jpg" relative_path = os.path.join("data", "beach.jpg") """ rel_path = pathlib.Path(relative_path) dev_base_path = pathlib.Path(__file__).resolve().parent.parent # sys._MEIPASS (set by PyInstaller) is a plain string, so wrap it in a Path base_path = pathlib.Path(getattr(sys, "_MEIPASS", dev_base_path)) return base_path / rel_path
becad13eb95d988b49ea7ef141e9c3436379af6e
1,804
def flip_dict(d): """Returns a dict with values and keys reversed. Args: d: The dict to flip the values and keys of. Returns: A dict whose keys are the values of the original dict, and whose values are the corresponding keys. """ return {v: k for k, v in d.items()}
c9c960209663639613739979c0dc4066a63c44cb
1,805
import subprocess def branch_exists(branch: str) -> bool: """ Check if the branch exists in the current Git repo. """ try: subprocess.check_call( ["git", "rev-parse", "--quiet", "--verify", branch], stdout=subprocess.DEVNULL, ) return True except subprocess.CalledProcessError: return False
352adba56d824fff29bf5c91788a1154bea64f1b
1,806
def has_sample(args): """Return whether some kind of sample id is given in args. """ return args.sample or args.samples or args.sample_tag
c2ae87acb11232d7f56cb9e09eb8509720669058
1,807
import numpy import math def pearsonr(a0, a1): """Pearson r, product-moment correlation coefficient, of two samples. Covariance divided by product of standard deviations. https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample """ n = len(a0) assert n == len(a1) if n == 0: # No data, so no notion of correlation. return float('NaN') a0 = numpy.array(a0) a1 = numpy.array(a1) m0 = numpy.mean(a0) m1 = numpy.mean(a1) num = numpy.sum((a0 - m0)*(a1 - m1)) den0_sq = numpy.sum((a0 - m0)**2) den1_sq = numpy.sum((a1 - m1)**2) den = math.sqrt(den0_sq*den1_sq) if den == 0.0: # No variation in at least one column, so no notion of # correlation. return float('NaN') r = num / den # Clamp r in [-1, +1] in case of floating-point error. r = min(r, +1.0) r = max(r, -1.0) return r
64135ebc840bb1673ece1aec24f22c960f89af20
1,809
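Two quick checks: a perfectly linear relationship gives r = 1.0, and a constant column gives NaN because the denominator vanishes:

print(pearsonr([1, 2, 3], [2, 4, 6]))  # 1.0
print(pearsonr([1, 2, 3], [5, 5, 5]))  # nan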
def __column(matrix, i): """Returns columns from a bidimensional Python list (a list of lists)""" return [row[i] for row in matrix]
f455245eb8bbda90f185479afc85eecfb481c70c
1,810
import copy import os def write_stats_file(run_output_dict): """Writes a dummy PolyChord format .stats file for tests functions for processing stats files. This is written to: base_dir/file_root.stats Also returns the data in the file as a dict for comparison. Parameters ---------- run_output_dict: dict Output information to write to .stats file. Must contain file_root and base_dir. If other settings are not specified, default values are used. Returns ------- output: dict The expected output of nestcheck.process_polychord_stats(file_root, base_dir) """ mandatory_keys = ['file_root', 'base_dir'] for key in mandatory_keys: assert key in run_output_dict, key + ' not in run_output_dict' default_output = {'logZ': 0.0, 'logZerr': 0.0, 'logZs': [0.0], 'logZerrs': [0.0], 'ncluster': 1, 'nposterior': 0, 'nequals': 0, 'ndead': 0, 'nlike': 0, 'nlive': 0, 'avnlike': 0.0, 'avnlikeslice': 0.0, 'param_means': [0.0, 0.0, 0.0], 'param_mean_errs': [0.0, 0.0, 0.0]} allowed_keys = set(mandatory_keys) | set(default_output.keys()) assert set(run_output_dict.keys()).issubset(allowed_keys), ( 'Input dict contains unexpected keys: {}'.format( set(run_output_dict.keys()) - allowed_keys)) output = copy.deepcopy(run_output_dict) for key, value in default_output.items(): if key not in output: output[key] = value # Make a PolyChord format .stats file corresponding to output file_lines = [ 'Evidence estimates:', '===================', (' - The evidence Z is a log-normally distributed, with location and ' 'scale parameters mu and sigma.'), ' - We denote this as log(Z) = mu +/- sigma.', '', 'Global evidence:', '----------------', '', 'log(Z) = {0} +/- {1}'.format( output['logZ'], output['logZerr']), '', '', 'Local evidences:', '----------------', ''] for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])): file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format( str(i + 1).rjust(2), lz, lzerr)) file_lines += [ '', '', 'Run-time information:', '---------------------', '', ' ncluster: 0 / 1', ' nposterior: {0}'.format(output['nposterior']), ' nequals: {0}'.format(output['nequals']), ' ndead: {0}'.format(output['ndead']), ' nlive: {0}'.format(output['nlive']), ' nlike: {0}'.format(output['nlike']), ' <nlike>: {0} ( {1} per slice )'.format( output['avnlike'], output['avnlikeslice']), '', '', 'Dim No. Mean Sigma'] for i, (mean, meanerr) in enumerate(zip(output['param_means'], output['param_mean_errs'])): file_lines.append('{0} {1} +/- {2}'.format( str(i + 1).ljust(3), mean, meanerr)) file_path = os.path.join(output['base_dir'], output['file_root'] + '.stats') with open(file_path, 'w') as stats_file: stats_file.writelines('{}\n'.format(line) for line in file_lines) return output
5a3d7b81d8315fd39d5348f9140a001b020c7584
1,811
import json def dict_serialize(seqlen_dist_dict): """ dict->str Turns {1:'a',2:'b'}->"[[1,'a'],[2,'b']]" Why? Because this format plays nice with shell script that runs xlmr_bench. Avoids curly braces and spaces that makes shell script str input unhappy. """ seqlen_dist_lst = list(seqlen_dist_dict.items()) seqlen_dist_str = json.dumps(seqlen_dist_lst) seqlen_dist_str = seqlen_dist_str.replace(" ", "") # remove spaces return seqlen_dist_str
a61c51debff922d128fbb26bbe2121063511d4c4
1,812
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name): """Creates a list of paths where the files have a predefined prefix, an incremental number and a predefined suffix on their name, respectively. Eg.: img01.zdf Args: path: a path that leads to the files directory file_prefix_name: a string that comes before the number number_of_digits_zfill: a number of digits in the number file_suffix_name: a string that comes after the number Returns: list_of_paths: list of appended paths """ num = 1 list_of_paths = [] while True: file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}" list_of_paths.append(file_path) next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}" if not next_file_path.exists(): return list_of_paths num = num + 1
4850edbbf544284b0736ee52188bd53119c50fdf
1,813
import re def tamper(payload, **kwargs): """ Replaces instances of UNION with -.1UNION Requirement: * MySQL Notes: * Reference: https://raw.githubusercontent.com/y0unge/Notes/master/SQL%20Injection%20WAF%20Bypassing%20shortcut.pdf >>> tamper('1 UNION ALL SELECT') '1-.1UNION ALL SELECT' >>> tamper('1" UNION ALL SELECT') '1"-.1UNION ALL SELECT' """ return re.sub(r"(?i)\s+(UNION )", r"-.1\g<1>", payload) if payload else payload
cbf4fc5b81bc7760aafe6cf65fa498945285e5bb
1,814
def get_source_tokens_tensor(src_tokens): """ To enable integration with PyText, src_tokens should be able to support more features than just token embeddings. Hence when dictionary features are passed from PyText it will be passed as a tuple (token_embeddings, dict_feat, ..). Thus, in this case where we need the source tokens tensor (eg to calculate batch size = source_tokens_tensor.size(0)), we get the first element on the tuple which is always guaranteed to be source tokens and do the necessary operation. eg : bsz = get_source_tokens_tensor(src_tokens).size(0) """ if isinstance(src_tokens, tuple): return src_tokens[0] else: return src_tokens
cf20ceeba82c595dc62b267794ca758360e0386b
1,815
def merge_config_and_args(config, args): """ Creates a configuration dictionary based upon command line arguments. Parameters ---------- config : dict configurations loaded from the config file args : object arguments and their values which could be passed in the command line. Returns ------- dict updated configuration dictionary with arguments passed in command line. """ arg_dict = vars(args) stripped_dict = { k: v for k, v in arg_dict.items() if (v is not None) } return {**config, **stripped_dict}
3935cfc525fb99b9513a608ef0e5e8fd7de708f3
1,816
import random import string def get_random_string(length: int) -> str: """ With combination of lower and upper case """ return ''.join(random.choice(string.ascii_letters) for i in range(length))
b9d0c760e92603a4fe1f625615b96a1c2265f22a
1,817
from typing import Union from pathlib import Path from typing import List from typing import Dict import json def readJSONLFile(file_name: Union[str, Path]) -> List[Dict]: """ Read a '.jsonl' file and create a list of dicts Args: file_name: `Union[str,Path]` The file to open Returns: The list of dictionaries read from the 'file_name' """ if isinstance(file_name, str): file_name = Path(file_name) lines = file_name.read_text('utf-8').splitlines(False) return [json.loads(line) for line in lines]
8e33fad766a255578179828dc76ec793c02f90b9
1,818
def makeSiteWhitelist(jsonName, siteList): """ Provided a template json file name and the site white list from the command line options; return the correct site white list based on some silly rules """ if 'LHE_PFN' in jsonName: siteList = ["T1_US_FNAL"] print("Overwriting SiteWhitelist to: %s" % siteList) elif 'LHE' in jsonName or 'DQMHarvest' in jsonName: siteList = ["T2_CH_CERN"] print("Overwriting SiteWhitelist to: %s" % siteList) return siteList
8f8b11739a30b4338b8dd31afb6c3c57545af6d0
1,819
from typing import List def max_crossing_sum(lst: List[int], mid: int, n: int) -> int: """ Parameter <mid> is the floor middle index of <lst>. Parameter <n> is the length of the input list <lst>. Pre: <lst> is a list of integers and len(lst) >= 2. Post: returns the maximum contiguous crossing sum starting from the middle of <lst>. >>> max_crossing_sum([2, -5, 8, -6, 10, -2], 3, 6) 12 """ left_sum, right_sum, total = 0, 0, 0 # initialize values # max sum of the left half k = mid - 1 i = 0 while i < mid: total += lst[k - i] i += 1 if total > left_sum: left_sum = total total = 0 # max sum of the right half for i in range(mid, n): # iterate from index mid...n - 1 total += lst[i] if total > right_sum: right_sum = total # note: left_sum and right_sum are each at least zero return left_sum + right_sum
3d873907cb7ed0c14152ec3c2e92a742bd52aa85
1,820
import argparse def parse_arguments(): """ Parse the argument list and return the location of a geometry file, the location of a data file, whether or not to save images with a timestamp of the four default plot windows and the VisIt session file in the current directory, and whether or not to open the session file in VisIt. Input: ______ none Returns: ________ args: Namespace User supplied geometry file location, data file location, and indication if the user wants images of the plot windows with a timestamp and the session file saved and opened in VisIt. """ parser = argparse.ArgumentParser(description="Create default VisIt output.") parser.add_argument("geofile", type=str, help="Provide a path to the geometry file." ) parser.add_argument("datafile", type=str, help="Provide a path to the data file." ) parser.add_argument("-i", "--images", action="store_true", help="Indicate whether to save images of plot windows." ) parser.add_argument("-t", "--timestamp", action="store_true", help="Indicate whether to remove the timestamp from images." ) parser.add_argument("-s", "--sessionfile", action="store_true", help="Indicate whether to save the VisIt session file." ) parser.add_argument("-v", "--openvisit", action="store_false", help="Indicate whether to open the session file in VisIt." ) args = parser.parse_args() return args
5ba0e6e65801cfc93cc2864368eb2fac4b75e840
1,821
def get_duration(df): """Get duration of ECG recording Args: df (DataFrame): DataFrame with time/voltage data Returns: float: duration of ECG recording """ start = df.time.iloc[0] end = df.time.iloc[-1] duration = end - start return duration
77698afc8ef7af557628d5fea760dc101c3e6112
1,823
import click import subprocess def launch(cmd, args=None, separate_terminal=False, in_color='cyan', silent=False, should_wait=True): """ Launch a system command :param cmd: The command to run :param args: The arguments to pass to that command (a str list) :param separate_terminal: Should we open a new terminal window :param in_color: The color to output :param silent: Echo the system command to the current stdout? :param should_wait: In the case of a separate terminal, should we wait for that to finish? :return: The error code returned from the command. If not wait to complete, this will only return 0. """ if args is None: args = [] args_in = [cmd] if separate_terminal or not should_wait: pre_args = ['start'] if should_wait: pre_args.append('/wait') pre_args.append(cmd) pre_args.extend(args) args_in = pre_args else: args_in.extend(args) if not silent: click.secho(' '.join(args_in), fg=in_color) return subprocess.call(args_in, shell=separate_terminal or not should_wait)
48de0ef8b80973fede05444ec78ab09de6b783b9
1,824
def calc_triangular_number(n: int): """ A triangular number or triangle number counts objects arranged in an equilateral triangle. More info: https://www.mathsisfun.com/algebra/triangular-numbers.html :param n: :return: """ return int((n * (n + 1)) / 2)
e3bfefd6e0e9451849cee8f6da252ec128285c85
1,826
def find_ccs(unmerged): """ Find connected components of a list of sets. E.g. x = [{'a','b'}, {'a','c'}, {'d'}] find_ccs(x) [['a','b','c'], ['d']] """ merged = set() while unmerged: elem = unmerged.pop() shares_elements = False for s in merged.copy(): if not elem.isdisjoint(s): merged.remove(s) merged.add(frozenset(s.union(elem))) shares_elements = True if not shares_elements: merged.add(frozenset(elem)) return [list(x) for x in merged]
4bff4cc32237dacac7737ff509b4a68143a03914
1,827
import os def checkLastJob(jobsFolder): """Count number of folders in folder :param jobsFolder: directory with jobs :return: number of created jobs """ allFolders = os.listdir(jobsFolder) jobsFolders = [f for f in allFolders if f.startswith('job')] jobsCount = len(jobsFolders) return jobsCount
17ea83ffc07134d91d66a08ee59ed85b499c8e4d
1,828
def gen_image_name(reference: str) -> str: """ Generate the image name as a signing input, based on the docker reference. Args: reference: Docker reference for the signed content, e.g. registry.redhat.io/redhat/community-operator-index:v4.9 """ no_tag = reference.split(":")[0] image_parts = no_tag.split("/") return "/".join(image_parts[1:])
ccaecfe91b5b16a85e3a3c87b83bbc91e54080b1
1,829
def get_chat_id(update): """ Get chat ID from update. Args: update (instance): Incoming update. Returns: (int, None): Chat ID. """ # Simple messages if update.message: return update.message.chat_id # Menu callbacks if update.callback_query: return update.callback_query.message.chat_id return None
1669382fd430b445ea9e3a1306c1e68bf2ec0013
1,830
import time def timeit(method): """ Timing Decorator Function Written by Fahim Sakri of PythonHive (https://medium.com/pthonhive) """ def timed(*args, **kwargs): time_start = time.time() result = method(*args, **kwargs) time_end = time.time() if 'log_time' in kwargs: name = kwargs.get('log_name', method.__name__.upper()) kwargs['log_time'][name] = int((time_end - time_start) * 1000) else: print('\n{} {:5f} ms'.format(method.__name__, (time_end - time_start) * 1000)) return result return timed
598667950bc707b72239af9f4e5a3248dbe64d96
1,833
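A usage sketch. Note that timed() forwards **kwargs to the wrapped function, so a function timed via log_time must be willing to accept the extra keyword arguments (hence the **kwargs below):

import time

@timeit
def slow_add(a, b, **kwargs):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)                 # prints e.g. "slow_add 100.3… ms"

timings = {}
slow_add(1, 2, log_time=timings, log_name='ADD')
print(timings)                 # e.g. {'ADD': 100}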
import requests def upload_record(data, headers, rdr_project_id): """ Upload a supplied record to the research data repository """ request_url = f"https://api.figsh.com/v2/account/projects/{rdr_project_id}/articles" response = requests.post(request_url, headers=headers, json=data) return response.json()
7431234757668f9157f90aa8a9c335ee0e2a043b
1,834
def generate_url_fragment(title, blog_post_id): """Generates the url fragment for a blog post from the title of the blog post. Args: title: str. The title of the blog post. blog_post_id: str. The unique blog post ID. Returns: str. The url fragment of the blog post. """ lower_title = title.lower() hyphenated_title = lower_title.replace(' ', '-') lower_id = blog_post_id.lower() return hyphenated_title + '-' + lower_id
c846e6203fa4782c6dc92c892b9e0b6c7a0077b5
1,835
def leap_year(): """ This function determines whether a user-supplied year (an integer read from input) is a leap year or a common year. Rules for a leap year: As you surely know, due to some astronomical reasons, years may be leap or common. The former are 366 days long, while the latter are 365 days long. Since the introduction of the Gregorian calendar (in 1582), the following rule is used to determine the kind of year: -->if the year number isn't divisible by four, it's a common year; -->otherwise, if the year number isn't divisible by 100, it's a leap year; -->otherwise, if the year number isn't divisible by 400, it's a common year; -->otherwise, it's a leap year. :return: str -- the verdict for the entered year """ year = int(input("Enter a year: ")) mess_1 = 'It\'s a common year!' mess_2 = 'It\'s a leap year!' if year <= 1582: return f'{year} does not fall under Gregorian Calendar!!' elif year % 4 != 0: return mess_1 elif year % 100 != 0: return mess_2 elif year % 400 != 0: return mess_1 else: return mess_2
5cf459514ce768c1cf633fdddab5f986004bc1c8
1,836
def login(request): """Login view for GET requests.""" logged_in = request.authenticated_userid is not None if logged_in: return {'logged_in': True, 'form_enabled': False, 'status': u'Already logged in', 'status_type': u'info'} status = u'' status_type = u'' return { 'form_enabled': True, 'status_type': status_type, 'status': status, 'logged_in': False, 'username': request.params.get('username', u''), }
8cab36d8d059d0683ef2e84a40cca5c99a27c6fc
1,837
def of_type(_type, value_1, *args) -> bool: """ Check if a collection of values are of the same type. Parameters: _type (any): The type to check for. value_1 (any): The first value to check. *args (any): Rest of values to check against given type. Returns: (bool) whether or not all inputs are of the given type. """ all_of_type = isinstance(value_1, _type) i = len(args) while i > 0 and all_of_type: all_of_type = isinstance(args[i-1], _type) i -= 1 return all_of_type
eab1e70655ff74b1cbfc338a893719b7f0681f4a
1,838
def _make_source(cls_source: str, cls_name: str, instance_method: str): """Converts a class source to a string including necessary imports. Args: cls_source (str): A string representing the source code of a user-written class. cls_name (str): The name of the class cls_source represents. instance_method (str): The method within the class that should be called from __main__ Returns: A string representing a user-written class that can be written to a file in order to yield an inner script for the ModelBuilder SDK. The only difference between the user-written code and the string returned by this method is that the user has the option to specify a method to call from __main__. """ src = "\n".join(["import torch", "import pandas as pd", cls_source]) src = src + "\nif __name__ == '__main__':\n" + f"\t{cls_name}().{instance_method}()" return src
105ca5d34c0de2bfc81937aaaf14b4d610eaa35a
1,840
def get_accessible_cases(item, user): """Return all cases accessible for a cohort and user.""" return getattr(item, "get_accessible_cases_for_user")(user)
42d54ebf672ce401ac311f9868f6b19f93418065
1,841
def hex_to_byte(hexStr): """ Convert a hex string to a string of the corresponding byte characters. """ chars = [] hexStr = ''.join(hexStr.split(" ")) for i in range(0, len(hexStr), 2): chars.append(chr(int(hexStr[i:i + 2], 16))) return ''.join(chars)
a424d65b0a02c0d10ee5c7c25409f4a0ce477528
1,842
def _vital_config_update(cfg, cfg_in): """ Treat a vital Config object like a python dictionary Args: cfg (kwiver.vital.config.config.Config): config to update cfg_in (dict | kwiver.vital.config.config.Config): new values """ # vital cfg.merge_config doesn't support dictionary input if isinstance(cfg_in, dict): for key, value in cfg_in.items(): if cfg.has_value(key): cfg.set_value(key, str(value)) else: raise KeyError('cfg has no key={}'.format(key)) else: cfg.merge_config(cfg_in) return cfg
35a0092013229f3b71a1ba06bbb660f861ef391c
1,843
import struct def _read_extended_field_value(value, rawdata): """Used to decode large values of option delta and option length from raw binary form.""" if value >= 0 and value < 13: return (value, rawdata) elif value == 13: return (rawdata[0] + 13, rawdata[1:]) elif value == 14: return (struct.unpack('!H', rawdata[:2])[0] + 269, rawdata[2:]) else: raise ValueError("Value out of range.")
12a1f665f133f6ea5ffc817bf69ec0a9e0e07dbc
1,844
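Worked examples following the CoAP option encoding (RFC 7252): values 0–12 are literal, 13 adds one extra byte, and 14 adds a two-byte big-endian extension offset by 269:

print(_read_extended_field_value(5, b'\xff'))           # (5, b'\xff')
print(_read_extended_field_value(13, b'\x05rest'))      # (18, b'rest')
print(_read_extended_field_value(14, b'\x01\x00rest'))  # (525, b'rest')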
def get_dcgan_args(parser, args=[]): """ parameters determining the DCGAN parameters """ # DCGAN: # ------------------------------------------------------------------------ parser.add_argument( "--lam", type=float, default=10, help="Factor for scaling gradient penalty" ) parser.add_argument( "--wgan", action="store_true", help="Determine if WGAN training should be activated", ) parser.add_argument( "--p_drop", type=float, default=0.1, help="Dropout probability for the Discriminator network", ) # ------------------------------------------------------------------------ return parser
28d00721fad62ecbc381190b05d81fe578860f8e
1,845
import os def _gen_span_id() -> str: """Return 16 random hexadecimal digits. The id is used for distributed tracing. """ return os.urandom(8).hex()
4c70028da278eb26c947c9ca24e0c527f6744860
1,846
from typing import Tuple import re def _parse_cli_variable(mapping_str: str) -> Tuple[str, str]: """Checks that the input is of shape `name:value` and then splits it into a tuple""" match = re.match(r"(?P<name>.+?):(?P<value>.+)", mapping_str) if match is None: raise ValueError(f'CLI variable input {mapping_str} is not of form `"name:value"`') parsed = match.groupdict() return parsed["name"], parsed["value"]
f701b7e85c45c2df35e1252721cd3215357909ba
1,847
def get_point(points, cmp, axis): """ Get a point based on the values of either the x or y axis. :cmp: Integer less than or greater than 0, representing respectively the < and > signs. :returns: the index of the point matching the constraints """ index = 0 for i in range(len(points)): if cmp < 0: if points[i][axis] < points[index][axis]: index = i else: if points[i][axis] > points[index][axis]: index = i return index
b59035d390e83b45a0131e28c4acf7e302cf3e45
1,848
import pathlib def create_jobs_list(chunks, outdir, *filters): # TO DO # Figure out the packing/unpacking """ Create a list of dictionaries that hold information for the given chunks Arguments: chunks: list: A list of lists. Each nested list contains the filepaths to be processed outdir: Path object: The directory where results will be written filters: Callables Return: jobs_list: list: A list of dictionaries that holds information for the execution of each chunk. Of the form [ {'chunk_id' : int, (0,1,2,...) 'out_fp' : Path object, (outdir/chunk_<chunk_id>.fa.gz) 'fastas' : list of Path objects, ([PosixPath('path/to/PATRIC.faa'),...]) 'filters' : list of functions } ] """ jobs_list = [] for i, chunk in enumerate(chunks): chunk_id = f"chunk_{i}" chunk_out = f"{chunk_id}.fa.gz" out_fp = outdir / pathlib.Path(chunk_out) # chunk_skipped = f"{chunk_id}.skipped.txt" chunk_fastas = chunk chunk_dict = { "chunk_id": chunk_id, "fastas": chunk_fastas, "out_fp": out_fp, # Should there be an if filters or if len(filters) != 0 ? "filters": [f for f in filters], } jobs_list.append(chunk_dict) return jobs_list
433992eb34bc1f80d12f8cdcee3dbd99d04d22c1
1,849
def parse_decodes(sentences, predictions, lengths, label_vocab): """Parse the padding result Args: sentences (list): the tagging sentences. predictions (list): the prediction tags. lengths (list): the valid length of each sentence. label_vocab (dict): the label vocab. Returns: outputs (list): the formatted output. """ predictions = [x for batch in predictions for x in batch] lengths = [x for batch in lengths for x in batch] id_label = dict(zip(label_vocab.values(), label_vocab.keys())) outputs = [] for idx, end in enumerate(lengths): sent = sentences[idx][:end] tags = [id_label[x] for x in predictions[idx][:end]] sent_out = [] tags_out = [] words = "" for s, t in zip(sent, tags): if t.endswith('-B') or t == 'O': if len(words): sent_out.append(words) tags_out.append(t.split('-')[0]) words = s else: words += s if len(sent_out) < len(tags_out): sent_out.append(words) outputs.append(''.join( [str((s, t)) for s, t in zip(sent_out, tags_out)])) return outputs
bf40d8570e0a552853108e860fd193c0d9940e98
1,851
def _check_max_features(importances, max_features): """Interpret the max_features value""" n_features = len(importances) if max_features is None: max_features = n_features elif isinstance(max_features, int): max_features = min(n_features, max_features) elif isinstance(max_features, float): max_features = int(n_features * max_features) return max_features
816daf9d99ac4ecd2d5024a3be63f793d7669e1f
1,854
def premises_to_syllogism(premises): """ >>> premises_to_syllogism(["Aab", "Ebc"]) 'AE1' """ figure = {"abbc": "1", "bacb": "2", "abcb": "3", "babc": "4"}[premises[0][1:] + premises[1][1:]] return premises[0][0] + premises[1][0] + figure
a048d44acea1eb4c9346880a74547a9cd100ebf0
1,855
def is_sum_lucky(x, y): """This returns a string describing whether or not the sum of input is lucky This function first makes sure the inputs are valid and then calculates the sum. Then, it will determine a message to return based on whether or not that sum should be considered "lucky" """ if x is not None: if y is not None: result = x + y if result == 7: return 'a lucky number!' else: return 'an unlucky number!' return 'just a normal number'
081b5e8cc2657a00ea160e398fb00f84187e2ab6
1,856
import asyncio def unsync_function(func, *args, **kwargs): """Runs an async function in a standard blocking way and returns output""" return asyncio.run(func(*args, **kwargs))
cd7c19bf226b78c9e3c4b19325e7acb4fcc90e21
1,857
def imputation_Y(X, model): """Perform imputation. Don't normalize for depth. Args: X: feature matrix from h5. model: a trained scBasset model. Returns: array: a peak*cell imputed accessibility matrix. Sequencing depth isn't corrected for. """ Y_impute = model.predict(X) return Y_impute
75e2de758c3544655d4332098d4398255770d7c3
1,858
def is_row_and_col_balanced(T1, T2): """ Partial latin squares T1 and T2 are balanced if the symbols appearing in row r of T1 are the same as the symbols appearing in row r of T2, for each r, and if the same condition holds on columns. EXAMPLES:: sage: from sage.combinat.matrices.latin import * sage: T1 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]]) sage: T2 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]]) sage: is_row_and_col_balanced(T1, T2) True sage: T2 = matrix([[0,3,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]]) sage: is_row_and_col_balanced(T1, T2) False """ for r in range(T1.nrows()): val1 = set(x for x in T1.row(r) if x >= 0) val2 = set(x for x in T2.row(r) if x >= 0) if val1 != val2: return False for c in range(T1.ncols()): val1 = set(x for x in T1.column(c) if x >= 0) val2 = set(x for x in T2.column(c) if x >= 0) if val1 != val2: return False return True
f0a9d1522da2fc079d4021603198e79c438de727
1,860
def submit(ds, entry_name, molecule, index): """ Submit an optimization job to a QCArchive server. Parameters ---------- ds : qcportal.collections.OptimizationDataset The QCArchive OptimizationDataset object that this calculation belongs to entry_name : str The base entry name that the conformation belongs to. Usually, this is a canonical SMILES, but can be anything as it is represents a key in a dictionary-like datastructure. This will be used as an entry name in the dataset molecule : QCMolecule The JSON representation of a QCMolecule, which has geometry and connectivity present, among others index : int The conformation identifier of the molecule. This is used to make the entry names unique, since each conformation must have its own unique entry in the dataset in the dataset Returns ------- (unique_id, success): tuple unique_id : str The unique_id that was submitted to the dataset. This is the name of the new entry in the dataset. success : bool Whether the dataset was able to successfully add the entry. If this is False, then the entry with the name corresponding to unique_id was already present in the dataset. """ # This workaround prevents cmiles from crashing if OE is installed but has # no license. Even though rdkit is specified, protomer enumeration is OE- # specific and still attempted. # oe_flag = cmiles.utils.has_openeye # cmiles.utils.has_openeye = False # attrs = cmiles.generator.get_molecule_ids(molecule, toolkit="rdkit") # cmiles.utils.has_openeye = oe_flag CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles" molecule["extras"] = {CIEHMS: entry_name} attrs = {CIEHMS: entry_name} unique_id = entry_name + f"-{index}" success = False try: ds.add_entry(unique_id, molecule, attributes=attrs, save=False) success = True except KeyError: pass return unique_id, success
50a30a25af59906ce5636ce8a176e29befd27d60
1,861
def _ensure_min_resources(progs, cores, memory, min_memory): """Ensure setting match minimum resources required for used programs. """ for p in progs: if p in min_memory: if not memory or cores * memory < min_memory[p]: memory = float(min_memory[p]) / cores return cores, memory
f311259242a73a7bc527e3601765c95153a08748
1,862
import ctypes def ctypes_pointer(name): """Create a ctypes type representing a C pointer to a custom data type ``name``.""" return type("c_%s_p" % name, (ctypes.c_void_p,), {})
d87f10ac06391379a24f166272fd42fa938e3676
1,863
def split_and_load(data, ctx_list, batch_axis=0, even_split=True): """Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads each slice to one context in `ctx_list`. Parameters ---------- data : NDArray A batch of data. ctx_list : list of Context A list of Contexts. batch_axis : int, default 0 The axis along which to slice. even_split : bool, default True Whether to force all slices to have the same number of elements. Returns ------- list of NDArray Each corresponds to a context in `ctx_list`. """ if len(ctx_list) == 1: return [d.as_in_context(ctx_list[0]) for d in data] size = len(data) num_slice = len(ctx_list) step = size // num_slice for i in range(num_slice): for k in range(i*step, (i+1)*step): # as_in_context returns a copy on the target context, so store it back data[k] = data[k].as_in_context(ctx_list[i]) return data
4b8f0d1b6b256895da3e37fbb4b1be0cd0da5c46
1,867
def string_to_weld_literal(s): """ Converts a string to a UTF-8 encoded Weld literal byte-vector. Examples -------- >>> string_to_weld_literal('hello') '[104c,101c,108c,108c,111c]' """ return "[" + ",".join([str(b) + 'c' for b in list(s.encode('utf-8'))]) + "]"
d85b016091988c9307cbed56aafdd5766c3c9be5
1,869
def verify_model_licensed(class_name : str, model_path:str): """ Load a licensed model from HDD """ try: m = eval(class_name).load(model_path) return m except Exception: print(f"Could not load Annotator class={class_name} located in {model_path}. Try updating spark-nlp-jsl")
057987d838982a85925f70c93ff2f4166b038cec
1,870
def parse_metrics(match, key): """Gets the metrics out of the parsed logger stream""" elements = match.split(' ')[1:] elements = filter(lambda x: len(x) > 2, elements) elements = [float(e) for e in elements] metrics = dict(zip(['key', 'precision', 'recall', 'f1'], [key] + elements)) return metrics
70de1ad16edfe827e0a851c719d902695696700f
1,871
def listtimes(lst, c): """repeats each element of the list c times and returns the result as a new list""" ret = [] for i in range(0, len(lst)): ret.extend([lst[i]]*c) return ret
8aef63677a1a926f355644187d58b47e437e152c
1,873
def eval_f(f, xs): """Takes a function f = f(x) and a list xs of values that should be used as arguments for f. The function eval_f should apply the function f subsequently to every value x in xs, and return a list fs of function values. I.e. for an input argument xs=[x0, x1, x2,..., xn] the function eval_f(f, xs) should return [f(x0), f(x1), f(x2), ..., f(xn)].""" return [f(x) for x in xs] # alternatively: return list(map(f, xs))
00c6ed7fc59b213a3ec9fec9feeb3d91b1522061
1,874
import numpy def rmSingles(fluxcomponent, targetstring='target'): """ Filter out targets in fluxcomponent that have only one ALMA source. """ nindiv = len(fluxcomponent) flagger = numpy.zeros(nindiv) for icomp in range(nindiv): target = fluxcomponent[targetstring][icomp] match = fluxcomponent[targetstring] == target nmatch = fluxcomponent[targetstring][match].size if nmatch == 1: flagger[icomp] = 1 goodflag = flagger == 0 fluxcomponent = fluxcomponent[goodflag] return fluxcomponent
013d5f3169fd1dcb277733627ecd5b0135bc33fb
1,876
import torch def compute_accuracy(outputs, targets, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = targets.size(0) _, preds = outputs.topk(maxk, 1, True, True) preds = preds.t() corrects = preds.eq(targets[None]) result_list = [] for k in topk: correct_k = corrects[:k].flatten().sum(dtype=torch.float32) result_list.append(correct_k * (100.0 / batch_size)) return result_list
6cfcc9e43aaaed09baae567f9cc27818c555fe5f
1,877
import io def unpack_text_io_wrapper(fp, encoding): """ If *fp* is a #io.TextIOWrapper object, this function returns the underlying binary stream and the encoding of the IO-wrapper object. If *encoding* is not None and does not match with the encoding specified in the IO-wrapper, a #RuntimeError is raised. """ if isinstance(fp, io.TextIOWrapper): if fp.writable() and encoding is not None and fp.encoding != encoding: msg = 'TextIOWrapper.encoding({0!r}) != {1!r}' raise RuntimeError(msg.format(fp.encoding, encoding)) if encoding is None: encoding = fp.encoding fp = fp.buffer return fp, encoding
f2c93babab4bff1f08e6fe5c04fbd97dd1ee8a84
1,878
def scale_bounding_box(bounding_box,scale): """Scales bounding box coords (in dict from {x1,y1,x2,y2}) by x and y given by scale in dict form {x,y}""" scaled_bounding_box = { "x1" : int(round(bounding_box["x1"]*scale["x"])) ,"y1" : int(round(bounding_box["y1"]*scale["y"])) ,"x2" : int(round(bounding_box["x2"]*scale["x"])) ,"y2" : int(round(bounding_box["y2"]*scale["y"])) } return scaled_bounding_box
8aa374537ed2ae3ae2324bd8a4819e981f281b71
1,880
import click def is_command(obj) -> bool: """ Return whether ``obj`` is a click command. :param obj: """ return isinstance(obj, click.Command)
8159aea42baca70b3218a0b82e2f4dc3f34278aa
1,881
def GetContigs(orthologs): """get map of contigs to orthologs. An ortholog can be part of only one contig, but the same ortholog_id can be part of several contigs. """ contigs = {} for id, oo in orthologs.items(): for o in oo: if o.contig not in contigs: contigs[o.contig] = [] contigs[o.contig].append(o) return contigs
0c449a31e60f1a149317de815d630c4d8a817ca1
1,882
def remove_hydrogens(list_of_lines): """ Removes hydrogen from the pdb file. To add back the hydrogens, run the reduce program on the file. """ return (line for line in list_of_lines if line['element']!=" H")
164ac79171cf6b3632fe7909ace91ffe75192b61
1,883
import torch def label_smooth_loss(log_prob, label, confidence=0.9): """ :param log_prob: log probability, shape (N, C) :param label: integer class indices, shape (N,) :param confidence: the smoothed target places this weight on the true class and spreads the rest uniformly. 0 <= confidence <= 1. :return: """ N = log_prob.size(0) C = log_prob.size(1) smoothed_label = torch.full(size=(N, C), fill_value=(1-confidence) / (C - 1)).to(log_prob) smoothed_label.scatter_(dim=1, index=torch.unsqueeze(label, dim=1), value=confidence) loss = - torch.sum(log_prob * smoothed_label) / N return loss
f1164d1a41d2c275ae4e406e2a46a0d50a2d240d
1,884
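A short usage sketch: the function expects log-probabilities (e.g. from log_softmax) and integer class labels; the shapes below are illustrative:

import torch

logits = torch.randn(4, 10)                # batch of 4, 10 classes
log_prob = torch.log_softmax(logits, dim=1)
labels = torch.tensor([1, 3, 5, 7])        # integer class ids
print(label_smooth_loss(log_prob, labels)) # scalar smoothed CE loss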
import re def parse_field_pubblicazione(field): """ Extracts year, place and publisher from the field `pubblicazione` by applying a cascade of regexps. """ exp2 = r'^(?P<place>\D+)(?:\s?\W\s?)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$' exp1 = r'^(?P<place>.*?)(?::)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$' exp3 = r'(?:.*?)?(?P<year>\d{4})' exp4 = r'^(?P<place>\D{3,})$' not_matched = 0 partly_matched = 0 result = {} result1 = re.match(exp1,field) if(result1 is None): result2 = re.match(exp2,field) if(result2 is None): result3 = re.match(exp3,field) if(result3 is None): result4 = re.match(exp4,field) if(result4 is None): not_matched += 1 else: result = result4.groupdict() else: result = result3.groupdict() else: result = result2.groupdict() else: result = result1.groupdict() return result
91aee4dabf62b3ec5bccff2a07d664312226448c
1,885
import re def _strip_build_number(api_version): """Removes the build number component from a full api version string.""" match = re.match(r"^([A-Z]+-)?([0-9]+)(\.[0-9]+){2}$", api_version) if match: return api_version[:match.start(3)] # if there aren't exactly 3 version number components, just leave it unchanged return api_version
20d8023281f05dfcb8c9fdd021b77796c72e1001
1,886
def five_five(n): """ This checks if n is a power of 2 (or 0). This is because the only way that n and (n-1) have none of the same bits (the & check) is when n is a power of 2, or 0. """ return ((n & (n-1)) == 0)
0b1cc310b5d8bd6dab6299b6a999a5dd0720ea80
1,888
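The bit trick in one line: n & (n-1) clears the lowest set bit, so it is zero exactly when n has at most one set bit:

print([n for n in range(17) if five_five(n)])  # [0, 1, 2, 4, 8, 16]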
def convert_bytes_to_size(some_bytes): """ Convert number of bytes to appropriate form for display. :param some_bytes: A string or integer :return: A string """ some_bytes = int(some_bytes) suffix_dict = { '0': 'B', '1': 'KiB', '2': 'MiB', '3': 'GiB', '4': 'TiB', '5': 'PiB' } counter = 0 while some_bytes > 1 and counter < 5: tmp = some_bytes / 1024 if tmp < 1: break else: some_bytes = tmp counter += 1 return str(format(some_bytes, '.2f')) + ' ' + str(suffix_dict[str(counter)])
d1579e0fc0850a98145910c056b3fac8be7c66f1
1,889
def degreeList(s): """Convert degrees given on command line to a list. For example, the string '1,2-5,7' is converted to [1,2,3,4,5,7].""" l = [] for r in s.split(','): t = r.split('-') if len(t) == 1: l.append(int(t[0])) else: a = int(t[0]) b = int(t[1]) l.extend(range(a,b+1, (1 if a <= b else -1))) return sorted(l)
3b517831ddab47da5cd0e36fa5913d6d59e73715
1,891
def get_bounding_box(font): """ Returns max and min bbox of given truetype font """ ymin = 0 ymax = 0 if font.sfntVersion == 'OTTO': ymin = font['head'].yMin ymax = font['head'].yMax else: for g in font['glyf'].glyphs: char = font['glyf'][g] if hasattr(char, 'yMin') and ymin > char.yMin: ymin = char.yMin if hasattr(char, 'yMax') and ymax < char.yMax: ymax = char.yMax return ymin, ymax
98161ef3426c2bb9b6dc4079c69f5c1f9d4e93a2
1,892
def create_user(client, profile, user, resend=False): """ Creates a new user in the specified user pool """ try: if resend: # Resend the confirmation email so the user can recover their password response = client.admin_create_user( UserPoolId=profile["user_pool_id"], Username=user.email, MessageAction="RESEND", ) else: response = client.admin_create_user( UserPoolId=profile["user_pool_id"], Username=user.email, UserAttributes=[ {"Name": "email", "Value": user.email}, {"Name": "email_verified", "Value": "true"}, ], ) if response["ResponseMetadata"]["HTTPStatusCode"] == 200: if resend: print(f"Resend confirmation to user {user.email} successfully") else: print(f"User {user.email} was created successfully") return response except client.exceptions.UsernameExistsException as error: print(f"User {user.email} exists") return error.response except client.exceptions.ClientError as error: print(f"Fail to create user {user.email}: {error.response}") return error.response
4c1f83c0ab7fd28dc7b1e2d8f2efa224360dfdb1
1,893
import re def sort_special_vertex_groups(vgroups, special_vertex_group_pattern='STYMO:', global_special_vertex_group_suffix='Character'): """ Given a list of special vertex group names, all with the prefix of special_vertex_group_pattern, selects all that start with global_special_vertex_group_suffix and puts them at the start of the list. This enables e.g. to easily define top-level vertex groups that always go first, followed by details that overwrite top level assignments. """ global_vg_name_pattern = special_vertex_group_pattern + \ global_special_vertex_group_suffix first = [] last = [] for g in vgroups: if re.match(global_vg_name_pattern, g) is not None: first.append(g) else: last.append(g) first.sort() last.sort() first.extend(last) return first
0cc8f0992553e5da5b37ea9a9886996cb9013582
1,895
import hashlib def obtain_file_hash(path, hash_algo="md5"): """Obtains the hash of a file using the specified hash algorithm """ hash_algo = hashlib.sha256() if hash_algo=="sha256" else hashlib.md5() block_size = 65535 with open(path, 'rb') as f: for chunk in iter(lambda: f.read(block_size),b''): hash_algo.update(chunk) return hash_algo.hexdigest()
daa996339c638eaab4f3d067dcaaa4b865a6f923
1,896
def keras_decay(step, decay=0.0001): """Learning rate decay in Keras-style""" return 1. / (1. + decay * step)
f26f1f100ecf1622d6da9958d0a6cd95a37b8b2a
1,897
import collections def _build_pep8_output(result): """ Build the PEP8 output based on flake8 results. Results from both tools conform to the following format: <filename>:<line number>:<column number>: <issue code> <issue desc> with some issues providing more details in the description within parentheses. :param result: output from flake8 :returns: list of flake8 output lines by error """ # Aggregate individual errors by error _dict = collections.defaultdict(list) for line in str(result).split("\n"): if line: # Preserve only the code and brief description for each issue to # facilitate aggregating the results. For example, # # E501 line too long (178 > 79 characters) -> E501 line too long # E303 too many blank lines (4) -> E303 too many blank lines parts = line.replace("(", ":").split(":") line_num, col_num, base_issue = parts[1:4] # Strip the whitespace around the base <issue code> <description>. # # Also restore the missing colon, stripped above, if the issue # was 'missing whitespace' surrounding a colon. issue = base_issue.strip() key = "{}:'".format(issue) if issue.endswith("after '") else issue _dict[key].append("{} ({})".format(line_num, col_num)) # Build the output as one issue per entry return ["{}: {}".format(k, ", ".join(_dict[k])) for k in sorted(_dict.keys())]
a4abda2f9d3a2d9b3524c60429b047cbfe0285d9
1,898
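A run against a fabricated flake8 result string (hypothetical file and line numbers, just to show the aggregation):

fake_result = "\n".join([
    "app.py:12:80: E501 line too long (178 > 79 characters)",
    "app.py:30:80: E501 line too long (105 > 79 characters)",
    "app.py:44:1: E303 too many blank lines (4)",
])
for line in _build_pep8_output(fake_result):
    print(line)
# E303 too many blank lines: 44 (1)
# E501 line too long: 12 (80), 30 (80)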
def form_value(request, entity, attribute): """ Return value from request params or the given entity. :param request: Pyramid request. :param entity: Instance to get attribute from if it isn't found in the request params. :param str attribute: Name of attribute to search for in the request params or on as an attribute of the given entity. """ # Check for contains, because we want the request value even if it's empty if attribute in request.params: return request.params.get(attribute, '') if entity: # Don't provide a default value, because we want to make attribute typos clear return getattr(entity, attribute) return ''
1daea77474dae5a1cb6fdab0b075a5b2f5c40865
1,899