Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, range 0 to 710k)
import re

def validate_regex(regex_str):
    """
    Checks if a given string is valid regex

    :param str regex_str: a suspicious string that may or may not be valid regex
    :rtype: bool
    :return: True if valid regex was given, False in case of TypeError or re.error
    """
    # another of those super basic functions where I am not sure if there isn't an easier way
    try:
        re.compile(regex_str)
        return True
    except re.error:
        return False
    except TypeError:  # for the input not being a string
        return False
97c6e2338eb67c2d4be74e3a18a4393a1eb36242
707,669
import base64

def _b64(b):
    """Helper function base64 encode for jose spec."""
    return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
4777d4f47de2c72b8dd95b765fc54d1abc6763f0
707,671
import torch

def reparameterize(mu, logvar, n_samples=1):
    """Reparameterization trick.

    Args:
        mu (torch.Tensor): Mean.
        logvar (torch.Tensor): Logarithm of the variance.
        n_samples (int): The number of samples.

    Returns:
        torch.Tensor: Samples drawn from the given Gaussian distribution.
            The shape is equal to mu if n_samples is 1, and
            (n_samples, *mu.shape) if n_samples is larger than 1.
    """
    std = torch.exp(0.5 * logvar)
    eps = torch.randn(n_samples, *std.size(), device=std.device)
    z = mu + eps * std
    return z.squeeze(0)
726473147ee28f470ad7d543e2b36bc512ffd0ae
707,672
def boolean_fn2(a, b, c):
    """
    Return the truth value of (a ∧ b) ∨ (¬a ∧ ¬b)
    """
    return (a and b) or (not a and not b)
c1ef37b3503866e9460fb95c4ab609278c6cff52
707,673
def b2p(exts):
    """Convert two points of a polygon into its bounding box.
    (Rectangular polygon parallel with axes.)
    """
    p0x = exts[0][0]
    p0y = exts[0][1]
    p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
    p1x = exts[0][2]
    p1y = exts[0][3]
    p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
    pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
    pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
    e = "%s %s %s %s %s" % (p0, pb, p1, pu, p0)
    i = []
    if exts[1] is not None:
        for h in exts[1]:
            p0x = h[0]
            p0y = h[1]
            p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
            p1x = h[2]
            p1y = h[3]
            p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
            pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
            pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
            i.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
    return e, i
11a51cffb8143b01b60904bef4c92e6f7335dc1d
707,674
from datetime import datetime

def parse_date(txt):
    """
    Returns None or parsed date as {h, m, D, M, Y}.
    """
    date = None
    clock = None
    for word in txt.split(' '):
        if date is None:
            try:
                date = datetime.strptime(word, "%d-%m-%Y")
                continue
            except ValueError:
                pass
            try:
                date = datetime.strptime(word, "%d.%m.%Y")
                continue
            except ValueError:
                pass
        if clock is None:
            try:
                clock = datetime.strptime(word, "%H:%M")
                continue
            except ValueError:
                pass
    if date is not None and clock is not None:
        return {'h': clock.hour, 'm': clock.minute,
                'D': date.day, 'M': date.month, 'Y': date.year}
    return None
80660673d6b4179fa7b4907983ed84bc41c4189b
707,676
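A usage sketch for parse_date above (the input string is my own example, not from the dataset):

    print(parse_date("Meeting 24.12.2021 18:30"))
    # -> {'h': 18, 'm': 30, 'D': 24, 'M': 12, 'Y': 2021}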
def ln_new_model_to_gll(py, new_flag_dir, output_dir):
    """
    make up the new gll directory based on the OUTPUT_MODEL.
    """
    script = f"{py} -m seisflow.scripts.structure_inversion.ln_new_model_to_gll --new_flag_dir {new_flag_dir} --output_dir {output_dir}; \n"
    return script
acdf28cbc2231bd2f33ae418136ce7da0fce421f
707,677
import os

def get_theme_section_directories(theme_folder: str, sections: list = None) -> list:
    """Gets a list of the available sections for a theme

    Explanation
    -----------
    Essentially this function goes into a theme folder (full path to a theme),
    looks for a folder called sections and returns a list of all the .jinja
    files available, stripped of the extension. So if `<theme folder>/sections`
    had the 3 files `education.jinja`, `work_experience.jinja` and
    `volunteering_experience.jinja`, this function would return
    ['education', 'work_experience', 'volunteering_experience'].

    Parameters
    ----------
    sections : (list, optional)
        A list of section names, or None if they need to be searched for

    theme_folder : str
        The full path to the theme folder (typically from calling locate_theme_directory())

    Returns
    -------
    list
        The name(s) of the section templates that exist within the sections
        folder, without extensions
    """
    if sections:
        return sections
    # use a fresh list per call to avoid the mutable-default-argument pitfall
    sections = []
    if os.path.exists(os.path.join(theme_folder, "sections")):
        for section in os.listdir(os.path.join(theme_folder, "sections")):
            if section.endswith(".jinja"):
                sections.append(section.replace(".jinja", ""))
    return sections
5e024546bbf878e0954660d4bd5adb765ffd7e43
707,678
import json

async def create_account(*, user):
    """
    Open an account for a user

    Save account details in json file
    """
    with open("mainbank.json", "r") as f:
        users = json.load(f)
    if str(user.id) in users:
        return False
    else:
        users[str(user.id)] = {"wallet": 0, "bank": 0}
    with open("mainbank.json", "w") as f:
        json.dump(users, f)
0e1aaccfd0c9cda6238ba8caa90e80979540f2e8
707,679
def apply_move(board_state, move, side):
    """Returns a copy of the given board_state with the desired move applied.

    Args:
        board_state (3x3 tuple of int): The given board_state we want to apply the move to.
        move (int, int): The position we want to make the move in.
        side (int): The side we are making this move for, 1 for the first player,
            -1 for the second player.

    Returns:
        (3x3 tuple of int): A copy of the board_state with the given move applied
            for the given side.
    """
    move_x, move_y = move

    def get_tuples():
        for x in range(3):
            if move_x == x:
                temp = list(board_state[x])
                temp[move_y] = side
                yield tuple(temp)
            else:
                yield board_state[x]

    return tuple(get_tuples())
b47da6ddab3bd1abf99ee558471a3696e46b8352
707,681
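A usage sketch for apply_move above (the board is my own example): player 1 takes the centre of an empty tic-tac-toe board.

    empty_board = ((0, 0, 0), (0, 0, 0), (0, 0, 0))
    print(apply_move(empty_board, (1, 1), 1))
    # -> ((0, 0, 0), (0, 1, 0), (0, 0, 0))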
def is_one_of_type(val, types):
    """Returns whether the given value is one of the given types.

    :param val: The value to evaluate
    :param types: A sequence of types to check against.
    :return: Whether the given value is one of the given types.
    """
    result = False
    val_type = type(val)
    for tt in types:
        if val_type is tt:
            result = True
    return result
4bda5ebc41aa7377a93fdb02ce85c50b9042e2c1
707,683
import os
import fnmatch
import re

def load_qadata(qa_dir):
    """
    :param qa_dir: the file path of the provided QA dataset, e.g. /data/preprocessed_data_10k/test
    :return: the dictionary of the QA dataset, for instance QA_1_
    """
    print("begin_load_qadata")
    qa_set = {}
    # os.walk generates the file names in a directory tree by walking the tree.
    # By default it walks top-down, yielding a 3-tuple
    # (dirpath, dirnames, filenames) for each directory rooted at qa_dir.
    for root, dirnames, filenames in os.walk(qa_dir):
        if dirnames == []:
            qa_id = root[root.rfind("_") + 1:]
            qa_dict = {}
            for filename in fnmatch.filter(filenames, '*.txt'):
                pattern = re.compile(r'QA_\d+_')
                # re.sub substitutes the pattern with "" in filename.
                keystr = re.sub(pattern, "", filename).replace(".txt", "")
                qa_dict[keystr] = open(root + "/" + filename).readlines()
            qa_set[qa_id] = qa_dict
    print("load_qadata_success")
    return qa_set
3f0060d34ca47951efb29388f723fe75bfaa875a
707,684
def _el_orb(string):
    """Parse the element and orbital argument strings.

    The presence of an element without any orbitals means that we want to
    plot all of its orbitals.

    Args:
        string (str): The element and orbitals as a string, in the form
            ``"C.s.p,O"``.

    Returns:
        dict: The elements and orbitals as a :obj:`dict`. For example::

            {'Bi': ['s', 'px', 'py', 'd']}

        If an element symbol is included with an empty list, then all orbitals
        for that species are considered.
    """
    el_orbs = {}
    for split in string.split(','):
        orbs = split.split('.')
        orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs
        el_orbs[orbs.pop(0)] = orbs
    return el_orbs
654d085347913bca2fd2834816b988ea81ab7164
707,686
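A usage sketch for _el_orb above (the input string is my own example): one element with explicit orbitals and one without, which expands to all of s, p, d, f.

    print(_el_orb("C.s.p,O"))
    # -> {'C': ['s', 'p'], 'O': ['s', 'p', 'd', 'f']}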
def fmla_for_filt(filt):
    """
    transform a set of column filters from a dictionary like
        { 'varX': ['lvl1','lvl2'], ... }
    into an R selector expression like
        'varX %in% c("lvl1","lvl2")' & ...
    """
    return ' & '.join([
        '{var} %in% c({lvls})'.format(
            var=k,
            lvls=','.join(map(lambda x: '"%s"' % x, v)) if type(v) == list else '"%s"' % v
        )
        for k, v in filt.items()
    ])
149d23822a408ad0d96d7cefd393b489b4b7ecfa
707,687
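A usage sketch for fmla_for_filt above (hypothetical filter dict, covering both a list-valued and a scalar-valued filter):

    print(fmla_for_filt({'varX': ['lvl1', 'lvl2'], 'varY': 'a'}))
    # -> varX %in% c("lvl1","lvl2") & varY %in% c("a")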
def gaussian_ll_pdf(x, mu, sigma):
    """Evaluates the (unnormalized) log of the normal PDF at point x

    Parameters
    ----------
    x : float or array-like
        point at which to evaluate the log pdf
    mu : float or array-like
        mean of the normal on a linear scale
    sigma : float or array-like
        standard deviation of the normal on a linear scale
    """
    log_pdf = -0.5 * (x - mu)**2.0 / sigma**2.0  # - np.log(sigma) - 0.5*np.log(2.0*np.pi)
    return log_pdf
dbf1e389ad8349093c6262b2c595a2e511f2cb28
707,688
from typing import Tuple

def ordered_pair(x: complex) -> Tuple[float, float]:
    """
    Returns the tuple (a, b), like the ordered pair in the complex plane
    """
    return (x.real, x.imag)
c67e43cf80194f7a5c7c5fd20f2e52464816d056
707,689
import os

def find_fits_file(plate_dir_list, fits_partial_path):
    """
    Returns a path

    :rtype: basestring
    """
    for plate_dir in plate_dir_list:
        fits_path = os.path.join(plate_dir, fits_partial_path)
        if os.path.exists(fits_path):
            return fits_path
    return None
24c5c0e8a42cc5f91e3935c8250b217ac2becd3f
707,690
def _pull(keys):
    """helper method for implementing `client.pull` via `client.apply`"""
    if isinstance(keys, (list, tuple, set)):
        return [eval(key, globals()) for key in keys]
    else:
        return eval(keys, globals())
779fcec45c3693bdd8316c14138a88c57f0c318c
707,692
def position(df):
    """
    Compute the daily position from the trading signal.

    :param df:
    :return:
    """
    # Derive the stock position actually held each day from the signal
    df['pos'] = df['signal'].shift(1)
    df['pos'].fillna(method='ffill', inplace=True)
    # Account for limit-up/limit-down days on which the stock cannot be traded.
    # Find days that open at the limit-up price
    cond_cannot_buy = df['开盘价'] > df['收盘价'].shift(1) * 1.097  # today's open is up 9.7% from yesterday's close
    # On limit-up open days where 'pos' would be 1, set 'pos' to NaN
    # ?? Question: why 1?
    df.loc[cond_cannot_buy & (df['pos'] == 1), 'pos'] = None
    # Find days that open at the limit-down price
    cond_cannot_buy = df['开盘价'] < df['收盘价'].shift(1) * 0.903  # today's open is down 9.7% from yesterday's close
    # On limit-down open days where 'pos' would be 0, set 'pos' to NaN
    # ?? Question: why 0?
    df.loc[cond_cannot_buy & (df['pos'] == 0), 'pos'] = None
    # On days where position is NaN, no trading is possible; the position
    # must stay the same as on the previous trading day.
    df['pos'].fillna(method='ffill', inplace=True)
    # Fill any position that is still NaN (before the first signal) with 0
    df['pos'].fillna(value=0, inplace=True)
    return df
15666e26cf8a9d6ae98ff1746aecab759de9139b
707,693
def prepare_data(data, preprocessed_data, args):
    """Prepare Data"""
    data = data.to_numpy()
    train_size = int(len(data) * args.train_split)
    test_size = len(data) - train_size
    train_X = preprocessed_data[0:train_size]
    train_Y = data[0:train_size]
    test_X = preprocessed_data[train_size:len(preprocessed_data)]
    test_Y = data[train_size:len(preprocessed_data)]
    return train_X, train_Y, test_X, test_Y
b5e120eebd6060656d71f8f76755afd0d8eccce5
707,694
import os

def _parse_archive_name(pathname):
    """Return the name of the project given the pathname of a project
    archive file.
    """
    return os.path.basename(pathname).split('.')[0]
90e6bcf019ac48b73c16f4db605e1d92c3d32595
707,696
def cli(ctx, user_id):
    """Create a new API key for a given user.

    Output:

        the API key for the user
    """
    return ctx.gi.users.create_user_apikey(user_id)
d7dafd77ef983286184b6f5aa2362bb734389696
707,698
import re

def whitespace_tokenizer(text):
    """Tokenize on whitespace, keeping whitespace.

    Args:
        text: The text to tokenize.

    Returns:
        list: A list of pseudo-word tokens.
    """
    return re.findall(r"\S+\s*", text)
e79234b15912fdc225e2571788844732296f93d7
707,699
from io import BytesIO

def bytes_to_bytesio(bytestream):
    """Convert a bytestring to a BytesIO ready to be decoded."""
    fp = BytesIO()
    fp.write(bytestream)
    fp.seek(0)
    return fp
d59e4f5ccc581898da20bf5d3f6e70f8e8712aa6
707,700
import posixpath

def pre_order_next(path, children):
    """Returns the next dir for pre-order traversal."""
    assert path.startswith('/'), path
    # First subdir is next
    for subdir in children(path):
        return posixpath.join(path, subdir)
    while path != '/':
        # Next sibling is next
        name = posixpath.basename(path)
        parent = posixpath.dirname(path)
        siblings = list(children(parent))
        assert name in siblings
        if name != siblings[-1]:
            return posixpath.join(parent, siblings[siblings.index(name) + 1])
        # Go up, find a sibling of the parent.
        path = parent
    # This was the last one
    return None
fcbe2b17b29396ac978f4a931a454c988e6fe05b
707,701
def gettiming(process_list, typetiming):
    """
    Used to get a sorted set of the different durations needed to convert
    to morse code.
    """
    timing = []
    for x in process_list:
        if x[0] == typetiming:
            timing.append(x[3])
    timing = set(timing)
    return sorted(timing)
8e71449eacaee086f9f9147e1c3b8602ce8e553f
707,702
import subprocess

def launch_subprocess(command):
    """
    Process launch helper

    :param command: Command to execute
    :type command: list[str]|str
    :return: Popen object
    """
    is_shell = not isinstance(command, (list, tuple))
    return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=is_shell, close_fds=True)
f5ab217ffdec69147bc79061b0ba029225c7b8a0
707,703
def _identity_error_message(msg_type, message, status_code, request):
    """
    Set the response code on the request, and return a JSON blob representing
    an Identity error body, in the format Identity returns error messages.

    :param str msg_type: What type of error this is - something like
        "badRequest" or "itemNotFound" for Identity.
    :param str message: The message to include in the body.
    :param int status_code: The status code to set
    :param request: the request to set the status code on

    :return: dictionary representing the error body
    """
    request.setResponseCode(status_code)
    return {
        msg_type: {
            "message": message,
            "code": status_code
        }
    }
d73e182fc794f01c3415069ffeb37e76a01df7af
707,704
def is_leap_year(year):
    """
    returns True for leap year and False otherwise

    :param int year: calendar year
    :return bool:
    """
    # equivalent: return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
    return (year % 100 != 0 or year % 400 == 0) if year % 4 == 0 else False
5bd0bb7a44dc7004b9198cb3d8ed244dc02417c2
707,705
def update_search_params(context, **kwargs):
    """Update the set parameters of the current request"""
    params = context["request"].GET.copy()
    for k, v in kwargs.items():
        params[k] = v
    return params.urlencode()
e3ce5a5a1dadc90bb544a761e154214d7a538f30
707,706
import subprocess

def preprocess_field_data(subdelimiter, field_value, path_to_script):
    """Executes a field preprocessor script and returns its output and exit status code.

    The script is passed the field subdelimiter as defined in the config YAML
    and the field's value, and prints a modified version of the value (result)
    back to this function.
    """
    cmd = subprocess.Popen([path_to_script, subdelimiter, field_value], stdout=subprocess.PIPE)
    result, stderrdata = cmd.communicate()
    return result, cmd.returncode
9cf0261c98652d0811868c91fbb3ab15e6c07af3
707,707
import torch

def repeat_batch(t, K, dim=0):
    """Repeat a tensor while keeping the concept of a batch.

    :param t: `torch.Tensor`: The tensor to repeat.
    :param K: `int`: The number of times to repeat the tensor.
    :param dim: `int`: The dimension to repeat in. This should be the
        batch dimension.

    :returns: `torch.Tensor`: The repeated tensor. The new shape will be
        batch size * K at dim, the rest of the shapes will be the same.

    Example::

        >>> a = torch.arange(10).view(2, -1)
        >>> a
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> a.repeat(2, 1)
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> repeat_batch(a, 2)
        tensor([[0, 1, 2, 3, 4],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [5, 6, 7, 8, 9]])
    """
    shape = t.shape
    tiling = [1] * (len(shape) + 1)
    tiling[dim + 1] = K
    tiled = t.unsqueeze(dim + 1).repeat(tiling)
    old_bsz = shape[dim]
    new_bsz = old_bsz * K
    new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1:])
    return tiled.view(new_shape)
31ae6e02bd23c56049a4f8e5ea9f36e5b6186678
707,709
import collections

def get_deps_info(projects, configs):
    """Calculates dependency information (forward and backwards) given configs."""
    deps = {p: configs[p].get('deps', {}) for p in projects}

    # Figure out the backwards version of the deps graph. This allows us to
    # figure out which projects we need to test given a project. So, given
    #
    #     A
    #    / \
    #   B   C
    #
    # We want to test B and C, if A changes. Recipe projects only know about
    # the B -> A and C -> A dependencies, so we have to reverse this to get
    # the information we want.
    downstream_projects = collections.defaultdict(set)
    for proj, targets in deps.items():
        for target in targets:
            downstream_projects[target].add(proj)

    return deps, downstream_projects
10215dfb623b8ebaaabdb2d1bcffd876d37f9f66
707,710
from operator import add

def average(arr, mode="mixed"):
    """
    average(arr, mode) takes the average of a given array

    Once again, the modes of add() can be used here to denote what the type
    of the array is. The function below, determine_mode(arr), can be used to
    determine the correct mode for your array.
    """
    # NOTE: the docstring describes a mode-aware add(arr, mode) helper, but the
    # import above brings in operator.add, which simply adds its two arguments.
    if len(arr) == 0:
        return 0.0
    return add(arr, mode) / len(arr)
74d0b836e6877d1f7d23b69a191e653bcffd6f00
707,711
def compare_floats(value1: float, value2: float):
    """Function that compares two floats for approximate equality."""
    return abs(value1 - value2) <= 10**-6
225a8fd4d472fe630efe32c506cb1ac3f7ff4b5f
707,712
def prettify_seconds(seconds):
    """
    Prettifies seconds.

    Takes number of seconds (int) as input and returns a prettified string.

    Example:
        >>> prettify_seconds(342543)
        '3 days, 23 hours, 9 minutes and 3 seconds'
    """
    if seconds < 0:
        raise ValueError("negative input not allowed")
    signs = {"s": {"singular": "second", "plural": "seconds"},
             "h": {"singular": "hour", "plural": "hours"},
             "min": {"singular": "minute", "plural": "minutes"},
             "d": {"singular": "day", "plural": "days"}}
    separator = ", "
    last_separator = " and "

    def get_sign(unit, value):
        if value == 1 or value == -1:
            return signs[unit]["singular"]
        else:
            return signs[unit]["plural"]

    days, remainder = divmod(seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    daystext = "{} {}".format(days, get_sign("d", days)) if days else ""
    hourstext = "{} {}".format(hours, get_sign("h", hours)) if hours else ""
    minutestext = "{} {}".format(minutes, get_sign("min", minutes)) if minutes else ""
    if (not seconds) and (days or hours or minutes):
        secondstext = ""
    else:
        secondstext = "{} {}".format(seconds, get_sign("s", seconds))
    output_list = [daystext, hourstext, minutestext, secondstext]
    filtered = [item for item in output_list if item]
    if len(filtered) <= 2:
        output = last_separator.join(filtered)
    else:
        output = separator.join(filtered[:-1]) + last_separator + filtered[-1]
    return output
4b77f9ed3d2085895ef15c6be30b7bfe83d1f49d
707,713
def default_preprocessing(df):
    """Perform the same preprocessing as the original analysis:
    https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
    """
    return df[(df.days_b_screening_arrest <= 30)
              & (df.days_b_screening_arrest >= -30)
              & (df.is_recid != -1)
              & (df.c_charge_degree != 'O')
              & (df.score_text != 'N/A')]
e6f4d8ceaa09fe71657e7936db886c3eabfb7aa0
707,714
import torch

def calculate_regularization_term(means, n_objects, norm):
    """means: bs, n_instances, n_filters"""
    bs, n_instances, n_filters = means.size()
    reg_term = 0.0
    for i in range(bs):
        if n_objects[i]:
            _mean_sample = means[i, : n_objects[i], :]  # n_objects, n_filters
            _norm = torch.norm(_mean_sample, norm, 1)
            reg_term += torch.mean(_norm)
    reg_term = reg_term / bs
    return reg_term
b6eb43a8915449c7e86d01a08b3ea2e77ae51064
707,715
def ceki_filter(data, bound):
    """Check if the convergence checks ceki are within bounds."""
    ceki = data["ceki"].abs() < bound
    return ceki
09cd53f44241b13cf77eb2299c802ed238580259
707,716
def readGlobalFileWithoutCache(fileStore, jobStoreID):
    """Reads a jobStoreID into a file and returns it, without touching
    the cache.

    Works around toil issue #1532.
    """
    f = fileStore.getLocalTempFile()
    fileStore.jobStore.readFile(jobStoreID, f)
    return f
8c784e809acdc1a7fb3d8c108f85ce61bd1ad11c
707,718
def nullColumns(fileHeaders, allKeys):
    """
    Return a set of column names that don't exist in the file.
    """
    s1 = set(fileHeaders)
    s2 = set(allKeys)
    return s2.difference(s1)
17a0bb80414fe88f213399958b217ccf6fb5d1e9
707,720
import math

def distance_km(lat1, lon1, lat2, lon2):
    """
    return distance between two points in km using haversine
    http://en.wikipedia.org/wiki/Haversine_formula
    http://www.platoscave.net/blog/2009/oct/5/calculate-distance-latitude-longitude-python/
    Author: Wayne Dyck
    """
    ret_val = 0
    radius = 6371  # km
    lat1 = float(lat1)
    lon1 = float(lon1)
    lat2 = float(lat2)
    lon2 = float(lon2)
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \
        * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    ret_val = radius * c
    return ret_val
f50d444b5769b1d00045429e3d577ec22f922774
707,721
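A usage sketch for distance_km above (coordinates are my own example, approximately Paris and London; the result should come out at roughly 344 km):

    print(distance_km(48.8566, 2.3522, 51.5074, -0.1278))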
def _flip(r, u):
    """Negate `r` if `u` is negated, else identity."""
    return ~r if u.negated else r
18ddcf5132867f5646c729bdadcb2c5077df8c03
707,722
import csv

def obterUFEstadoPorNome(estado):
    """
    Returns the UF code of a state given the state's name

    :param estado: Name of the state
    :return codigoDoEstado: UF code of the state
    """
    try:
        with open("./recursos/estados.csv", newline="") as csvfile:
            reader = csv.DictReader(csvfile, delimiter=";")
            for state in reader:
                if state["Unidade_Federativa"].lower() == estado.strip().lower():
                    return state["UF"]
    except Exception as exc:
        print("[ERROR]{0}".format(exc))
9b136fe8c557e5f75bca235cf66168f92244a4e6
707,723
import random

def get_random_byte_string(byte_length):
    """
    Use this function to generate a random byte string
    """
    byte_list = []
    i = 0
    while i < byte_length:
        byte_list.append(chr(random.getrandbits(8)))
        i = i + 1
    # Make into a string
    byte_string = ''.join(byte_list)
    return byte_string
0ea923a045beb476501dc3d8983f3fe89efef008
707,724
def is_igb(request):
    """
    Checks the headers for IGB headers.
    """
    if 'HTTP_EVE_TRUSTED' in request.META:
        return True
    return False
1e6485614063a9f4eec36407b60154300d38db76
707,725
import math

def area(rad: float = 1.0) -> float:
    """
    return area of a circle

    Note: the formula below computes d**2 * pi / 4, i.e. it treats the
    argument as a diameter; the doctests are consistent with that.

    >>> area(2.0)
    3.141592653589793
    >>> area(3.0)
    7.0685834705770345
    >>> area(4.0)
    12.566370614359172
    """
    return rad * rad * math.pi / 4
702fc4a9fa370804d88d1182f966890bc0634466
707,726
def create_roots(batch_data):
    """
    Create root nodes for use in MCTS simulation.

    Takes as a parameter a list of tuples, containing data for each game.
    This data consists of: game type, state, type of player 1 and type of player 2.
    """
    root_nodes = []
    for data in batch_data:
        game = data[0]
        state = data[1]
        player_1 = data[2]
        player_2 = data[3]
        player = player_1 if game.player(state) else player_2
        root_nodes.append(player.create_root_node(state))
    return root_nodes
d07b0781605b01d08c9ef78f30dad9254ade9907
707,727
import argparse

def get_parser():
    """Creates an ArgumentParser object."""
    parser = argparse.ArgumentParser(
        "clinker",
        description="clinker: Automatic creation of publication-ready"
        " gene cluster comparison figures.\n\n"
        "clinker generates gene cluster comparison figures from GenBank files."
        " It performs pairwise local or global alignments between every sequence"
        " in every unique pair of clusters and generates interactive, to-scale"
        " comparison figures using the clustermap.js library.",
        epilog="Example usage\n-------------\n"
        "Align clusters, plot results and print scores to screen:\n"
        "  $ clinker files/*.gbk\n\n"
        "Cameron Gilchrist, 2020",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("files", help="Gene cluster GenBank files", nargs="+")

    alignment = parser.add_argument_group("Alignment options")
    alignment.add_argument(
        "-na", "--no_align",
        help="Do not align clusters",
        action="store_true",
    )
    alignment.add_argument(
        "-i", "--identity",
        help="Minimum alignment sequence identity",
        type=float,
        default=0.3,
    )
    alignment.add_argument(
        "-j", "--jobs",
        help="Number of alignments to run in parallel (0 to use the number of CPUs)",
        type=int,
        default=0,
    )

    output = parser.add_argument_group("Output options")
    output.add_argument("-s", "--session", help="Path to clinker session")
    output.add_argument("-ji", "--json_indent", type=int, help="Number of spaces to indent JSON")
    output.add_argument("-f", "--force", help="Overwrite previous output file", action="store_true")
    output.add_argument("-o", "--output", help="Save alignments to file")
    output.add_argument(
        "-p", "--plot",
        nargs="?",
        const=True,
        default=False,
        help="Plot cluster alignments using clustermap.js. If a path is given,"
        " clinker will generate a portable HTML file at that path. Otherwise,"
        " the plot will be served dynamically using Python's HTTP server.",
    )
    output.add_argument("-dl", "--delimiter", help="Character to delimit output by")
    output.add_argument("-dc", "--decimals", help="Number of decimal places in output", default=2)
    output.add_argument(
        "-hl", "--hide_link_headers",
        help="Hide alignment column headers",
        action="store_true",
    )
    output.add_argument(
        "-ha", "--hide_aln_headers",
        help="Hide alignment cluster name headers",
        action="store_true",
    )

    viz = parser.add_argument_group("Visualisation options")
    viz.add_argument(
        "-ufo", "--use_file_order",
        action="store_true",
        help="Display clusters in order of input files",
    )

    return parser
881c7bc495edd37011c07d5db6ac80c816855f4a
707,728
def simulate_until_target_substate_or_max_t(
        _simulate_until_attractor_or_target_substate_or_max_t, initial_state,
        perturbed_nodes_by_t, predecessor_node_lists, truth_tables):
    """
    Perform simulation to figure out whether it reaches the target substate.
    Does not return states of simulations that don't reach the target substate.
    The target substate is not considered as reached until all the perturbations
    are carried out. The initial state can be considered as a reached target
    substate if no perturbations are present.

    :param _simulate_until_attractor_or_target_substate_or_max_t: [function] to perform simulation
    :param initial_state: initial state of the network
    :param perturbed_nodes_by_t: dict (by time steps) of dicts (by nodes) of node states
    :param predecessor_node_lists: list of predecessor node lists
    :param truth_tables: list of dicts (key: tuple of predecessor node states, value: resulting node state)
    :return: list of states where last state contains target substate, or None if target substate was not reached
    """
    states, *_, target_substate_is_reached, _ = _simulate_until_attractor_or_target_substate_or_max_t(
        initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables)
    return states if target_substate_is_reached else None
526ef8085dcbe4bcbc112c3bd4626ec5247e2f97
707,729
from typing import Mapping

def flat_dict(d, prefix=""):
    """
    Loop through dictionary d.
    Append any key, val pairs to the return dict ret.
    Add the prefix to any key param.
    Recurse if encountered value is a nested dictionary.
    """
    if not isinstance(d, Mapping):
        return d
    ret = {}
    for key, val in d.items():
        if isinstance(val, Mapping):
            ret = {**ret, **flat_dict(val, prefix=prefix + str(key) + "_")}
        else:
            ret[prefix + str(key)] = val
    return ret
f0c1f519126dea89c25ee38a9b0dd788c40d2088
707,732
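A usage sketch for flat_dict above (hypothetical nested dict): keys of nested mappings are joined with "_" into a single flat level.

    print(flat_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}}))
    # -> {'a': 1, 'b_c': 2, 'b_d_e': 3}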
import logging

def _get_filehandler_with_formatter(logname, formatter=None):
    """
    Return a logging FileHandler for given logname using a given logging formatter

    :param logname: Name of the file where logs will be stored, ".log"
        extension will be added
    :param formatter: An instance of logging.Formatter or None if the default
        should be used
    :return:
    """
    handler = logging.FileHandler(logname)
    if formatter is not None:
        handler.setFormatter(formatter)
    return handler
1cc6f83480e691c4c54c359deabd6364da65f320
707,733
from typing import Tuple

def to_int(s: str) -> Tuple[bool, int]:
    """Convert a string s to an int, if possible."""
    try:
        n = int(s)
        return True, n
    except Exception:
        return False, 0
27d24b881f5987037f750a1cee022f7b1daa7c33
707,734
from typing import List

def multiply_aug(data_aug: List[str], factor: int) -> List[str]:
    """
    The original idea here was to speed up some VASP calculations for
    supercells by initializing the entire CHGCAR file.
    The current code does not deal with transformation of the augmentation
    charges after regridding.

    This is a naive way to multiply the augmentation data in the CHGCAR;
    a real working implementation will require analysis of the PAW projection
    operators. However, even with such an implementation, the speed up will be
    minimal due to VASP's internal minimization algorithms.

    Args:
        data_aug: The original augmentation data from a CHGCAR
        factor: The multiplication factor (some integer number of times it gets repeated)

    Returns:
        List of strings for each line of the augmentation data.
    """
    res = []  # type: List[str]
    cur_block = []  # type: List[str]
    cnt = 0
    for ll in data_aug:
        if "augmentation" in ll:
            if cur_block:
                for j in range(factor):
                    cnt += 1
                    cur_block[0] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n"
                    res.extend(cur_block)
            cur_block = [ll]
        else:
            cur_block.append(ll)
    else:
        # for-else: flush the final block once the loop has finished
        for j in range(factor):
            cnt += 1
            cur_block[0] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n"
            res.extend(cur_block)
    return res
2baef4c98dbb83f1a08f11e58f3c4cf82ad8ea64
707,735
import torch

def mlp_layers(nch_input, nch_layers, b_shared=True, bn_momentum=0.1, dropout=0.0):
    """ [B, Cin, N] -> [B, Cout, N] or [B, Cin] -> [B, Cout] """
    layers = []
    last = nch_input
    for i, outp in enumerate(nch_layers):
        if b_shared:
            weights = torch.nn.Conv1d(last, outp, 1)
        else:
            weights = torch.nn.Linear(last, outp)
        layers.append(weights)
        layers.append(torch.nn.BatchNorm1d(outp, momentum=bn_momentum))
        layers.append(torch.nn.ReLU())
        if not b_shared and dropout > 0.0:
            layers.append(torch.nn.Dropout(dropout))
        last = outp
    return layers
8085b99b828fcbadee191d90737d582f7dd9ce73
707,736
def getTrackIds(sp, username, playlist, offset=0):
    """
    Returns the ids of the tracks contained in a playlist.

    :param sp: A spotipy.Spotify object to be used for the request.
    :param username: The username of the user whose playlists you want to retrieve.
    :param playlist: Name of the playlist from which the tracks are retrieved.
    :param offset: Do not worry about this parameter, it is used for recursion.
    :returns: A list containing all the ids of the tracks that are in the playlist.
    """
    limit = 100
    fields = "items(track(id)), total"
    api_response = sp.user_playlist_tracks(username, playlist["id"], fields,
                                           limit=limit, offset=offset)
    track_ids = [x["track"]["id"] for x in api_response["items"]]
    if api_response["total"] > limit + offset:
        next_page = getTrackIds(sp, username, playlist, offset + limit)
        for item in next_page:
            track_ids.append(item)
    return track_ids
5b4e621022f49137b7fd4547bf5ab4efe92b4515
707,737
def filter_subclasses(superclass, iter):
    """Returns an iterable of class objects which are subclasses of
    `superclass`, filtered from a source iteration.

    :param superclass: The superclass to filter against
    :return: An iterable of classes which are subclasses of `superclass`
    """
    return filter(lambda klass: issubclass(klass, superclass), iter)
2a891835379dfa3661d781d0c1860b650df013f0
707,738
def keep_point(p, frame):
    """
    p: TrackedPoint instance
    frame: image (numpy array)
    """
    if not p.in_bounds():
        return False
    if p.coasted_too_long():
        return False
    if p.coasted_too_far():
        return False
    return True
7f51b9f15ac8befe07b463875b9245194aebbef0
707,739
import pathlib

def path_to_filename(path, with_suffix=True):
    """Get filename from path.

    Parameters
    ==========
    path : str
        Path to retrieve file name from e.g. '/path/to/image.png'.
    with_suffix : bool
        Whether to include the suffix of file path in file name.

    Returns
    =======
    str
        The file name of the path e.g. 'image.png' or 'image' if
        `with_suffix` is false.
    """
    p = pathlib.Path(path)
    if with_suffix:
        return str(p.name)
    else:
        return str(p.with_suffix("").name)
45ecfb6e263e65de7165a69eda99bc8de2a157f4
707,740
def get_blueprint_docs(blueprints, blueprint):
    """Returns doc string for blueprint."""
    doc_string = blueprints[blueprint].__doc__
    return doc_string
8a334a9ddd1ff5fe844821152f4312b2db0e9da5
707,742
def is_binary(file_path):
    """
    Returns True if the file is binary
    """
    with open(file_path, 'rb') as fp:
        data = fp.read(1024)
        if not data:
            return False
        if b'\0' in data:
            return True
        return False
2df56f93d4e31220a580bf1e659c3c51b96260d2
707,743
def handle_over_max_file_size(error):
    """
    Args:
        error:

    Returns:
    """
    # str() is needed here: concatenating the exception object itself would raise a TypeError
    print("werkzeug.exceptions.RequestEntityTooLarge: " + str(error))
    return 'result : file size is over the limit.'
2bbdc1e38dea46ac08c314b3962ed63063578021
707,744
def firstUniqChar(self, s):
    """
    :type s: str
    :rtype: int
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    index = [s.index(l) for l in letters if s.count(l) == 1]
    return min(index) if len(index) > 0 else -1
8b42b281c9e80cf89fb9952a0fe7c60c5270c210
707,745
def dict_remove_key(d, key, default=None):
    """
    removes a key from dict __WITH__ side effects

    Returns the found value if it was there (default=None).
    It also modifies the original dict.
    """
    return d.pop(key, default)
47bd0edf2bbeb9bad5c696d289c69d2d9eba6a1b
707,746
def hex_machine(emit):
    """
    State machine for hex escaped characters in strings

    Args:
        emit (callable): callback for parsed value (number)

    Returns:
        callable: hex-parsing state machine
    """
    left = 4
    num = 0

    def _hex(byte_data):
        nonlocal num, left
        if 0x30 <= byte_data <= 0x39:  # 0-9
            i = byte_data - 0x30
        elif 0x61 <= byte_data <= 0x66:  # a-f
            i = byte_data - 0x57
        elif 0x41 <= byte_data <= 0x46:  # A-F
            i = byte_data - 0x37
        else:
            raise Exception(
                "Invalid hex char in string hex escape: " + hex(byte_data))
        left -= 1
        num |= i << (left * 4)
        if left:
            return _hex
        return emit(num)

    return _hex
39232fdaf3c0ae19154e28307fb7f1254133dc94
707,747
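A usage sketch for hex_machine above (the driver loop is my own example, not part of the original): parsing the four hex digits of "00e9" byte by byte makes the machine emit 0xe9 == 233.

    result = []
    state = hex_machine(result.append)
    for byte in b"00e9":
        state = state(byte)
    print(result)  # [233]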
def nightwatch_environment(request):  # convenience spelling
    """Run tests against this environment (staging, production, etc.)"""
    return request.config.getoption('--nightwatch-environment')
dc284660e062abf1b74a327e4b045cf79a64ee3a
707,748
def resolve(match, *objects):
    """Given an array of objects and a regex match, this function returns the
    first matched group if it exists in one of the objects, otherwise returns
    the original fully matched string from the regex.

    Example: if regex = \\\.([a-z]) and string = test\.abc, then the
    match = {group0: \.abc, group1: abc}. Assuming one object:
    - obj = {abc: def}, then we return 'def'
    - obj = {test: value}, then we return \.abc

    Args:
        objects (array[dict]): the array of objects we use to look up the key in match.group(1)
        match: the regex match object

    Returns:
        str: the value of the matched group(1) in the first object found if it
        exists, otherwise the fully matched string.
    """
    for obj in objects:
        if obj is not None and match.group(1) in obj:
            return str(obj[match.group(1)])
    return match.group(0)
52f59fb5248ba635866fcd59a549067c3984e460
707,749
import argparse

def parse_args(args):
    """
    Parse command line parameters

    Parameters
    ----------
    args : list
        command line parameters as list of strings

    Returns
    -------
    argparse.Namespace : obj
        command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Create mesh and linear system of a PDE via Galerkin's method."
    )
    parser.add_argument(
        "-f",
        "--file",
        dest="data_path",
        help="filepath to save data at",
        default="../../data/Galerkins_method/",
        type=str,
    )
    parser.add_argument(
        "-r",
        "--resolutions",
        dest="resolutions",
        help="Mesh resolutions.",
        default=[6, 128],
        type=list,
    )
    return parser.parse_args(args)
c87a3dbb37b84076ac4d1cf3506a69abaac2c968
707,750
import decimal

def float_to_str(f, p=20):
    """
    Convert the given float to a string without resorting to scientific notation.

    @param f: the float value
    @param p: precision
    """
    if type(f) == str:
        f = float(f)
    ctx = decimal.Context(p)
    d1 = ctx.create_decimal(repr(f))
    return format(d1, 'f')
551ab2f58b48e4005d8b5a85a7eb096e4e749d23
707,751
def _vagrant_format_results(line):
    """Extract fields from vm status line.

    :param line: Status line for a running vm
    :type line: str
    :return: (<vm directory path>, <vm status>)
    :rtype: tuple of strings
    """
    line_split = line.split()
    return (line_split[-1], line_split[-2],)
78788572e6b695696621775c28ae8b3a1e577ee3
707,753
from typing import Dict

def binary_to_single(param_dict: Dict[str, float], star_index: int) -> Dict[str, float]:
    """
    Function for converting a dictionary with atmospheric parameters of a
    binary system to a dictionary of parameters for one of the two stars.

    Parameters
    ----------
    param_dict : dict
        Dictionary with the atmospheric parameters of both stars. The
        keywords end either with ``_0`` or ``_1`` that correspond with
        ``star_index=0`` or ``star_index=1``.
    star_index : int
        Star index (0 or 1) that is used for the parameters in ``param_dict``.

    Returns
    -------
    dict
        Dictionary with the parameters of the selected star.
    """
    new_dict = {}

    for key, value in param_dict.items():
        if star_index == 0 and key[-1] == "0":
            new_dict[key[:-2]] = value
        elif star_index == 1 and key[-1] == "1":
            new_dict[key[:-2]] = value
        elif key in ["teff", "logg", "feh", "c_o_ratio", "fsed", "radius", "distance"]:
            new_dict[key] = value

    return new_dict
21099162ffe83715892abf82660e35ee98e02930
707,755
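A usage sketch for binary_to_single above (parameter names and values are my own example): splitting a binary parameter dictionary into the parameters of the secondary star.

    params = {"teff_0": 5000.0, "teff_1": 4500.0, "logg_0": 4.5, "logg_1": 4.0, "distance": 100.0}
    print(binary_to_single(params, star_index=1))
    # -> {'teff': 4500.0, 'logg': 4.0, 'distance': 100.0}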
def welcome():
    """List all available api routes."""
    return (
        """Available Routes:
        /api/v1.0/precipitation
            Convert the query results to a dictionary using date as the key and prcp as the value.
            Return the JSON representation of your dictionary.
        /api/v1.0/stations
            Return a JSON list of stations from the dataset.
        /api/v1.0/tobs
            Return a JSON list of temperature observations (TOBS) for the previous year.
        /api/v1.0/start_date
        /api/v1.0/start_date/end_date
            Return a JSON list of the minimum temperature, the average temperature,
            and the max temperature for a given start or start-end range.
            When given the start only, calculate TMIN, TAVG, and TMAX for all dates
            greater than and equal to the start date.
            When given the start and the end date, calculate the TMIN, TAVG, and TMAX
            for dates between the start and end date inclusive.
        """
    )
83fcd43ff8dddd0596232dfb4420525bc592b583
707,756
def build_data_request(mac, request_type='current', interval=1, units='english'):
    """
    Creates RainWise API request for Recent Data based on station mac,
    format (optional), and units (optional)
    """
    # Check if interval requested is a valid interval
    if interval not in [1, 5, 10, 15, 30, 60]:
        raise ValueError('Invalid Request: Parameter interval must be 1, 5, 10, 15, 30, or 60')

    # Check if units requested are valid units
    if units.lower() not in ['english', 'metric']:
        raise ValueError('Invalid Request: Parameter units must be english or metric')

    # Build request URL for current conditions
    if request_type == 'current':
        return f'http://api.rainwise.net/main/v1.4/get-data.php?mac={mac}&format=json'

    # Build request URL for recent data
    elif request_type == 'recent':
        return f'http://api.rainwise.net/main/v1.4/get-recent.php?mac={mac}&interval={interval}&units={units}&format=json'

    raise ValueError("Invalid Request: Parameter request_type must be either 'current' or 'recent'")
733c20f5c67fe2c630427bfb70ab563df111558c
707,757
import os

def canonicalize(top_dir):
    """
    Canonicalize filepath.
    """
    return os.path.realpath(top_dir)
ad0eb534bed1ad656820de776a1845161bdafced
707,758
import time

def convert_epoch_to_mysql_timestamp(epoch_timestamp):
    """
    Converts a given epoch timestamp in seconds to the MySQL datetime format.

    :param epoch_timestamp: The timestamp as seconds since epoch time
    :return: The MySQL timestamp string in the format 'Y-m-d HH:MM:SS'
    :rtype: str
    """
    try:
        epoch = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(epoch_timestamp))
        return epoch
    except Exception as e:
        print(e)
        return None
15647a816e638e7668e2e830ebc4f1c6fdb2f030
707,759
def check_public_key(pk):
    """Checks if a given string is a public key (or at least if it is
    formatted as if it is).

    :param pk: ECDSA public key to be checked.
    :type pk: hex str
    :return: True if the key matches the format, raise exception otherwise.
    :rtype: bool
    """
    prefix = pk[0:2]
    l = len(pk)
    if prefix not in ["02", "03", "04"]:
        raise Exception("Wrong public key format.")
    if prefix == "04" and l != 130:
        raise Exception("Wrong length for an uncompressed public key: " + str(l))
    elif prefix in ["02", "03"] and l != 66:
        raise Exception("Wrong length for a compressed public key: " + str(l))
    else:
        return True
120b3e88a96db45e5e4df0996414448da8b84462
707,760
def empty_tree(input_list):
    """Recursively iterate through values in nested lists."""
    for item in input_list:
        if not isinstance(item, list) or not empty_tree(item):
            return False
    return True
1dceb351aac4db23b57394a531db38a3edf61a8c
707,761
import re

def apply_subst(name, user):
    """
    user.username forced to lowercase (VMware Horizon)
    """
    name = re.sub(r'_SCIPER_DIGIT_', user.sciper_digit, name)
    name = re.sub(r'_SCIPER_', user.sciper, name)
    name = re.sub(r'_USERNAME_', user.username.lower(), name)
    name = re.sub(r'_HOME_DIR_', user.home_dir, name)
    name = re.sub(r'_GROUPNAME_', user.groupname, name)
    name = re.sub(r'_DOMAIN_', user.domain, name)
    name = re.sub(r'_UID_', user.uid, name)
    name = re.sub(r'_GID_', user.gid, name)
    name = re.sub(r'_FSTYPE_', user.automount_fstype, name)
    name = re.sub(r'_HOST_', user.automount_host, name)
    name = re.sub(r'_PATH_', user.automount_path, name)
    name = re.sub(r'_OPTIONS_', user.automount_options, name)
    return name
b2df5630cc63ecf0e8468e2eb19019ec4bd9ad2a
707,762
def A_real_deph(Q_deph, Kt_real_deph, deltaT_diff_deph):
    """
    Calculates the real heat transfer area.

    Parameters
    ----------
    Q_deph : float
        The heat load of dephlegmator, [W], [J/s]
    deltaT_diff_deph : float
        The coefficient difference of temperatures, [degrees celsius]
    Kt_real_deph : float
        The heat transfer coefficient, [W/(m**2 * degrees celsius)]

    Returns
    -------
    A_real_deph : float
        The real heat transfer area, [m**2]

    References
    ----------
    Romankov, formula 4.72, p. 168
    """
    return Q_deph / (Kt_real_deph * deltaT_diff_deph)
5c70a6e179922f90fbb4fda859d6911eb1f048e6
707,763
import random
import bisect

def generate_sector(size: int, object_weight: list) -> dict:
    """
    Generates a Sector with Weighted Spawns

    Args:
        size: Int Representing the Size of the Sector (Size X Size)
        object_weight: A Nested List with Object / Value Types

    Examples:
        generate_sector(6, [["*", 50], ["#", 10]]) would output a Map File
        where * is far more Common than #

    Returns:
        A Dict with Lists inside which Represent the Map Data per Row
    """
    if size == 0:
        raise ValueError("The Sector Size can't be 0")
    size += 1
    output = {}
    placed_player = False
    totals = []
    running_total = 0
    for w in object_weight:
        running_total += w[1]
        totals.append(running_total)

    def next_object():
        """ Gets a Random Object from the Object - Weight List """
        ran = random.random() * totals[-1]
        i = bisect.bisect_right(totals, ran)
        return object_weight[i][0]

    for x in range(1, size):
        row = []
        for y in range(1, size):
            obj = next_object()
            if placed_player is False and obj == "@":
                row.append(obj)
                placed_player = True
                continue
            elif placed_player is True and obj == "@":
                # only one player marker is allowed; re-roll until another object comes up
                while obj == "@":
                    obj = next_object()
            row.append(obj)
        output[x] = row
    return output
514195b66c707b2e0dd67ea47b57fe56c1d28a86
707,764
def parse_kafka_table(beamsqltable, name, logger):
    # loop through the kafka structure
    # map all key value pairs to 'key' = 'value',
    # except properties
    """ parse kafka parameters """
    ddl = ""
    kafka = beamsqltable.spec.get("kafka")
    if not kafka:
        message = f"Beamsqltable {name} has no Kafka connector descriptor."
        logger.warning(message)
        return None
    # check mandatory fields in Kafka: topic, bootstrap.servers
    if not kafka.get("topic"):
        message = f"Beamsqltable {name} has no kafka topic."
        logger.warning(message)
        return None
    try:
        _ = kafka["properties"]["bootstrap.servers"]
    except KeyError:
        message = f"Beamsqltable {name} has no kafka bootstrap servers found"
        logger.warning(message)
        return None
    # the other fields are inserted; there is no check for valid fields yet
    for kafka_key, kafka_value in kafka.items():
        # properties are iterated separately
        if kafka_key == 'properties':
            for property_key, property_value in kafka_value.items():
                ddl += f",'properties.{property_key}' = '{property_value}'"
        else:
            ddl += f", '{kafka_key}' = '{kafka_value}'"
    key_format = kafka.get("key.format")
    if key_format is None:
        message = f"Beamsqltable {name} has no key.format but it is mandatory for upsert-kafka"
        logger.warning(message)
        return None
    return ddl
5a8baf4ee5ef935b12cd90957854c6c1aed3c4e5
707,765
def compare_AlphaFz(sq_amp, sq_amp_baseline):
    """
    Compare the baseline alpha squared amplitude with that of a single epoch.

    Parameters
    ----------
    sq_amp : float
        Alpha squared amplitude (Fz) from a single epoch
    sq_amp_baseline : float
        Baseline alpha squared amplitude (Fz)

    Returns
    -------
    feedback_val : float
        Feedback value for stimulus presentation [-1, 1]
    """
    relative_error = (sq_amp - sq_amp_baseline) / sq_amp_baseline
    feedback_val = relative_error
    if feedback_val > 1:
        feedback_val = 1
    elif feedback_val < -1:
        feedback_val = -1
    return feedback_val
290560dc815393799d61f51a7684b4bde309dbac
707,766
def _unpad(string: str) -> str:
    """Un-pad string."""
    return string[: -ord(string[len(string) - 1:])]
dbd036afabc29047201a9ed2d6b299bb5fe3ba0f
707,767
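A usage sketch for _unpad above (assuming PKCS#7-style padding, where the last character encodes the pad length; the padded string is my own example):

    print(_unpad("hello\x03\x03\x03"))
    # -> 'hello'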
from pathlib import Path
import shutil

def copy_dir_to_target(source_directory: Path, destination_directory: Path) -> bool:
    """
    Args:
        source_directory: a folder to copy
        destination_directory: the parent directory to copy source_directory into

    Returns:
        True if copy was successful, False otherwise
    """
    if source_directory.exists() and source_directory.is_dir():
        print("Found directory at %s" % source_directory.resolve())
    else:
        print("Unable to find required folder, looked at %s" % source_directory.resolve())
        return False
    print("Copying to %s" % destination_directory)
    shutil.copytree(str(source_directory), str(destination_directory / source_directory.name))
    return True
2dd67db56c17c787ea69189c52db11edcfcb0d3c
707,768
from typing import Optional

def get_next_url(bundle: dict) -> Optional[str]:
    """
    Returns the URL for the next page of a paginated ``bundle``.

    >>> bundle = {
    ...     'link': [
    ...         {'relation': 'self', 'url': 'https://example.com/page/2'},
    ...         {'relation': 'next', 'url': 'https://example.com/page/3'},
    ...         {'relation': 'previous', 'url': 'https://example.com/page/1'},
    ...     ]
    ... }
    >>> get_next_url(bundle)
    'https://example.com/page/3'

    >>> bundle = {
    ...     'link': [
    ...         {'relation': 'self', 'url': 'https://example.com/page/1'},
    ...     ]
    ... }
    >>> type(get_next_url(bundle))
    <class 'NoneType'>
    """
    if 'link' in bundle:
        for link in bundle['link']:
            if link['relation'] == 'next':
                return link['url']
0fafa4dc56fb5e03838652419e94dceb8aed9e75
707,769
def get_mean_and_stdv(dataset):
    """return means and standard deviations along 0th axis of tensor"""
    means = dataset.mean(0)
    stdvs = dataset.std(0)
    return means, stdvs
562f883d809f034be66244ad593a6f8a0bbe2ba5
707,770
def spltime(tseconds):
    """ This gets the time in hours, mins and seconds """
    hours = tseconds // 3600
    minutes = int(tseconds / 60) % 60
    seconds = tseconds % 60
    return hours, minutes, seconds
a8ba14879da51ebbeac2ba201fc562a22fe13364
707,771
def draw_text(text, bgcolor, plt_ax, text_plt):
    """
    Render the text

    :param str text: text to render
    :param str bgcolor: background color used to render text
    :param matplotlib.axes.Axes plt_ax: figure sub plot instance
    :param matplotlib.text.Text text_plt: plot of text
    :return matplotlib.text.Text: updated plot of text
    """
    if text_plt is None:
        # render text with color
        text_plt = plt_ax.text(0.95, 0.95, text,
                               backgroundcolor=bgcolor,
                               horizontalalignment='right',
                               verticalalignment='top',
                               transform=plt_ax.transAxes,
                               fontsize=10)
    else:
        # update existing text
        text_plt.set_text(text)
    return text_plt
478ada3b4fbb3add935713268415cd4606ef58b3
707,772
def _pytype(dtype):
    """ return a python type for a numpy object """
    if dtype in ("int8", "int16", "int32", "int64",
                 "uint8", "uint16", "uint32", "uint64"):
        return int
    elif dtype in ("float16", "float32", "float64", "float128"):
        return float
    elif dtype in ("complex64", "complex128", "complex256"):
        return complex
    else:
        raise TypeError("not a recognized dtype: {0}".format(dtype))
47ed12c47ee4fa5986795b8f432e72cdd7ee945f
707,773
import uuid

def generate_uuid() -> str:
    """ Generate UUIDs to use as `sim.base_models.Node` and `sim.base_models.Item` ids. """
    return str(uuid.uuid4())
9428676bb633873a2f32c53172146486f1421234
707,774
def merge_dicts(a, b):
    """combine two dictionaries, assuming components are arrays"""
    # note: `result` aliases `a`, so the first argument is modified in place
    result = a
    for k, v in b.items():
        if k not in result:
            result[k] = []
        result[k].extend(v)
    return result
de465179faf1bd9ace312fa4b21d332ac994b72b
707,775
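A usage sketch for merge_dicts above (hypothetical dicts; note that the first argument is mutated):

    a = {'x': [1]}
    print(merge_dicts(a, {'x': [2], 'y': [3]}))
    # -> {'x': [1, 2], 'y': [3]}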
def parser_content_labelling_Descriptor(data, i, length, end):
    """\
    parser_content_labelling_Descriptor(data, i, length, end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment. The dict returned is:
    { "type": "content_labelling", "contents": unparsed_descriptor_contents }

    (Defined in ETSI TS 102 323 specification)
    """
    return {"type": "content_labelling", "contents": data[i + 2:end]}
1aa9c68fd186df4dbda7200f2b40f617479a09d9
707,776
import pathlib

def get_scripts_folder():
    """
    return data folder to use for future processing
    """
    return pathlib.Path(__file__).parent.parent
db236b35a06a0506f441ce6f11d8d93807592b04
707,777
import re

def parse_signature(signature):
    """
    Parses one signature

    :param signature: stanc3 function signature
    :return: return type, function name and list of function argument types
    """
    return_type, rest = signature.split(" ", 1)
    function_name, rest = rest.split("(", 1)
    args = re.findall(r"(?:[(][^()]+[)][^,()]+)|(?:[^,()]+(?:,*[]])?)", rest)
    args = [i.strip() for i in args if i.strip()]
    return return_type, function_name, args
11da2fb6008274f8d9a959651a181f127c85a34e
707,778
import torch
import time

def compute_fps(model, shape, epoch=100, device=None):
    """
    frames per second

    :param shape: input data shape
    """
    total_time = 0.0
    if device:
        model = model.to(device)
    for i in range(epoch):
        data = torch.randn(shape)
        if device:
            data = data.to(device)
        start = time.time()
        outputs = model(data)
        end = time.time()
        total_time += (end - start)
    return total_time / epoch
b2e80cf695fe4e4be8890c4f28db8ae37e2f8dfe
707,780
def _GetKeyKind(key):
    """Return the kind of the given key."""
    return key.path().element_list()[-1].type()
c37f1d889e484390449de682e3d6c6b9d4521ce4
707,781
import yaml

def load_config() -> dict:
    """
    Loads the config.yml file to memory and returns it as dictionary.

    :return: Dictionary containing the config.
    """
    with open('config.yml', 'r') as ymlfile:
        return yaml.load(ymlfile, Loader=yaml.FullLoader)
6e05aa4eb6a7d9862814f595ecdc89ffab145ee5
707,782
import re
import sys

def getBaseCount(reads, varPos):
    """
    returns the baseCount for the given variant position

    :param reads:
    :param varPos:
    """
    baseCount = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
    for read in reads:
        readPos = 0
        mmReadPos = 0
        startPos = read.pos
        try:
            cigarNums = re.split("[MIDNSHP]", read.cigarstring)[:-1]
            cigarLetters = re.split("[0-9]+", read.cigarstring)[1:]
        except TypeError:
            # for unmapped reads the cigarstring is empty; to avoid a query
            # for unmapped reads every time, the error is caught and the
            # read is skipped
            # raise TypeError("Invalid Cigar String %s" % read.cigarstring)
            continue
        for i in range(len(cigarLetters)):  # parse over single read
            if cigarLetters[i] in {"I", "S", "H"}:  # Insertion, Soft Clipping and Hard Clipping
                readPos = readPos + int(cigarNums[i])
            elif cigarLetters[i] in {"D", "N"}:  # Deletions and skipped Regions
                startPos = startPos + int(cigarNums[i])
            elif cigarLetters[i] in {"M"}:  # Matches
                for j in range(int(cigarNums[i])):
                    if startPos == varPos:
                        mmReadPos = readPos
                        mmReadBase = read.seq[mmReadPos]
                        try:
                            baseCount[mmReadBase] += 1  # increase count for the base at the mismatch position
                        except KeyError:
                            sys.stderr.write("unknown Base %s \n" % mmReadBase)
                    readPos += 1
                    startPos += 1
    return map(str, [baseCount['A'], baseCount['C'], baseCount['G'], baseCount['T']])
26503aebeb47ff949b09fdb4b8ada170f827d4f5
707,783
import argparse

def getparser():
    """
    Use argparse to add arguments from the command line

    Parameters
    ----------
    createlapserates : int
        Switch for processing lapse rates (default = 0 (no))
    createtempstd : int
        Switch for processing hourly temp data into monthly standard deviation
        (default = 0 (no))

    Returns
    -------
    Object containing arguments and their respective values.
    """
    parser = argparse.ArgumentParser(description="select pre-processing options")
    # add arguments
    parser.add_argument('-createlapserates', action='store', type=int, default=0,
                        help='option to create lapse rates or not (1=yes, 0=no)')
    parser.add_argument('-createtempstd', action='store', type=int, default=0,
                        help='option to create temperature std of daily data or not (1=yes, 0=no)')
    return parser
6ccde8f0e02124fa537205da398a842c88a62046
707,784
def prolog_rule(line):
    """Specify prolog equivalent"""
    def specify(rule):
        """Apply restrictions to rule"""
        rule.prolog.insert(0, line)
        return rule
    return specify
dde4840dc2f8f725d4c4c123aed7c978ec1948f9
707,785