Columns: content — string (35 to 416k chars); sha1 — string (40 chars); id — int64 (0 to 710k)
import math

def H(r2, H_s, H_d, a_s, a_d, gamma_s, gamma_d, G, v):
    """Assemble the 2x2 matrix of radial (row 0) and vertical (row 1)
    response factors for the shallow (_s) and deep (_d) sources."""
    pi = math.pi
    sqrt = math.sqrt
    r = sqrt(r2)
    H2_s = H_s**2
    H2_d = H_d**2
    R2_s = r2 + H2_s
    R2_d = r2 + H2_d
    alpha_s = 1.0 if gamma_s == 1.0 else 4 * H2_s / (pi * R2_s)
    alpha_d = 1.0 if gamma_d == 1.0 else 4 * H2_d / (pi * R2_d)
    f_s = a_s**3 * alpha_s * (1 - v) / (G * (H2_s + r2)**1.5)
    f_d = a_d**3 * alpha_d * (1 - v) / (G * (H2_d + r2)**1.5)
    H = [
        [r * f_s, r * f_d],      # the radial H
        [H_s * f_s, H_d * f_d],  # the vertical H
    ]
    return H
0fa1606212278def22075692a56468d41a8c7a3c
1,098
def addBenchmark(df):
    """Add benchmark to df."""
    # Compute the inverse of the distance
    distance_inv = (1. / df.filter(regex='^distance*', axis=1)).values
    # Extract the value at the nearest station
    values = df.filter(regex='value_*', axis=1)
    # Compute the benchmark (inverse-distance weighted mean over non-zero values)
    numer = (distance_inv * values).sum(axis=1)
    denom = (distance_inv * (values != 0)).sum(axis=1)
    benchmark = numer / denom
    df["Benchmark"] = benchmark
    return df
62c63215d622c46bed8200f97ad55b985e2beb20
1,100
def is_file_like(f):
    """Check to see if ``f`` has a ``read()`` method."""
    return hasattr(f, 'read') and callable(f.read)
9eee8c8f4a6966d1db67fb4aa9149e2fbd390fb9
1,101
def check_protocol(protocol):
    """
    Check if a given protocol works by computing the qubit excitation
    probabilities.
    """
    qubit_weight = {}
    qubit_weight[protocol[0][0][0]] = 1.0
    for pair_set in protocol:
        for i, j, p in pair_set:
            qubit_weight[j] = qubit_weight[i] * (1.0 - p)
            qubit_weight[i] *= p
    return qubit_weight
8b9d0a8e329a340718d37bc79066be4a05cf2d20
1,102
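A quick usage sketch of the function above; the input format (rounds of (source, target, probability) triples) is assumed from the traversal logic:

protocol = [[(0, 1, 0.5)], [(0, 2, 0.5), (1, 3, 0.5)]]
check_protocol(protocol)  # -> {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}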
def choose_first_not_none(*args):
    """
    Choose the first non-None alternative in args.

    :param args: alternative list
    :return: the first non-None alternative, or None if all are None.
    """
    for a in args:
        if a is not None:
            return a
    return None
fe3efba85251161cd0a6ecb50583cc443cd04dc0
1,103
def matrix(mat, nrow=1, ncol=1, byrow=False):
    """Given a two-dimensional array, write the array in R matrix form."""
    nr = len(mat)
    rscript = 'm<-matrix(data=c('
    try:
        nc = len(mat[0])
        for m in mat:
            rscript += str(m)[1:-1] + ', '
        rscript = rscript[:-2] + '), nrow=%d, ncol=%d, byrow=TRUE,' % (nr, nc)
    except TypeError:
        rscript += str(mat)[1:-1] + ','
        rscript = rscript[:-1] + '), nrow=%d, ncol=%d,' % (nrow, ncol)
        if byrow:
            rscript += 'byrow=TRUE,'
    rscript = rscript[:-1] + ')\n'
    return rscript
a28d91d797238857dd2ff58f24655504a936d4a7
1,104
def add_dict(dct1, dct2):
    """Returns a new dictionary where the contents of the dictionaries
    `dct1` and `dct2` are merged together."""
    result = dct1.copy()
    result.update(dct2)
    return result
eba785e4d00534e94c1bdde413603d64e18aac05
1,105
def template14():
    """Simple ML workflow"""
    script = """
        ## (Enter,datasets)
            << host = chemml
            << function = load_cep_homo
            >> smiles 0
            >> homo 4

        ## (Store,file)
            << host = chemml
            << function = SaveFile
            << format = smi
            << header = False
            << filename = smiles
            >> 0 df
            >> filepath 1

        ## (Represent,molecular descriptors)
            << host = chemml
            << function = RDKitFingerprint
            >> 1 molfile
            >> df 2
            >> df 3

        ## (Store,file)
            << host = chemml
            << function = SaveFile
            << filename = fps_rdkfp
            >> 2 df

        ## (Prepare,split)
            << host = sklearn
            << function = train_test_split
            >> 3 dfx
            >> 4 dfy
            >> dfx_train 5
            >> dfy_train 6
            >> dfx_test 8
            >> dfy_test 11

        ## (Model,regression)
            << host = sklearn
            << function = MLPRegressor
            << func_method = fit
            >> 5 dfx
            >> 6 dfy
            >> api 7

        ## (Model,regression)
            << host = sklearn
            << function = MLPRegressor
            << func_method = predict
            >> 7 api
            >> 8 dfx
            >> dfy_predict 9
            >> dfy_predict 10

        ## (Store,file)
            << host = chemml
            << function = SaveFile
            << filename = dfy_predict
            >> 9 df

        ## (Visualize,plot)
            << host = chemml
            << function = scatter2D
            << x = 0
            << y = 0
            >> 10 dfx
            >> 11 dfy
            >> fig 12

        ## (Store,figure)
            << host = chemml
            << function = SavePlot
            << filename = dfy_actual_vs_dfy_predict
            << output_directory = .
            >> 13 fig

        ## (Visualize,artist)
            << host = chemml
            << function = decorator
            << title = true vs. predicted HOMO energy
            << xlabel = predicted HOMO energy (eV)
            << ylabel = true HOMO energy (eV)
            << grid = True
            << grid_color = g
            << size = 18
            >> 12 fig
            >> fig 13
    """
    return script.strip().split('\n')
d321d2016f0894d0a0538a09f6bc17f3f690317b
1,108
def mapdict(itemfunc, dictionary):
    """
    Much like the builtin function 'map', but works on dictionaries.

    *itemfunc* should be a function which takes one parameter, a (key,
    value) pair, and returns a new (or the same) (key, value) pair to go
    in the dictionary.
    """
    return dict(map(itemfunc, dictionary.items()))
1f0573410f82acb1f3c06029cf4bfaccd295e1ac
1,110
def _map_tensor_names(original_tensor_name):
    """Tensor name mapping."""
    global_tensor_map = {
        "model/wte": "word_embedder/w",
        "model/wpe": "position_embedder/w",
        "model/ln_f/b": "transformer_decoder/beta",
        "model/ln_f/g": "transformer_decoder/gamma",
    }
    if original_tensor_name in global_tensor_map:
        return global_tensor_map[original_tensor_name]

    original_tensor_name_split = original_tensor_name.split('/')
    layer_tensor_map = {
        "ln_1/b": "beta",
        "ln_1/g": "gamma",
        "ln_2/b": "past_poswise_ln/beta",
        "ln_2/g": "past_poswise_ln/gamma",
        "mlp/c_fc/b": "ffn/conv1/bias",
        "mlp/c_fc/w": "ffn/conv1/kernel",
        "mlp/c_proj/b": "ffn/conv2/bias",
        "mlp/c_proj/w": "ffn/conv2/kernel",
        "attn/c_proj/b": "self_attention/multihead_attention/output/bias",
        "attn/c_proj/w": "self_attention/multihead_attention/output/kernel",
    }
    layer_num = int(original_tensor_name_split[1][1:])
    layer_feature = '/'.join(original_tensor_name.split('/')[2:])

    if layer_feature in layer_tensor_map:
        layer_feature_ = layer_tensor_map[layer_feature]
        tensor_name_ = '/'.join([
            'transformer_decoder',
            'layer_{}'.format(layer_num),
            layer_feature_,
        ])
        return tensor_name_
    else:
        return original_tensor_name
3331d13e667ee3ef363cdeca5122e8a256202c39
1,111
def get_classpath(obj):
    """
    Return the full module and class path of the obj as a string.
    For instance, kgof.density.IsotropicNormal.
    """
    return obj.__class__.__module__ + "." + obj.__class__.__name__
bf986e2b27dd8a216a2cc2cdb2fb2b8a83b361cc
1,112
import numpy as np

def label_generator(df_well, df_tops, column_depth, label_name):
    """
    Generate formation (or other) labels for a well dataframe
    (useful for machine learning and EDA purposes).

    Input:

    df_well is your well dataframe (that originally doesn't have the
      intended label)
    df_tops is your label dataframe (this dataframe should ONLY have
      2 columns):
      1st column is the label name (e.g. formation top names)
      2nd column is the depth of each label name
    column_depth is the name of the depth column in your df_well dataframe
    label_name is the name of the label that you want to produce
      (e.g. FM. LABEL)

    Output:

    df_well is your dataframe that now has the labels (e.g. FM. LABEL)
    """
    # generate list of formation depths and top names
    fm_tops = df_tops.iloc[:, 0]
    fm_depths = df_tops.iloc[:, 1]

    # create the label column in the well dataframe, initiated with NaNs
    df_well[label_name] = np.full(len(df_well), np.nan)

    indexes = []
    topnames = []
    for j in range(len(fm_depths)):
        # search for the index at which the DEPTH in the well df is equal
        # to OR larger than the DEPTH of each pick in the pick df
        if df_well[column_depth].iloc[-1] > fm_depths[j]:
            index = df_well.index[(df_well[column_depth] >= fm_depths[j])][0]
            top = fm_tops[j]
            indexes.append(index)
            topnames.append(top)

    # replace the NaNs in the label column of the well df at the assigned
    # top-name indexes (label-based assignment avoids pandas
    # chained-assignment warnings)
    df_well.loc[indexes, label_name] = topnames

    # finally, forward-fill so every row below a top carries its name
    df_well = df_well.ffill()
    return df_well
16336d8faf675940f3eafa4e7ec853751fd0f5d0
1,115
import os

def cleanFiles(direct, CWD=os.getcwd()):
    """
    Removes the year and trailing whitespace from file names, if there
    is a year.

    direct holds the file name for the file of the contents of the
    directory.
    @return list of the cleaned data
    """
    SUBDIR = os.path.join(CWD, "output")  # directory holding the output
    contents = os.listdir(SUBDIR)
    LOGDIR = os.path.join(CWD, "log")  # directory used for logging
    log = open(os.path.join(LOGDIR, "log.txt"), "w")  # opens log file
    for i in range(len(contents)):
        contents[i] = contents[i].strip("\n")  # remove trailing \n
        # '(' or ')' in the file name signifies that there is a year
        if "(" in contents[i] or ")" in contents[i]:
            old = contents[i]  # holds the name of the movie for logging
            # truncate the string to remove the year, e.g. " (2004)",
            # and the trailing whitespace
            contents[i] = contents[i][:-7]
            log.write(f"Removing date from {old} -> {contents[i]}\n")
    log.close()
    return contents
2a16037ef15d547af8c1b947d96747b2b2d62fd1
1,116
def wrap_compute_softmax(topi_compute):
    """Wrap softmax topi compute"""
    def _compute_softmax(attrs, inputs, out_type):
        axis = attrs.get_int("axis")
        return [topi_compute(inputs[0], axis)]
    return _compute_softmax
3a5e3843f77d8bdfefc0f77b878f135aac4896f6
1,117
def Align4(i):
    """Round up to the nearest multiple of 4. See unit tests."""
    return ((i - 1) | 3) + 1
16ff27823c30fcc7d03fb50fe0d7dbfab9557194
1,118
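A few worked examples of the bit trick above, for illustration:

Align4(1)  # -> 4
Align4(5)  # -> 8
Align4(8)  # -> 8 (already-aligned values are unchanged)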
import torch

def initialize(X, num_clusters):
    """
    initialize cluster centers
    :param X: (torch.tensor) matrix
    :param num_clusters: (int) number of clusters
    :return: (np.array) initial state
    """
    num_samples = X.shape[1]
    bs = X.shape[0]
    indices = torch.empty(X.shape[:-1], device=X.device, dtype=torch.long)
    for i in range(bs):
        indices[i] = torch.randperm(num_samples, device=X.device)
    initial_state = torch.gather(
        X, 1, indices.unsqueeze(-1).repeat(1, 1, X.shape[-1])
    ).reshape(bs, num_clusters, -1, X.shape[-1]).mean(dim=-2)
    return initial_state
a704daf3997202f4358bb9f3fbd51524fee4afe5
1,119
def parse_structure(node):
    """Turn a collapsed node in an OverlayGraph into a hierarchical graph structure."""
    if node is None:
        return None
    structure = node.sub_structure
    if structure is None:
        return node.name
    elif structure.structure_type == "Sequence":
        return {"Sequence": [parse_structure(n) for n in structure.structure["sequence"]]}
    elif structure.structure_type == "HeadBranch":
        return {"Sequence": [
            {"Branch": [parse_structure(n) for n in structure.structure["branches"]]},
            parse_structure(structure.structure["head"]),
        ]}
    elif structure.structure_type == "TailBranch":
        return {"Sequence": [
            parse_structure(structure.structure["tail"]),
            {"Branch": [parse_structure(n) for n in structure.structure["branches"]]},
        ]}
    else:
        data = {}
        for k in structure.structure:
            if isinstance(structure.structure[k], list):
                data[k] = [parse_structure(n) for n in structure.structure[k]]
            else:
                data[k] = parse_structure(structure.structure[k])
        return {structure.structure_type: data}
f9374ff9548789d5bf9b49db11083ed7a15debab
1,121
def column_ids_to_names(convert_table, sharepoint_row):
    """Replace the column IDs used by SharePoint with their column names for use in DSS."""
    return {convert_table[key]: value
            for key, value in sharepoint_row.items() if key in convert_table}
6ae1474823b0459f4cf3b10917286f709ddea520
1,122
import uuid

def make_unique_id():
    """Make a new UniqueId."""
    return uuid.uuid4()
    # return UniqueId(uuid.uuid4())
c7ab0e5242a954db75638b3193609d49f0097287
1,123
import math

def calculatePredictions(ReviewsD, userIDTest, scoreTest, simmilarities):
    """
    Finds userIDTest in all similar items and uses all the scores for
    the prediction calculation.

    Returns actualScore and predictedScore for further calculation of
    the RMSE and MSE values.
    """
    score = 0
    sim = 0
    sumB = 0
    sumN = 0
    # go over the entire dictionary, without the testing (removed) item
    for itemID, userScoreOther in ReviewsD.items():
        # if the same user was found
        if userIDTest in userScoreOther:
            # find similarity and score
            if itemID in simmilarities:
                sim = simmilarities[itemID]
                if sim == -1:
                    continue
                score = userScoreOther[userIDTest]
                # calculations for prediction
                sumB += (score * sim)
                sumN += math.fabs(sim)
    if sumB != 0 and sumN != 0:
        print("User: ", userIDTest)
        print("Actual score: ", scoreTest)
        print("Predicted score: ", math.fabs(sumB / sumN))
        actualScore = scoreTest
        predictedScore = math.fabs(sumB / sumN)
        print(" ")
        # predictions were found
        return (actualScore, predictedScore)
    else:
        # no predictions found
        return None
6b74b9d6ed4855030f2f7405190788db7e0dad52
1,124
def func_2(x: float, c: float, d: float) -> float:
    """Test function 2."""
    return x + c + d
b95400c6779c0e64e7bb6cda493c0ee5e6f05f7c
1,125
import math

def isInner(x1, y1, x2, y2, scale):
    """
    Currently, it's a rectangular kernel.

    Other options: rectangular f(x) = 1 if a <= scale <= b else 0
    I don't get the rest of them:
    http://saravananthirumuruganathan.wordpress.com/2010/04/01/introduction-to-mean-shift-algorithm/
    """
    distance = math.sqrt(((x1 - x2)**2) + ((y1 - y2)**2))
    return distance <= scale
b2c715b33ae8b38fdfd19c71b54ee3980b336eeb
1,126
def wklobjective_converged(qsum, f0, plansum, epsilon, gamma):
    """Compute the final wkl value after convergence."""
    obj = gamma * (plansum + qsum)
    obj += epsilon * f0
    obj += -(epsilon + 2 * gamma) * plansum
    return obj
079841a8ee6d845cdac25a48306c023a1f38b5f7
1,127
def _get_should_cache_fn(conf, group):
    """Build a function that returns a config group's caching status.

    For any given object that has caching capabilities, a boolean config
    option for that object's group should exist and default to ``True``.
    This function will use that value to tell the caching decorator if
    caching for that object is enabled.

    To properly use this with the decorator, pass this function the
    configuration group and assign the result to a variable. Pass the new
    variable to the caching decorator as the named argument
    ``should_cache_fn``.

    :param conf: config object, must have had :func:`configure` called on it.
    :type conf: oslo_config.cfg.ConfigOpts
    :param group: name of the configuration group to examine
    :type group: string
    :returns: function reference
    """
    def should_cache(value):
        if not conf.cache.enabled:
            return False
        conf_group = getattr(conf, group)
        return getattr(conf_group, 'caching', True)
    return should_cache
7a11124c640bfb3ced28e2d9395593b70dc85a0a
1,128
import json

def obter_novo_username() -> str:
    """
    -> Asks for a new username.
    :return: Returns the new username.
    """
    username = input('Qual é o seu nome? ')
    arquivo = 'arquivos_json/nome_de_usuario.json'
    with open(arquivo, 'w') as obj_arq:
        json.dump(username, obj_arq)
    return username
b4d4922d68b1fb80e5a9270638d134b5806969fd
1,131
import logging

def _parse_block_postheader(line):
    """(209)**************!*****************!!*************..."""
    parts = line[1:].split(')', 1)
    qlen = int(parts[0])
    if not len(parts[1]) == qlen:
        logging.warning("postheader expected %d-long query, found %d",
                        qlen, len(parts[1]))
    return qlen, parts[1]
5eee6c11160c0f91cb37c025d6d265188488cad9
1,132
import os

def _is_toplevel_repository_dir(directory):
    """Returns whether a directory is a git or mercurial directory.

    This works by searching for a file or directory named `.git` or `.hg`
    in the directory. This works for both submodules and normal
    repositories.
    """
    return (os.path.exists(os.path.join(directory, ".git")) or
            os.path.exists(os.path.join(directory, ".hg")))
25db538b6ef4f7febbdb282561885ff807f03bbe
1,133
def horizontal_move(t, h_speed=-2/320):
    """Probe moves horizontally at h_speed [cm/s]."""
    return 0. * t, h_speed * t, 2/16 + 0 * t
d9cf0e5b968e7d8319b7f63f7d1d7a4666484ad3
1,134
def categories_report(x):
    """Returns value counts report.

    Parameters
    ----------
    x: pd.Series
        The series with the values

    Returns
    -------
    string
        The value counts report.

        str1 = False 22 | True 20 | nan 34
        str2 = False (22) | True (20) | nan (34)
    """
    # Do counting and sorting
    counts = x.value_counts(dropna=False)
    counts.index = counts.index.map(str)
    counts = counts.sort_index()

    # Create different strings
    str1 = ' | '.join(str(counts).split("\n")[:-1])
    str2 = ' | '.join("%s (%s)" % (i, counts[i]) for i in counts.index)

    # Return
    return str2
695ccd73ee73a13e92edbdf0eb242121d136ddbb
1,135
def detect_ol(table):
    """Detect ordered list"""
    if not len(table):
        return False
    for tr in table:
        if len(tr) != 2:
            return False
        td1 = tr[0]
        # Only keep plausible ordered lists
        if td1.text is None:
            return False
        text = td1.text.strip()
        if not text or len(text) > 3:
            return False
        if text[-1] not in ('.', ')'):
            return False
        if not text[:-1].isalpha() and not text[:-1].isdigit():
            return False
        if len(td1):
            return False
    return True
b7082932fba6ba7f9634e70ea424561c084a2dc1
1,136
import six
import base64

def _decode(value):
    """
    Base64 decode, restoring the "=" padding as needed.
    The padding has to be re-added because Docker strips the extra "="
    when issuing tokens.
    :param value:
    :return:
    """
    length = len(value) % 4
    if length in (2, 3,):
        value += (4 - length) * "="
    elif length != 0:
        raise ValueError("Invalid base64 string")
    if not isinstance(value, six.binary_type):
        value = value.encode()
    return base64.urlsafe_b64decode(value)
c4a28605fb7f8a0d5110fb06738c31b030cae170
1,137
def line2dict(st):
    """Convert a line of key=value pairs to a dictionary.

    :param st: the input string
    :returns: a dictionary
    """
    elems = st.split(',')
    dd = {}
    for elem in elems:
        elem = elem.split('=')
        key, val = elem
        try:
            int_val = int(val)
            dd[key] = int_val
        except ValueError:
            dd[key] = val
    return dd
86bb6c2e72c8a6b2a027d797de88089067ff7475
1,138
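A usage sketch of the parser above; integer values are converted, everything else stays a string:

line2dict("a=1,b=2,name=x")  # -> {'a': 1, 'b': 2, 'name': 'x'}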
def check_structure(struct):
    """
    Return True if the monophyly structure represented by struct is
    considered "meaningful", i.e. encodes something other than an
    unstructured polytomy.
    """
    # First, transform e.g. [['foo'], [['bar']], [[[['baz']]]]] into
    # simply ['foo', 'bar', 'baz'].
    def denester(l):
        if type(l) != list:
            return l
        if len(l) == 1:
            return denester(l[0])
        return [denester(x) for x in l]
    struct = denester(struct)
    # Now check for internal structure
    if not any([type(x) == list for x in struct]):
        # Struct is just a list of language names, with no internal structure
        return False
    return True
e07a2f39c7d3b8f2454b5171119b8698f4f58a99
1,139
import subprocess

def generate_keypair(passphrase):
    """Create a pair of keys with the passphrase as part of the key names."""
    keypath = '/tmp/test_{}_key'.format(passphrase)
    command = 'ssh-keygen -t rsa -b 4096 -C "{p}" -P "{p}" -f {k} -q'
    command = command.format(p=passphrase, k=keypath)
    subprocess.check_call(command, shell=True)
    return keypath, keypath + '.pub'
d4c8155173273feda778f5f54a4b0513353a293b
1,140
def euler(step, y0):
    """
    Implements Euler's method for the differential equation
    dy/dx = 1/(2(y-1)) on the interval [0, 4].
    """
    x = [0]
    index_x = 0
    while x[index_x] < 4:
        x.append(x[index_x] + step)
        index_x += 1

    index_y = 0
    y = [y0]

    def yprime(y):
        return 1 / (2 * (y - 1))

    while index_y < index_x:
        y.append(y[index_y] + step * yprime(y[index_y]))
        index_y += 1

    return x, y
89c6e6409a1c43ce4766507fba2f401bb01cfbb8
1,142
import logging

def update_softwaretitle_packages(api, jssid, pkgs):
    """
    Update packages of software title.
    :param jssid: Patch Software Title ID
    :param pkgs: dict of {version: package, ...}
    :returns: None
    """
    logger = logging.getLogger(__name__)

    data = api.get(f"patchsoftwaretitles/id/{jssid}")
    title = data['patch_software_title']

    title_name = title['name']
    logger.info(f"updating patch software title: {title_name} ({jssid})")

    # single version (dict), multiple versions (list)
    version = title['versions']['version']
    _modified = False
    try:
        # access key of single version and count on TypeError being raised
        v = version['software_version']
        if v in pkgs.keys():
            version['package'] = {'name': pkgs[v]}
            _modified = True
    except TypeError:
        # looks like it was actually a list
        for _version in version:
            v = _version['software_version']
            if v in pkgs.keys():
                _version['package'] = {'name': pkgs[v]}
                _modified = True

    if _modified:
        result = api.put(f"patchsoftwaretitles/id/{jssid}", data)
        logger.info(f"successfully updated: {title_name}")
        return result
    else:
        logger.info("software title was not modified")
0acb3dfbff0e85a2e8a876d5e5d484c4d1e52068
1,143
def progress(self):
    """Check if foo can send to corge"""
    return True
89a0c9671645f9fa855db35bf5e383145d6b7616
1,144
def write_sample_sdf(input_file_name, valid_list):
    """
    Function for writing a temporary file with a subset of pre-selected
    structures.

    :param input_file_name: name of input file
    :param valid_list: list of indexes of pre-selected structures
    :return: name of subsampled file
    """
    sample_file_name = '{}_sample.sdf'.format(input_file_name.split('.')[0])
    sample_file = open(sample_file_name, 'w')
    mol = []
    i = 0
    for line in open(input_file_name):
        mol.append(line)
        if line[:4] == '$$$$':
            i += 1
            if i in valid_list:
                for mol_line in mol:
                    sample_file.write(mol_line)
                valid_list.remove(i)
            mol = []
    sample_file.close()
    return sample_file_name
0b22c14452f6de978e7ea811d761195d92bfe6c4
1,145
from typing import Sequence

def _table(*rows: Sequence) -> str:
    """
    >>> _table(['a', 1, 'c', 1.23])
    '|a|1|c|1.23|'

    >>> _table(['foo', 0, None])
    '|foo|||'

    >>> print(_table(['multiple', 'rows', 0], ['each', 'a', 'list']))
    |multiple|rows||
    |each|a|list|
    """
    return '\n'.join([
        '|'.join(['', *[str(cell or '') for cell in row], ''])
        for row in rows
    ])
d566da2ad9240e73b60af00d3e4b4e25607234b4
1,146
def make_range(value):
    """Given an integer 'value', return the value converted into a range."""
    return range(value)
385d23eaebd04249f9384e0d592b7fb3a9bbb457
1,148
def _get_security_group_id(connection, security_group_name):
    """
    Takes a security group name and returns the ID. If the name cannot be
    found, the name will be attempted as an ID. The first group found by
    this name or ID will be used.

    :param connection:
    :param security_group_name:
    :return:
    """
    if not security_group_name:
        print('The bees need a security group to run under. Need to open a port from where you are to the target '
              'subnet.')
        return

    # Try by name
    security_groups = connection.describe_security_groups(
        Filters=[{'Name': 'group-name', 'Values': [security_group_name, ]}, ]
    )
    security_groups = security_groups['SecurityGroups']

    if not security_groups:
        # Try by id
        security_groups = connection.describe_security_groups(
            Filters=[{'Name': 'group-id', 'Values': [security_group_name, ]}, ]
        )
        security_groups = security_groups['SecurityGroups']
        if not security_groups:
            print('The bees need a security group to run under. The one specified was not found. '
                  'Create a sg that has access to port 22 ie. from 0.0.0.0/0')
            return

    return security_groups[0]['GroupId'] if security_groups else None
70c9b8357a9634043f07ad0019ff3cc621ba859c
1,149
import difflib

def lines_diff(lines1, lines2):
    """Show the difference between lines."""
    is_diff = False
    diffs = list()
    for line in difflib.ndiff(lines1, lines2):
        if not is_diff and line[0] in ('+', '-'):
            is_diff = True
        diffs.append(line)
    return is_diff, diffs
50916d46871980fadfd854dc698481a4b0f35834
1,150
import re

def parse_ipmi_hpm(output):
    """Parse the output of the hpm info retrieved with ipmitool."""
    hrdw = []
    line_pattern = re.compile(
        r'^\|[^0-9]*([0-9]+)\|[^a-zA-Z ]* ?([^\|]*)\|([^\|]*)\|([^\|]*)\|([^\|]*)\|')
    for line in output:
        match = line_pattern.match(line)
        if match:
            name = match.group(2).strip()
            version = match.group(3).strip().split(" ")[0]
            hrdw.append(('firmware', name, 'version', version))
    return hrdw
001731ce46fa6bbdb5103727265a0bdd353773be
1,151
def get_genes_and_pathways(reactions, r_numbers, species):
    """Returns a CSV-formatted string with the list of genes and pathways
    where the reaction(s) of 'species' appear.

    :param reactions: list of reactions for species
    :param r_numbers: RNumbers object
    :param species: KEGG organism code
    :return: CSV-formatted string with genes and pathways where reactions
        of species are present
    """
    gene_set = set()
    pathway_set = set()
    for reaction in reactions:
        organism = r_numbers.find(reaction).find(species)
        assert organism is not None
        for gene in organism.genes:
            gene_set.add(gene.replace(species + ':', ''))
        for pathway in organism.pathways:
            pathway_set.add(pathway)
    gene_col = ' '.join(sorted(gene_set))
    pathway_col = ' '.join(sorted(pathway_set))
    return gene_col.rstrip() + ';' + pathway_col.rstrip() + ';'
0ecddcaf50650b04125be73bcf6b304a77df011d
1,152
import os

def datasetFiles(request):
    """
    Return a list of all dataset files in the datasets directory, found by
    looking for files ending with the .h5 suffix,
    e.g. ['/Users/jarnyc/BioPyramid/data/datasets/lanner.1.0.h5'].
    """
    # This is the dataset directory, set by the config file
    datadir = request.registry.settings['biopyramid.model.datadir']
    # Go through each file in the directory and fetch files with .h5 suffix
    filepaths = []
    for filename in os.listdir(datadir):
        if filename.endswith(".h5"):
            filepaths.append(os.path.join(datadir, filename))
    return filepaths
0c4e2ffff720ec24b6f673f059baa023458f72e9
1,153
def convertHunit(conc, from_unit='H/10^6 Si', to_unit='ppm H2O',
                 phase='Fo90', printout=True):
    """
    Convert hydrogen concentrations to/from H/10^6 Si and ppm H2O.
    Based on Table 3 of Denis et al. 2013.
    """
    if phase == 'Fo90':
        H_to_1_ppm = 16.35
    elif phase == 'opx':
        H_to_1_ppm = 11.49
    elif phase == 'cpx':
        H_to_1_ppm = 11.61
    else:
        print('Valid options for phase are Fo90, opx, and cpx')
        return

    if from_unit == 'H/10^6 Si':
        if to_unit == 'ppm H2O':
            new_conc = conc / H_to_1_ppm
        elif to_unit == 'per m3':
            new_conc = conc * (1.0/308.67) * (1e30)
        else:
            print('only going to units "ppm H2O" and "per m3"')
            return
    elif from_unit == 'ppm H2O':
        if to_unit == 'H/10^6 Si':
            new_conc = conc * H_to_1_ppm
        elif to_unit == 'per m3':
            new_conc = (conc * H_to_1_ppm) * (1.0/308.67) * (1e30)
        else:
            print('only going to "H/10^6 Si" or "per m3"')
            return
    elif from_unit == 'per m3':
        if to_unit == 'H/10^6 Si':
            new_conc = conc / ((1.0/308.67) * (1e30))
        elif to_unit == 'ppm H2O':
            new_conc = (conc / ((1.0/308.67) * (1e30))) / H_to_1_ppm
        else:
            print('only going to "H/10^6 Si" or "ppm H2O"')
            return
    else:
        print('Only going from H/10^6 Si, ppm H2O, and per m3 for now')
        return

    if printout is True:
        output = ' '.join(('{:.2f}'.format(conc), from_unit, '=',
                           '{:.2f}'.format(new_conc), to_unit, 'for', phase))
        print(output)
    return new_conc
fdd0646a09f3a2c3a8cbbc02410103caa9e023dd
1,155
import re

def countBasesInFasta(fastaFile):
    """
    Given a fasta file, return a dict where the number of records and
    the total number of bases are given by 'records' and 'bases'
    respectively.
    """
    recordRE = re.compile(r'^>')
    whiteSpaceRE = re.compile(r'\s+')
    total_bases = 0
    total_seqs = 0
    with open(fastaFile) as f:
        for line in f:
            if recordRE.match(line):
                total_seqs += 1
                continue
            total_bases += len(whiteSpaceRE.sub('', line))
    return {'records': total_seqs, 'bases': total_bases}
45eaa5b8d36b4bae6b97bb29fdead1efc0aed8c2
1,156
def test_train_val_split(patient_id, sub_dataset_ids, cv_fold_number):
    """
    if cv_fold_number == 1:
        if patient_id in sub_dataset_ids[-5:]:
            return 'test'
        elif patient_id in sub_dataset_ids[-7:-5]:
            return 'validation'
        else:
            return 'train'

    elif cv_fold_number == 2:
        if patient_id in sub_dataset_ids[-10:-5]:
            return 'test'
        elif patient_id in sub_dataset_ids[-12:-10]:
            return 'validation'
        else:
            return 'train'

    # used for accumulating results of tests on cv1 and cv2
    if cv_fold_number == 3:
        if patient_id in sub_dataset_ids[-10:]:
            return 'test'
        elif patient_id in sub_dataset_ids[-12:-11]:
            return 'validation'
        else:
            return 'train'
    """
    if patient_id in [1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]:
        return 'test'
    elif patient_id == 36:
        return 'validation'
    else:
        return 'train'
129f3856875033505555241408577f8885c9c393
1,157
def linear_search(iterable, item):
    """Returns the index of the item in the unsorted iterable.

    Iterates through a collection, comparing each item to the target item,
    and returns the index of the first item that is equal to the target
    item.

    * O(n) time complexity
    * O(1) space complexity

    Args:
        iterable: A collection that is iterable.
        item: An object to search for.

    Returns:
        The index of the item in the sorted iterable, or -1 if not found.

    Raises:
        TypeError: If iterable is not iterable.
    """
    try:
        _ = iter(iterable)
    except TypeError:
        raise TypeError('\'{}\' object is not iterable'.format(
            type(iterable).__name__))
    for index, _item in enumerate(iterable):
        if _item == item:
            return index
    return -1
bdbd7e70cea79deef1375648bde61067df1d2221
1,158
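Usage sketch for the search above; the first matching index wins:

linear_search([4, 2, 7, 2], 7)  # -> 2
linear_search([4, 2, 7, 2], 9)  # -> -1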
def create_MD_tag(reference_seq, query_seq):
    """Create MD tag

    Args:
        reference_seq (str) : reference sequence of alignment
        query_seq (str) : query bases of alignment

    Returns:
        md_tag (str) : md description of the alignment
    """
    no_change = 0
    md = []
    for ref_base, query_base in zip(reference_seq, query_seq):
        if ref_base.upper() == query_base:
            no_change += 1
        else:
            if no_change > 0:
                md.append(str(no_change))
            md.append(ref_base)
            no_change = 0
    if no_change > 0:
        md.append(str(no_change))
    return ''.join(md)
4b711521d00af132e8e29fe4fc44785b985c2607
1,159
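Illustrative calls for the MD-tag builder; mismatches emit the reference base between match counts:

create_MD_tag("ACGT", "ACGT")  # -> '4'
create_MD_tag("ACGT", "ACTT")  # -> '2G1'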
import re

def calc_word_frequency(my_string, my_word):
    """Calculate the number of occurrences of a given word in a given string.

    Args:
        my_string (str): String to search
        my_word (str): The word to search for

    Returns:
        int: The number of occurrences of the given word in the given string.
    """
    # Remove all non-alphanumeric characters from the string
    filtered_string = re.sub(r'[^A-Za-z0-9 ]+', '', my_string)
    # Return the number of occurrences of my_word in the filtered string
    return filtered_string.split().count(my_word)
15ff723dd2ff089fb12cccb38283f1f75e37079d
1,160
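Usage sketch; punctuation is stripped before counting whole words:

calc_word_frequency("It's a cat, not a hat.", "a")  # -> 2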
import hashlib

def intmd5(source: str, nbytes=4) -> int:
    """
    Generate a predictable random integer of nbytes*8 bits based on a
    source string.

    :param source: seed string to generate the random integer.
    :param nbytes: size of the integer.
    """
    hashobj = hashlib.md5(source.encode())
    return int.from_bytes(hashobj.digest()[:nbytes], byteorder="big", signed=False)
c03eb99a67af00a4a081423ecca3a724111514e1
1,161
async def async_setup(hass, config_entry):
    """Disallow configuration via YAML."""
    return True
759cc705a82a0f9ff9d4d43cb14d641d7e552aaa
1,163
import socket

def _is_rpc_timeout(e):
    """Check whether an exception is an individual rpc timeout."""
    # connection caused socket timeout is being re-raised as
    # ThriftConnectionTimeoutError now
    return isinstance(e, socket.timeout)
ec832bec086b59698eed12b18b7a37e5eb541329
1,164
def html_anchor_navigation(base_dir, experiment_dir, modules):
    """Build header of an experiment with links to all modules used for rendering.

    :param base_dir: parent folder in which to look for experiment folders
    :param experiment_dir: experiment folder
    :param modules: list of all loaded modules
    :return: str
    """
    return "\n".join((
        """<header class="w3-container w3-dark-grey">
            <h5><a href='#'>{folder}</a></h5>
        </header>""".format(folder=experiment_dir),
        "\n".join("""
        <div style='white-space: nowrap;'>
            <div class=\"show toggle-cookie padding-right\" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>
            <a class='' href='#{module_title}'>{module_title}</a>
        </div>""".format(
            folder=experiment_dir,
            module_title=module.title,
            id=module.id) for module in modules),
        "<hr />"
    ))
1fea16c0aae2f73be713271de5f003e608cee7e9
1,165
def genBoard():
    """
    Generates an empty board.

    >>> genBoard()
    ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
    """
    # Empty board
    empty = ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
    # Return it
    return empty
c47e766a0c897d3a1c589a560288fb52969c04a3
1,166
def _partition_at_level(dendrogram, level):
    """Return the partition of the nodes at the given level.

    A dendrogram is a tree and each level is a partition of the graph
    nodes. Level 0 is the first partition, which contains the smallest
    snapshot_affiliations, and the best is len(dendrogram) - 1. The higher
    the level is, the bigger are the snapshot_affiliations.
    """
    partition = dendrogram[0].copy()
    for index in range(1, level + 1):
        for node, community in partition.items():
            partition[node] = dendrogram[index][community]
    return partition
b179127076c386480c31a18a0956eb30d5f4ef2a
1,167
import time

def make_filename():
    """This function creates a unique filename."""
    unique_filename = time.strftime("%Y%m%d-%H%M%S")
    # unique_filename = str(uuid.uuid1())
    # unique_filename = str(uuid.uuid1().hex[0:7])
    save_name = 'capture_ferhat_{}.png'.format(unique_filename)
    return save_name
bf16b642884381d795148e045de2387d0acaf23d
1,168
import re

def is_live_site(url):
    """Ensure that the tool is not used on the production Isaac website.

    Use of this tool or any part of it on Isaac Physics and related
    websites is a violation of our terms of use:
    https://isaacphysics.org/terms
    """
    if re.search(r"http(s)?://isaac(physics|chemistry|maths|biology|science)\.org", url):
        return True
    else:
        return False
407624a049e92740eb82753d941780a446b1facf
1,169
def score_false(e, sel):
    """Return scores for internal-terminal nodes."""
    return e * (~sel).sum()
077cd38c6d1186e2d70fd8a93f44249b0cef2885
1,170
import logging

import numpy

def retrieveXS(filePath, evMin=None, evMax=None):
    """Open an ENDF file and return the scattering XS."""
    logging.info('Retrieving scattering cross sections from file {}'
                 .format(filePath))
    energies = []
    crossSections = []
    with open(filePath) as fp:
        line = fp.readline()
        while line[0] == '#':
            line = fp.readline()
        while line != '' and '#END' not in line:
            ev, xs = [float(xx) for xx in line.split()[:2]]
            energies.append(ev)
            crossSections.append(xs)
            line = fp.readline()
    logging.info('Done')
    energies = numpy.array(energies)
    crossSections = numpy.array(crossSections)

    bounds = energies.min(), energies.max()
    if evMin is None:
        evMin = bounds[0]
    else:
        if bounds[0] > evMin:
            logging.warning('Could not find requested minimum energy '
                            '{:.4E} eV in cross section file {}. '
                            'Using minimum found: {:.4E} eV'
                            .format(evMin, filePath, bounds[0]))
            evMin = bounds[0]
    indices = numpy.where(energies >= evMin)
    energies = energies[indices]
    crossSections = crossSections[indices]

    if evMax is None:
        evMax = bounds[1]
    else:
        if bounds[1] < evMax:
            logging.warning('Could not find requested maximum energy '
                            '{:.4E} eV in cross section file {}. '
                            'Using maximum found: {:.4E} eV'
                            .format(evMax, filePath, bounds[1]))
            evMax = bounds[1]
    indices = numpy.where(energies <= evMax)
    energies = energies[indices]
    crossSections = crossSections[indices]

    return energies, crossSections
388986facd75540983870f1f7e0a6f51b6034271
1,171
import string

def _parse_java_simple_date_format(fmt):
    """
    Split a SimpleDateFormat into literal strings and format codes with counts.

    Examples
    --------
    >>> _parse_java_simple_date_format("'Date:' EEEEE, MMM dd, ''yy")
    ['Date: ', ('E', 5), ', ', ('M', 3), ' ', ('d', 2), ", '", ('y', 2)]
    """
    out = []
    quoted = False
    prev_c = None
    prev_count = 0
    literal_text = ''
    k = 0
    while k < len(fmt):
        c = fmt[k]
        k += 1
        if not quoted and c == "'" and k < len(fmt) and fmt[k] == "'":
            # Repeated single quote.
            if prev_c is not None:
                out.append((prev_c, prev_count))
                prev_c = None
                prev_count = 0
            literal_text += c
            k += 1
            continue
        if c == "'":
            if not quoted:
                if prev_c is not None:
                    out.append((prev_c, prev_count))
                    prev_c = None
                    prev_count = 0
                if literal_text:
                    out.append(literal_text)
                    literal_text = ''
            quoted = not quoted
            continue
        if quoted:
            literal_text += c
            continue
        if c not in string.ascii_letters:
            if prev_c is not None:
                out.append((prev_c, prev_count))
                prev_c = None
                prev_count = 0
            literal_text += c
            continue
        if c not in 'GyMdhHmsSEDFwWakKzZ':
            raise ValueError(f"unknown format character {c}")
        if literal_text != '':
            out.append(literal_text)
            literal_text = ''
        if prev_c is not None and c != prev_c:
            out.append((prev_c, prev_count))
            prev_count = 0
        prev_c = c
        prev_count += 1
    else:
        if quoted:
            raise ValueError("missing closing quote; input ends "
                             f"with '{literal_text}")
        if literal_text != '':
            out.append(literal_text)
        elif prev_c is not None:
            out.append((prev_c, prev_count))
    return out
3fe42e4fc96ee96c665c3c240cb00756c8534c84
1,172
import logging

def rekey_by_sample(ht):
    """Re-key table by sample id to make subsequent
    ht.filter(ht.S == sample_id) steps 100x faster."""
    ht = ht.key_by(ht.locus)
    ht = ht.transmute(
        ref=ht.alleles[0],
        alt=ht.alleles[1],
        het_or_hom_or_hemi=ht.samples.het_or_hom_or_hemi,
        #GQ=ht.samples.GQ,
        HL=ht.samples.HL,
        S=ht.samples.S,
    )
    ht = ht.key_by(ht.S)
    ht = ht.transmute(
        chrom=ht.locus.contig.replace("chr", ""),
        pos=ht.locus.position
    )
    logging.info("Schema after re-key by sample:")
    ht.describe()
    return ht
3e879e6268017de31d432706dab9e672e85673aa
1,173
def _format_stages_summary(stage_results):
    """
    stage_results (list of (tuples of
      (success:boolean, stage_name:string, status_msg:string)))

    returns a string of a report, one line per stage.
    Something like:
        Stage: <stage x> :: SUCCESS
        Stage: <stage y> :: FAILED
        Stage: <stage z> :: SUCCESS
    """
    # find the longest stage name to pad report lines
    max_name_len = 0
    for entry in stage_results:
        _, stage_name, _ = entry
        name_len = len(stage_name)
        if name_len > max_name_len:
            max_name_len = name_len

    summary = ""
    for entry in stage_results:
        _, stage_name, status_msg = entry
        summary += 'Stage: ' + stage_name.ljust(max_name_len) + ":: "
        summary += status_msg + '\n'

    return summary
2f5c757342e98ab258bdeaf7ffdc0c5d6d4668ca
1,174
def read_hdr(name, order='C'):
    """Read hdr file."""
    # get dims from .hdr
    h = open(name + ".hdr", "r")
    h.readline()  # skip line
    l = h.readline()
    h.close()
    dims = [int(i) for i in l.split()]
    if order == 'C':
        dims.reverse()
    return dims
57daadfdf2342e1e7ef221cc94f2e8f70c504944
1,176
import socket

def canonical_ipv4_address(ip_addr):
    """Return the IPv4 address in a canonical format."""
    return socket.inet_ntoa(socket.inet_aton(ip_addr))
edacc70ccc3eef12030c4c597c257775d3ed5fa4
1,177
def make_chained_transformation(tran_fns, *args, **kwargs):
    """Returns a dataset transformation function that applies a list of
    transformations sequentially.

    Args:
        tran_fns (list): A list of dataset transformations.
        *args: Extra arguments for each of the transformation functions.
        **kwargs: Extra keyword arguments for each of the transformation
            functions.

    Returns:
        A transformation function to be used in
        :tf_main:`tf.data.Dataset.map <data/Dataset#map>`.
    """
    def _chained_fn(data):
        for tran_fns_i in tran_fns:
            data = tran_fns_i(data, *args, **kwargs)
        return data

    return _chained_fn
5f24e030df74a0617e633ca8f8d4a3954674b001
1,179
def increment(i, k):
    r"""
    This is a helper function for a summation of the type
    :math:`\sum_{0 \leq k \leq i}`, where i and k are multi-indices.

    Parameters
    ----------
    i: numpy.ndarray
        integer array, i.size = N
    k: numpy.ndarray
        integer array, k.size = N

    Returns
    -------
    changes k on return

    Example
    -------
    k = [1,0,1]
    i = [2,0,2]

    increment(i, k)  # changes k to [1,0,2]
    increment(i, k)  # changes k to [2,0,0]
    increment(i, k)  # changes k to [2,0,1]
    """
    carryover = 1
    if len(k) != len(i):
        raise ValueError('size of i and k do not match up')
    for n in range(len(k))[::-1]:
        if i[n] == 0:
            continue
        tmp = k[n] + carryover
        # print('tmp=', tmp)
        carryover = tmp // (i[n] + 1)
        # print('carryover=', carryover)
        k[n] = tmp % (i[n] + 1)
        if carryover == 0:
            break
    return k
1ac8ef592376fbfa0d04cdd4b1c6b29ad3ed9fbd
1,180
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
    """
    Takes in a dataframe, the column name, and can specify a multiplier
    (default=1.5). Returns the upper and lower bounds for the values in
    that column that signify outliers.
    """
    q1 = df[column].quantile(.25)
    q3 = df[column].quantile(.75)
    iqr = q3 - q1

    upper = q3 + (multiplier * iqr)
    lower = q1 - (multiplier * iqr)

    return upper, lower
7f096d5f5cf2417cbc161713715a39560efd140a
1,182
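A worked example of the bounds above (pandas' default linear quantile interpolation assumed):

import pandas as pd
df = pd.DataFrame({'x': [1, 2, 3, 4, 100]})
generate_outlier_bounds_iqr(df, 'x')  # q1=2, q3=4 -> (7.0, -1.0); 100 falls outside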
import random

def generate_data(Type):
    """
    Randomly generate the data contained in a CAN frame.

    :param Type: the type of data to generate
    :return: the generated random data sequence, of length 8,
             e.g. ['88', '77', '55', '44', '22', '11', '33', '44']
    """
    data = []
    if Type == 1:
        # feedback frame: cell 1-24 battery voltage information
        standard_vol = 35
        offset = random.randint(0, 15)
        max_vol = standard_vol + offset
        min_vol = standard_vol - offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
        offset = random.randint(0, 15)
        max_vol = standard_vol + offset
        min_vol = standard_vol - offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
    elif Type == 2:
        # feedback frame: cell 1-8 battery temperature information
        standard_temp = 45
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
        data.append(str(max_temp))
        data.append('6c')
        data.append(str(min_temp))
        data.append('6c')
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
        data.append(str(max_temp))
        data.append('6c')
        data.append(str(min_temp))
        data.append('6c')
    elif Type == 3:
        # feedback frame: highest/lowest cell voltage and temperature
        standard_vol = 35
        standard_temp = 45
        vol_offset = random.randint(0, 15)
        temp_offset = random.randint(0, 20)
        max_temp = standard_temp + temp_offset
        min_temp = standard_temp - temp_offset - 5
        max_vol = standard_vol + vol_offset
        min_vol = standard_vol - vol_offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
        data.append(str(max_temp))
        data.append('5c')
        data.append(str(min_temp))
        data.append('5c')
    elif Type == 4:
        # periodic frame: system voltage information
        standard_vol = 55
        offset = random.randint(0, 10)
        max_vol = standard_vol * offset * 10
        min_vol = standard_vol - offset - 5
        data.append('c5')
        data.append(str(max_vol))
        data.append('f2')
        data.append(str(min_vol))
        data.append('ed')
        for i in range(3):
            data.append(str(standard_vol + 5 * i))
    elif Type == 5:
        pass
    else:
        pass
    return data
3a920be4b7ef5c5c3e258b3e3c79bc028004179a
1,183
def counting_sort(array):
    """
    SORTING FUNCTION USING COUNTING SORT ALGORITHM
    ARG array = LIST(ARRAY) OF NUMBERS
    """
    # counter list has one slot for every value from 0 up to the maximum
    maximum = max(array)
    counter = [0] * (maximum + 1)
    for i in range(len(array)):
        counter[array[i]] += 1
    for i in range(1, maximum + 1):
        counter[i] = counter[i] + counter[i - 1]
    # print_array(counter)
    result = [0] * len(array)
    for i in range(len(array)):
        result[counter[array[i]] - 1] = array[i]
        counter[array[i]] -= 1
    return result
986e2f9277fa71dcd9897ac409653009c651c49f
1,184
import math
from PIL import ImageColor

def indexedcolor(i, num, npersat=15, lightness=60):
    """Returns an rgb color triplet for a given index, with a finite
    max 'num'. Thus if you need 10 colors and want to get color #5,
    you would call this with (5, 10). The colors are "repeatable".
    """
    nsats = int(math.ceil(num / float(npersat)))
    sat = 100 - int((i // npersat) * (100 / nsats))
    l = lightness
    nhues = int(math.ceil(num / float(nsats)))
    hue = (i % nhues) * (360 // nhues)
    # print >>sys.stderr, 'For i %d, num %d, got %d sats, %d hues -> %d, %d, %d' % (i, num, nsats, nhues, hue, sat, l)
    return ImageColor.getrgb('hsl(%d,%d%%,%d%%)' % (hue, sat, l))
418a875bc8ae50ce21f9667f46718863ba0f55e3
1,185
def dot_to_dict(values):
    """Convert dot notation to a dict. For example: ["token.pos", "token._.xyz"]
    become {"token": {"pos": True, "_": {"xyz": True }}}.

    values (iterable): The values to convert.
    RETURNS (dict): The converted values.
    """
    result = {}
    for value in values:
        path = result
        parts = value.lower().split(".")
        for i, item in enumerate(parts):
            is_last = i == len(parts) - 1
            path = path.setdefault(item, True if is_last else {})
    return result
a2c56a01b179d27eabc728d6ff2ec979885d5feb
1,186
def hexagonal_numbers(length: int) -> list[int]:
    """
    :param length: max number of elements
    :type length: int
    :return: Hexagonal numbers as a list

    Tests:
    >>> hexagonal_numbers(10)
    [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    >>> hexagonal_numbers(0)
    Traceback (most recent call last):
        ...
    ValueError: Length must be a positive integer.
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
632e60505cb17536a17b20305a51656261e469f5
1,187
def check_add_role(store, id, name):
    """Checks if a role exists and adds the record if it doesn't."""
    role = store.find_role(name)
    if role is None:
        return store.create_role(id=id, name=name)
    else:
        return role
c8680158cc005bf7a278951774b9fe0a733fc8c6
1,188
from pathlib import Path

def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
    """
    Removes a base path from an item, and appends the result to a new path.

    :param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
    :param item_path: The :py:class:`pathlib.Path` to be delta-ed
    :param new_base_path: The new base :py:class:`pathlib.Path` for `item_path`.
    :raises ValueError: If base_path is not a sub-path of item_path.
    :return: The new combined path.
    """
    path_stub = item_path.relative_to(base_path)
    new_item_path = new_base_path / path_stub
    return new_item_path
ec531a011e36f053a8092525faae2047f5f66ccc
1,189
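Usage sketch (PosixPath shown; the concrete class depends on the platform):

from pathlib import Path
delta_path(Path("/data/raw"), Path("/data/raw/a/b.txt"), Path("/out"))
# -> PosixPath('/out/a/b.txt')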
def sum_to_scalar(*args):
    """Adding losses/nmsks together that were evaluated in parallel."""
    new_args = list()
    for arg in args:
        new_args.append({k: v.sum() for (k, v) in arg.items()})
    return new_args
a4264911962c7bf3432735f8872522e193ceec8f
1,191
def plural_suffix(count: int) -> str:
    '''"s" when count is not one'''
    suffix = ''
    if count != 1:
        suffix = 's'
    return suffix
950002d57560d06e93e08647ff17d885688bca87
1,193
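Typical use of the helper above in message formatting:

for n in (1, 3):
    print(f"{n} file{plural_suffix(n)}")  # '1 file', then '3 files'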
import argparse
import sys

def parse_args() -> argparse.Namespace:
    """
    Parse program arguments
    :return: Parser values
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-a", action="store_true")
    parser.add_argument("-c", action="store_true")
    parser.add_argument("-x", action="store_true")
    parser.add_argument("-z", action="store_true")
    parser.add_argument("-s", metavar="SET", nargs="*", type=str)
    parser.add_argument("--skip-keys", action="store_true")
    parser.add_argument("--skip-sets", metavar="SET", nargs="*", type=str)
    parser.add_argument("--skip-cache", action="store_true")

    # Ensure there are args
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)

    return parser.parse_args()
9d86d37d94af5c8ff128c4da8226f15728b0da70
1,194
import networkx

def compute_participants(matches, challonge_data):
    """Compute series participants.

    Iterate all matches and players to create a graph. Apply the connected
    components algorithm to resolve distinct participant groups over all
    matches. Sort participant groups by number of wins to correlate with
    Challonge participant data (which also includes number of wins).

    Note that edge cases exist that are not covered. For example, teams
    sometimes field a 1v1 player for a single match. If neither player in
    the 1v1 match takes part in any other matches, the players can't be
    placed in a participant group and their win is not counted. There are
    two consequences:

    1. Not counting a win may make the number of wins between participants
       even, in which case we don't know which participant group won the
       series.
    2. Not grouping a player means the participant player list will be
       incomplete.
    """
    graph = networkx.DiGraph()
    win_id = 0
    platform_ids = []
    name_to_user = {}
    for match in matches:
        # Record a win
        win_id += 1
        graph.add_node(win_id, type='win')

        # Record platform ID
        platform_ids.append(match['platform_id'])

        # Add node for each player
        for player in match['players']:
            name_to_user[player['name']] = player['user_id']
            graph.add_node(player['name'], type='player')

        # Can happen for incomplete matches
        if match['winning_team'] is None:
            continue

        # Connect winning players to recorded win
        for player in match['winning_team']['players']:
            graph.add_edge(player['name'], win_id)

        # Connect all players on the same team
        for team in match['teams']:
            for i in team['players']:
                for j in team['players']:
                    graph.add_edge(i['name'], j['name'])

    mgz_data = [{
        'wins': len([node for node in g if graph.nodes[node]['type'] == 'win']),
        'players': [node for node in g if graph.nodes[node]['type'] == 'player']
    } for g in networkx.weakly_connected_components(graph)]

    return [{
        'user_ids': [name_to_user[n] for n in mgz['players']],
        'winner': challonge['winner'],
        'name': challonge['name'],
        'score': challonge['score'],
        'platform_id': platform_ids[0]
    } for mgz, challonge in zip(
        sorted(mgz_data, key=lambda k: -1 * k['wins']),
        sorted(challonge_data, key=lambda k: -1 * k['score'] if k['score'] else 0)
    )]
a715773d5edd3b4d6852096c665070e64bef1165
1,195
import random

def describe_current_subtask(subtask, prefix=True):
    """Make a 'natural' language description of a subtask name."""
    to_verb = {"AnswerQuestion": "answering a question",
               "ArmGoal": "moving my arm",
               "DemoPresentation": "giving a demo",
               "Find": "finding",
               "Follow": "following",
               "Guide": "guiding",
               "GripperGoal": "moving my gripper",
               "HandOver": "handing something over",
               "Inspect": "inspecting",
               "LookAt": "looking",
               "NavigateTo": "navigating",
               "PickUp": "picking up",
               "Place": "placing",
               "ResetWM": "resetting my world model",
               "Say": "speaking",
               "SendPicture": "sending a picture",
               "TurnTowardSound": "turning towards a sound"}

    description = to_verb.get(subtask, subtask + "ing")

    if prefix:
        description = random.choice(["I'm busy", "I'm"]) + " " + description

    return description
628c699201c26242bd72c6066cba07cce54b14ca
1,197
def addprint(x: int, y: int):
    """Print an "added" representation of `x` and `y`."""
    expr = x + y
    return "base addprint(x=%r, y=%r): %r" % (x, y, expr)
e3f735afc1d4826a1af7210c3cec88c8b8c87dfe
1,198
import re

def parse_date(deadline_date):
    """
    Given a date in the form MM/DD/YY or MM/DD/YYYY, returns the integers
    MM, DD, and YYYY (or YY), in this order.
    """
    deadline_split = re.split('\\/|\\-', deadline_date)
    return int(deadline_split[0]), int(deadline_split[1]), int(deadline_split[2])
0ded6bccce8437aad61cfa5ff121c5ed0595849b
1,199
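Usage sketch; both separators are accepted:

parse_date("12/31/2021")  # -> (12, 31, 2021)
parse_date("12-31-21")    # -> (12, 31, 21)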
import re

def get_file_name(part):
    """Get the file name from a fragment ID using a regex."""
    return re.findall(r"='(.*\-[a-z]+).*", part)[0]
30c8867d8e14b04c593359f1c16d9bf324711ba0
1,201
import math

def generate_sphere_points(n):
    """
    Returns a list of 3D coordinates of points on a sphere using the
    Golden Section Spiral algorithm.
    """
    points = []
    inc = math.pi * (3 - math.sqrt(5))
    offset = 2 / float(n)
    for k in range(int(n)):
        y = k * offset - 1 + (offset / 2)
        r = math.sqrt(1 - y * y)
        phi = k * inc
        points.append([math.cos(phi) * r, y, math.sin(phi) * r])
    return points
bd6c7624220f7928a44f6dcb24b7112e8d803eb4
1,202
def expand_locations(ctx, input, targets = []):
    """Expand location templates.

    Expands all `$(execpath ...)`, `$(rootpath ...)` and deprecated
    `$(location ...)` templates in the given string by replacing with the
    expanded path. Expansion only works for labels that point to direct
    dependencies of this rule or that are explicitly listed in the
    optional argument targets.

    See https://docs.bazel.build/versions/main/be/make-variables.html#predefined_label_variables.

    Use `$(rootpath)` and `$(rootpaths)` to expand labels to the runfiles
    path that a built binary can use to find its dependencies. This path
    is of the format:
    - `./file`
    - `path/to/file`
    - `../external_repo/path/to/file`

    Use `$(execpath)` and `$(execpaths)` to expand labels to the execroot
    (where Bazel runs build actions). This is of the format:
    - `./file`
    - `path/to/file`
    - `external/external_repo/path/to/file`
    - `<bin_dir>/path/to/file`
    - `<bin_dir>/external/external_repo/path/to/file`

    The deprecated `$(location)` and `$(locations)` expansions return
    either the execpath or rootpath, depending on the context.

    Args:
        ctx: context
        input: String to be expanded
        targets: List of targets for additional lookup information.

    Returns:
        The expanded path or the original path
    """
    return ctx.expand_location(input, targets = targets)
efa482d928484b7d6f9c8acbf81e0a3d5b4cd50f
1,203
def black_color_func(word, font_size, position, orientation,
                     random_state=None, **kwargs):
    """Make word cloud black and white."""
    return "hsl(0,100%, 1%)"
d5e874a4f62d30abcba29476d0ba7fc3a31b0ca6
1,210
def detect_label_column(column_names):
    """Detect the label column - which we display as the label for a
    joined column.

    If a table has two columns, one of which is ID, then label_column is
    the other one.
    """
    if (column_names and len(column_names) == 2 and "id" in column_names):
        return [c for c in column_names if c != "id"][0]
    return None
40524e7ed0878316564ad8fd66a2c09fc892e979
1,211
def table(custom_headings, col_headings_formatted, rows, spec):
    """
    Create a LaTeX table

    Parameters
    ----------
    custom_headings : None, dict
        optional dictionary of custom table headings

    col_headings_formatted : list
        formatted column headings

    rows : list of lists of cell-strings
        Data in the table, pre-formatted

    spec : dict
        options for the formatter

    Returns
    -------
    dict : contains key 'latex', which corresponds to a latex string
        representing the table
    """
    longtables = spec['longtables']
    table = "longtable" if longtables else "tabular"
    if custom_headings is not None \
            and "latex" in custom_headings:
        latex = custom_headings['latex']
    else:
        latex = "\\begin{%s}[l]{%s}\n\\hline\n" % \
            (table, "|c" * len(col_headings_formatted) + "|")
        latex += ("%s \\\\ \\hline\n" %
                  (" & ".join(col_headings_formatted)))

    for formatted_rowData in rows:
        if len(formatted_rowData) > 0:
            formatted_rowData_latex = [
                (formatted_cell['latex'] if isinstance(formatted_cell, dict)
                 else formatted_cell)
                for formatted_cell in formatted_rowData]
            latex += " & ".join(formatted_rowData_latex)

            # MULTI-ROW support for *data* (non-col-header) rows of table.
            # Currently unused (unneeded) - see multirow formatter that is
            # commented out in formatters.py
            #multirows = [("multirow" in el) for el in formatted_rowData_latex]
            #if any(multirows):
            #    latex += " \\\\ "
            #    last = True; lineStart = None; col = 1
            #    for multi, data in zip(multirows, formatted_rowData_latex):
            #        if last == True and multi == False:
            #            lineStart = col  # line start
            #        elif last == False and multi == True:
            #            latex += "\\cline{%d-%d} " % (lineStart, col)  # line end
            #        last = multi
            #        res = _re.search("multicolumn{([0-9])}", data)
            #        if res: col += int(res.group(1))
            #        else: col += 1
            #    if last == False:  # need to end last line
            #        latex += "\\cline{%d-%d} " % (lineStart, col - 1)
            #    latex += "\n"
            #else:
            latex += " \\\\ \\hline\n"

    latex += "\\end{%s}\n" % table
    return {'latex': latex}
0ca28fce26fc7476aa5b88a621c5476ae8d381ce
1,213
def conflict(next_x: int, s: tuple) -> bool:
    """Return a boolean that defines the conflict condition of the next queen's position."""
    next_i = len(s)
    for i in range(next_i):
        # same column, or same diagonal (column offset equals row offset)
        if abs(s[i] - next_x) in (0, next_i - i):
            return True
    else:
        return False
cc29b142e1cc799c0a305523b713c5085af25fd0
1,214
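Worked examples for a single placed queen in column 0 (s holds one column per placed row):

conflict(0, (0,))  # -> True  (same column)
conflict(1, (0,))  # -> True  (same diagonal)
conflict(2, (0,))  # -> False (safe placement)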
from typing import List

def split_to_sublists(initial_list: list, n: int, strict: bool = True) -> List[list]:
    """Takes a list and splits it into sublists of size n

    Parameters
    ----------
    initial_list : list
        The initial list to split into sublists

    n : int
        The size of each sublist

    strict : bool
        Whether to force an error if the length of the initial list is not
        divisible by n (split into even groups), default True

    Returns
    -------
    List[list]
        A list of lists of size n (unless strict is False, then the last
        list may be shorter than n)

    Examples
    --------
    ### Split gallery images into sublists of 3

    #### JINJA USAGE
    ```jinja2
    {% if gallery|length % 3 == 0 %}
    {% for sublist in gallery|split_to_sublists(3) %}
    <div class="row">
        <div class="col-md-4">
            <img src="{{ sublist.0[0]['file_path'] }}" alt="{{ sublist.0[0]['file_path'].split()[-1] }}">
        </div>
        <div class="col-md-4">
            <img src="{{ sublist.1[0]['file_path'] }}" alt="{{ sublist.1[0]['file_path'].split()[-1] }}">
        </div>
        <div class="col-md-4">
            <img src="{{ sublist.2[0]['file_path'] }}" alt="{{ sublist.2[0]['file_path'].split()[-1] }}">
        </div>
    </div>
    {% endfor %}
    {% endif %}
    ```

    The above jinja is roughly equivalent to something like this in pure python:

    ```python
    gallery = ["image 1", "image 2", "image 3", "image 4", "image 5", "image 6"]

    if len(gallery) % 3 == 0:
        for sublist in split_to_sublists(gallery, 3):
            # Returns [["image 1", "image 2", "image 3"], ["image 4", "image 5", "image 6"]]
            ...  # Do stuff with each sublist
    ```
    """
    if strict:
        if not len(initial_list) % n == 0:
            raise ValueError(f"Provided list was not of correct size: \n\tList: {initial_list}\n\tSegment size {n}")

    result = []
    for i in range(0, len(initial_list), n):  # Create sublists up to size n
        result.append(initial_list[i:i + n])
    return result
fcca74f9814020c99aaf8b31f092ca3ca9533216
1,215
import os
import re

def get_matched_files(dirPath=".", regex=None):
    """Get the abspath of the files whose name matches a regex.

    Only files will be returned, and directories are excluded.

    Args:
        dirPath (str): the directory to search
        regex (regex): the regular expression to match the filename

    Returns:
        tuple of strings
    """
    # check the existence of path
    fns = []
    _absDir = os.path.abspath(dirPath)
    if os.path.isdir(_absDir):
        for i in os.listdir(_absDir):
            if regex is not None:
                if not re.match(regex, i):
                    continue
            _fpath = os.path.join(_absDir, i)
            if os.path.isfile(_fpath):
                fns.append(_fpath)
    return tuple(fns)
118cf628b54f50b2c41c1885bcf000a741966086
1,216
def sectionize(parts, first_is_heading=False):
    """Join parts of the text after splitting into sections with headings.

    This function assumes that a text was split at section headings, so
    every two list elements after the first one are a heading-section
    pair. This assumption is used to join sections with their
    corresponding headings.

    Parameters
    ----------
    parts : list of str
        List of text parts.
    first_is_heading : bool
        Should the first element be treated as a heading in lists of
        length greater than 1.
    """
    parts = parts.copy()
    if len(parts) <= 1:
        return parts
    first = []
    if not first_is_heading:
        first.append(parts[0])
        del parts[0]
    sections = first + [
        "\n".join(parts[i:i + 2]) for i in range(0, len(parts), 2)
    ]
    return sections
402832d55268dc808888f94b95e3a1c991394041
1,217
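Usage sketch with a leading non-heading part:

sectionize(["intro", "H1", "body1", "H2", "body2"])
# -> ['intro', 'H1\nbody1', 'H2\nbody2']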
def byte_compare(stream_a, stream_b):
    """Byte compare two files (early out on first difference).

    Returns:
        (bool, int): (True, 0) if the streams are equal, otherwise
        (False, offset of first mismatch).
    """
    bufsize = 16 * 1024
    equal = True
    ofs = 0
    while True:
        b1 = stream_a.read(bufsize)
        b2 = stream_b.read(bufsize)
        if b1 != b2:
            equal = False
            if b1 and b2:
                # we have two different buffers: find first mismatch
                for a, b in zip(b1, b2):
                    if a != b:
                        break
                    ofs += 1
            break
        ofs += len(b1)
        if not b1:  # both buffers empty
            break
    return (equal, ofs)
59adfe50fefdb79edd082a35437018d4b954ec75
1,218
import re

def is_regex(param):
    """
    Determine whether the parameter is a valid regular expression string.

    :param param: {String} the parameter
    :return: {Boolean} whether it is a valid regular expression
    """
    try:
        re.compile(param)
        return True
    except re.error:
        return False
6a3ee33e68e33d3557db546beadc005235360080
1,219
def min_count1(lst):
    """
    Get the minimal value of a list, version 1.

    :param lst: Numbers list
    :return: Minimal value and its count on the list
    """
    if len(lst) == 0:
        return []
    count = 0
    min_value = lst[0]
    for num in lst:
        if num == min_value:
            count += 1
        elif num < min_value:
            count = 1
            min_value = num
    return [min_value, count]
b441d0a37534909e9a990b91a953d4022698c04b
1,220
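Usage sketch, including the empty-list edge case:

min_count1([3, 1, 2, 1])  # -> [1, 2]
min_count1([])            # -> []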
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three
    available toppings on their hot dog."""
    return int(ketchup) + int(mustard) + int(onion) == 1
214c95d35c116993dc78740d5d16b874122960ed
1,221
def strip_line_endings(data: list) -> list:
    r"""Removes line endings (\n). Removes an item if it only contains \n."""
    return [i.rstrip("\n") for i in data if i != "\n"]
5383b1bc3884395459ca63b6f15c0a1091eaaaf0
1,222
def weighting_system_z():
    """Z-weighting filter represented as polynomial transfer function.

    :returns: Tuple of `num` and `den`.

    Z-weighting is 0.0 dB for all frequencies and therefore corresponds
    to a multiplication of 1.
    """
    numerator = [1]
    denominator = [1]
    return numerator, denominator
8d84c572631c23f50f8a57e388e21fa62e316930
1,223
def denormalize(series, last_value):
    """Denormalize the values for a given series.

    This uses the last value available (i.e. the last closing price of the
    week before our prediction) as a reference for scaling the predicted
    results.
    """
    result = last_value * (series + 1)
    return result
f4c32aa4248378482f1294c54e706e6ee8d5332d
1,224