content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import torch


def combined_loss(x, reconstructed_x, mean, log_var, args):
    """
    BCE loss for binary features, MSE loss for real-valued features and
    cross-entropy loss for one-hot features, plus the KLD term of a VAE.
    Also want to output dimension (element) wise RCL and KLD.
    """
    # First, binary data
    # (`reduction=` replaces the deprecated `size_average`/`reduce` arguments)
    loss1 = torch.nn.BCEWithLogitsLoss(reduction='sum')
    loss1_per_element = torch.nn.BCEWithLogitsLoss(reduction='none')
    binary_range = args.binary_real_one_hot_parameters['binary_range']
    reconstructed_x1 = reconstructed_x[:, binary_range[0]:binary_range[1]]
    x1 = x[:, binary_range[0]:binary_range[1]]
    RCL1 = loss1(reconstructed_x1, x1)
    RCL1_per_element = loss1_per_element(reconstructed_x1, x1)

    # Next, real data
    loss2 = torch.nn.MSELoss(reduction='sum')
    loss2_per_element = torch.nn.MSELoss(reduction='none')
    real_range = args.binary_real_one_hot_parameters['real_range']
    reconstructed_x2 = reconstructed_x[:, real_range[0]:real_range[1]]
    x2 = x[:, real_range[0]:real_range[1]]
    RCL2 = loss2(reconstructed_x2, x2)
    RCL2_per_element = loss2_per_element(reconstructed_x2, x2)

    # Next, one-hot data
    loss3 = torch.nn.CrossEntropyLoss(reduction='mean')
    loss3_per_element = torch.nn.CrossEntropyLoss(reduction='none')
    one_hot_range = args.binary_real_one_hot_parameters['one_hot_range']
    reconstructed_x3 = reconstructed_x[:, one_hot_range[0]:one_hot_range[1]]
    x3 = x[:, one_hot_range[0]:one_hot_range[1]]

    # This has 3 one-hots. Let's split it up
    x3_1 = x3[:, :19]
    x3_2 = x3[:, 19:19 + 19]
    x3_3 = x3[:, 19 + 19:]
    reconstructed_x3_1 = reconstructed_x3[:, :19]
    reconstructed_x3_2 = reconstructed_x3[:, 19:19 + 19]
    reconstructed_x3_3 = reconstructed_x3[:, 19 + 19:]
    _, labels1 = x3_1.max(dim=1)
    _, labels2 = x3_2.max(dim=1)
    _, labels3 = x3_3.max(dim=1)
    RCL3_1 = loss3(reconstructed_x3_1, labels1.long())
    RCL3_per_element_1 = loss3_per_element(reconstructed_x3_1, labels1.long())
    RCL3_2 = loss3(reconstructed_x3_2, labels2.long())
    RCL3_per_element_2 = loss3_per_element(reconstructed_x3_2, labels2.long())
    RCL3_3 = loss3(reconstructed_x3_3, labels3.long())
    RCL3_per_element_3 = loss3_per_element(reconstructed_x3_3, labels3.long())

    KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
    KLD_per_element = -0.5 * (1 + log_var - mean.pow(2) - log_var.exp())

    RCL = RCL1 + RCL2 + RCL3_1 + RCL3_2 + RCL3_3
    RCL_per_element = torch.cat(
        (
            RCL1_per_element,
            RCL2_per_element,
            RCL3_per_element_1.view([-1, 1]),
            RCL3_per_element_2.view([-1, 1]),
            RCL3_per_element_3.view([-1, 1])
        ),
        1
    )

    return RCL + args.beta_vae * KLD, RCL, KLD, RCL_per_element, KLD_per_element
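# A minimal usage sketch (assumptions: batch of 8, a 37-dim binary block, a
# 5-dim real block and three 19-way one-hots; `_Args` is a hypothetical
# stand-in for the real `args` object, which the source does not define here):
class _Args:
    binary_real_one_hot_parameters = {
        'binary_range': (0, 37),
        'real_range': (37, 42),
        'one_hot_range': (42, 99),
    }
    beta_vae = 1.0

x = torch.rand(8, 99)
reconstructed_x = torch.randn(8, 99)
mean, log_var = torch.zeros(8, 16), torch.zeros(8, 16)
total, RCL, KLD, RCL_el, KLD_el = combined_loss(x, reconstructed_x, mean, log_var, _Args())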
162b2706f9643f66ebb0c3b000ea025d411029e2
3,097
def isfloat(string: str) -> bool:
    """
    This function receives a string and returns if it is a float or not.

    :param str string: The string to check.
    :return: A boolean representing if the string is a float.
    :rtype: bool
    """
    try:
        float(string)
        return True
    except (ValueError, TypeError):
        return False
ac6d8fcbbcf6b8cb442c50895576f417618a7429
3,098
def hook(t):
    """Calculate the progress from download callbacks (for a progress bar)."""
    def inner(bytes_amount):
        t.update(bytes_amount)  # Update progress bar
    return inner
d8228b9dec203aaa32d268dea8feef52e8db6137
3,102
import gzip
import pickle


def load_object(filename):
    """
    Load saved object from file

    :param filename: The file to load
    :return: the loaded object
    """
    with gzip.GzipFile(filename, 'rb') as f:
        return pickle.load(f)
f7e15216c371e1ab05169d40ca4df15611fa7978
3,106
def response_map(fetch_map):
    """Create an expected FETCH response map from the given request map.

    Most of the keys returned in a FETCH response are unmodified from the
    request. The exceptions are BODY.PEEK and BODY partial range. A BODY.PEEK
    request is answered without the .PEEK suffix. A partial range (e.g.
    BODY[]<0.1000>) has the octet count (1000) removed, since that information
    is provided in the literal size (and may be different if the data was
    truncated).
    """
    if not isinstance(fetch_map, dict):
        fetch_map = dict((v, v) for v in fetch_map)
    rmap = {}
    for k, v in fetch_map.items():
        for name in ('BODY', 'BINARY'):
            if k.startswith(name):
                k = k.replace(name + '.PEEK', name, 1)
                if k.endswith('>'):
                    k = k.rsplit('.', 1)[0] + '>'
        rmap[k] = v
    return rmap
42d992662e5bba62046c2fc1a50f0f8275798ef8
3,107
import numpy


def img_to_vector(img_fn, label=0):
    """Read the first 32 characters of the first 32 rows of an image file.

    @return <ndarray>: a 1x(1024+1) numpy array with data and label, where
    the label defaults to 0.
    """
    img = ""
    for line in open(img_fn).readlines()[:32]:
        img += line[:32]
    # labels are always attached at the last position
    itera = [_ for _ in img + str(label)]
    return numpy.fromiter(itera, "f4")
f1d7161a0bc4d6ffebc6ee1b32eafb28c4d75f7f
3,108
import json


def from_config(func):
    """Run a function from a JSON configuration file."""
    def decorator(filename):
        with open(filename, 'r') as file_in:
            config = json.load(file_in)
        return func(**config)
    return decorator
4342a5f6fab8f8274b9dfb762be3255672f4f332
3,109
def examine_mode(mode):
    """
    Returns a numerical index corresponding to a mode

    :param str mode: the subset the user wishes to examine
    :return: the numerical index
    """
    if mode == 'test':
        idx_set = 2
    elif mode == 'valid':
        idx_set = 1
    elif mode == 'train':
        idx_set = 0
    else:
        raise NotImplementedError
    return idx_set
4fee6f018cacff4c760cb92ef250cad21b497697
3,110
def add_classification_categories(json_object, classes_file):
    """
    Reads the name of classes from the file *classes_file* and adds them to
    the JSON object *json_object*. The function assumes that the first line
    corresponds to output no. 0, i.e. we use 0-based indexing.

    Modifies json_object in-place.

    Args:
        json_object: an object created from a json in the format of the detection API output
        classes_file: the list of classes that correspond to the output elements of the classifier

    Return:
        The modified json_object with classification_categories added. If the field
        'classification_categories' already exists, then this function is a no-op.
    """
    if ('classification_categories' not in json_object.keys()) or (len(json_object['classification_categories']) == 0):
        # Read the name of all classes
        with open(classes_file, 'rt') as fi:
            class_names = fi.read().splitlines()
            # remove empty lines
            class_names = [cn for cn in class_names if cn.strip()]

        # Create field with name *classification_categories*
        json_object['classification_categories'] = dict()
        # Add classes using 0-based indexing
        for idx, name in enumerate(class_names):
            json_object['classification_categories']['%i' % idx] = name
    else:
        print('WARNING: The input json already contains the list of classification categories.')

    return json_object
ef92902210f275238271c21e20f8f0eec90253b0
3,111
import os


def _DevNull():
    """On Windows, sometimes the inherited stdin handle from the parent process
    fails. Work around this by passing null to stdin to the subprocess commands.
    This function can be used to create the null file handler.
    """
    return open(os.devnull, 'r')
dc815c172fd45dee4b0ed47cbd9497ce7e643972
3,112
def sumdigits(a: int):
    """Sum of the digits of an integer (assumes a >= 0; a minus sign would not parse)."""
    return sum(map(int, str(a)))
018bcc429e6ea3842fd9e9e2580820aed29bc0aa
3,113
def get_hostname():
    """Returns the hostname, from /etc/hostname."""
    hostname = ""
    try:
        with open('/etc/hostname') as f:
            hostname = f.read().rstrip()
        if len(hostname) == 0:
            hostname = "Unknown"
    except OSError:  # a bare `except:` would also swallow SystemExit and KeyboardInterrupt
        hostname = "Unknown"
    return hostname
4cd4ffc1c8c56bc2e440443fdbc315d27fb94033
3,114
def is_valid_body(val):
    """Body must be a dictionary."""
    return isinstance(val, dict)
ef3a605e1e84ce9d74f77c07799d1abb58aaf61a
3,115
def _vba_to_python_op(op, is_boolean):
    """
    Convert a VBA boolean operator to a Python boolean operator.
    """
    op_map = {
        "Not": "not",
        "And": "and",
        "AndAlso": "and",
        "Or": "or",
        "OrElse": "or",
        "Eqv": "|eq|",
        "=": "|eq|",
        ">": ">",
        "<": "<",
        ">=": ">=",
        "=>": ">=",
        "<=": "<=",
        "=<": "<=",
        "<>": "|neq|",
        "is": "|eq|"
    }
    if not is_boolean:
        op_map["Not"] = "~"
        op_map["And"] = "&"
        op_map["AndAlso"] = "&"
        op_map["Or"] = "|"
        op_map["OrElse"] = "|"
    return op_map[op]
a6ed0c65c6c2d2635f14fb664540eaf283ee4065
3,116
def get_label_number(window):
    """This method assigns a number to each label of a window."""
    mode_list = ["bike", "car", "walk", "bus", "train"]
    current_label_number = 0
    for index, mode in enumerate(mode_list):
        if window[1] == mode:
            current_label_number = index
    return current_label_number
5ed3c683e8619e1b07857992f54079bc68fdfa58
3,117
import copy
import json
import os


def remap_classes(dataset, class_map):
    """Replaces classes of dataset based on a dictionary."""
    class_new_names = list(set(class_map.values()))
    class_new_names.sort()  # NOTE sort() returns None; it sorts the list in place
    class_originals = copy.deepcopy(dataset['categories'])
    dataset['categories'] = []  # removing all dependencies
    class_ids_map = {}  # map from old id to new id

    # Check whether the category has background or not, assign index 0. Useful for panoptic segmentation.
    has_background = False
    if 'Background' in class_new_names:
        # Check whether the background category has index zero.
        if class_new_names.index('Background') != 0:
            class_new_names.remove('Background')
            class_new_names.insert(0, 'Background')
        has_background = True

    # Catching duplicates - TACO had duplicates for id 4040 and 309. Re-id'd
    id_ann_all = []
    id_ann_repeated = []
    for index_old, ann_old in enumerate(dataset['annotations']):
        if ann_old['id'] in id_ann_all:
            # if found a duplicate, re-id at the end
            id_ann_repeated.append(ann_old['id'])
            ann_old['id'] = len(dataset['annotations']) + len(id_ann_repeated) - 1
        else:
            id_ann_all.append(ann_old['id'])
    print(f'Found {len(id_ann_repeated)} annotations repeated.'
          f'\nPlease double check input file, annotation id(s) {id_ann_repeated} are duplicated!\n')

    # Replace categories, iterating through every class name
    for id_new, class_new_name in enumerate(class_new_names):
        # Make sure id:0 is reserved for background
        id_rectified = id_new
        if not has_background:
            id_rectified += 1

        # Creating new category dictionary, using new category ID and the new class name
        category = {
            'supercategory': '',
            'id': id_rectified,  # Background has id=0
            'name': class_new_name,
        }
        dataset['categories'].append(category)  # assigning new categories

        # Map class names
        for class_original in class_originals:
            # If the new class exists in the value of the class map dict, create new class id
            if class_map[class_original['name']] == class_new_name:
                class_ids_map[class_original['id']] = id_rectified

    # Update annotations category id tag
    for ann in dataset['annotations']:
        ann['category_id'] = class_ids_map[ann['category_id']]

    # Saving the newly created file as a JSON file
    num_classes = str(len(class_new_names))
    ann_out_path = './data' + '/' + 'ann_' + 'map_to_' + num_classes + '.json'
    with open(ann_out_path, 'w+') as f:
        f.write(json.dumps(dataset))

    # return path to new file, for loading somewhere else.
    return str(os.path.abspath(ann_out_path))
fd3c971221a102f296c76f72d6296ebf0a0e4763
3,118
def shave_bd(img, bd):
    """
    Shave the border area of spatial views. A common operation in SR.

    :param img: image array to crop
    :param bd: border width to remove
    :return: the cropped image
    """
    return img[bd:-bd, bd:-bd, :]
4b822c5e57787edb74955fd350ad361080b8640b
3,119
from typing import Dict
from typing import Any


def _apply_modifier(s: str, modifier: str, d: Dict[Any, str]) -> str:
    """
    This will search for the ^ signs and replace the next digit or (digits
    when {} is used) with its/their uppercase representation.

    :param s: Latex string code
    :param modifier: Modifier command
    :param d: Dict to look upon
    :return: New text with replaced text.
    """
    s = s.replace(modifier, "^")
    newtext = ""
    mode_normal, mode_modified, mode_long = range(3)
    mode = mode_normal
    for ch in s:
        if mode == mode_normal and ch == '^':
            mode = mode_modified
            continue
        elif mode == mode_modified and ch == '{':
            mode = mode_long
            continue
        elif mode == mode_modified:
            newtext += d.get(ch, ch)
            mode = mode_normal
            continue
        elif mode == mode_long and ch == '}':
            mode = mode_normal
            continue

        if mode == mode_normal:
            newtext += ch
        else:
            newtext += d.get(ch, ch)
    return newtext
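# Hypothetical usage: map digits to Unicode superscripts after a "\sup" command
# (the modifier string and dict below are illustrative, not from the source).
_superscripts = {"2": "\u00b2", "3": "\u00b3"}
assert _apply_modifier("x\\sup2", "\\sup", _superscripts) == "x\u00b2"
assert _apply_modifier("x\\sup{23}", "\\sup", _superscripts) == "x\u00b2\u00b3"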
c54a2c66ff6ee768e588b14472fa5707edf9bc56
3,121
def adjust_learning_rate(optimizer, iteration, epoch_size, hyp, epoch, epochs):
    """Adjust learning rate: warmup and lr decay.

    :param optimizer: optimizer
    :param iteration: iteration
    :param epoch_size: epoch_size
    :param hyp: hyperparameters
    :param epoch: epoch
    :param epochs: the number of epochs
    :return: lr
    """
    step_index = 0
    if epoch < 6:
        # The first 6 epochs carry out warm up
        learning_rate = 1e-6 + (hyp['lr0'] - 1e-6) * iteration / (epoch_size * 2)
    else:
        if epoch > epochs * 0.5:
            # At 50% of the epochs, the learning rate decays by a factor of 10
            step_index = 1
        if epoch > epochs * 0.7:
            # At 70% of the epochs, the learning rate decays by a factor of 100
            step_index = 2
        learning_rate = hyp['lr0'] * (0.1 ** step_index)

    for param_group in optimizer.param_groups:
        param_group['lr'] = learning_rate

    return learning_rate
c90c61fcecca99d31214c96cdf7d96b6ba682daa
3,122
def generate_dict_entry(key, wordlist):
    """Generate one entry of the python dictionary"""
    entry = " '{}': {},\n".format(key, wordlist)
    return entry
57ab3c063df0bde1261602f0c6279c70900a7a88
3,124
def line(value):
    """
    | Line which can be used to cross with functions like RSI or MACD.
    | Name: line\_\ **value**\

    :param value: Value of the line
    :type value: float
    """
    def return_function(data):
        column_name = f'line_{value}'
        if column_name not in data.columns:
            data[column_name] = value
        return data[column_name].copy()

    return return_function
07b4f9671ae06cf63c02062a9da4eb2a0b1a265a
3,125
def _is_hangul_syllable(i):
    """
    Function for determining if a Unicode scalar value i is within the range of Hangul syllables.

    :param i: Unicode scalar value to lookup
    :return: Boolean: True if the lookup value is within the range of Hangul syllables, otherwise False.
    """
    # Range of Hangul characters as defined in UnicodeData.txt
    return i in range(0xAC00, 0xD7A3 + 1)
793519ec33a8920ea13328b0e5a4f814c859b0d3
3,127
def validate_model(model):
    """
    Validate a single data model parameter or a full data model block by
    recursively calling the 'validate' method on each node working from
    the leaf nodes up the tree.

    :param model: part of data model to validate
    :type model:  :graphit:GraphAxis
    :return: overall successful validation
    :rtype: :py:bool
    """
    allnodes = model.nodes.keys()
    leaves = model.leaves(return_nids=True)
    done = []

    def _walk_ancestors(nodes, success=True):
        parents = []
        for node in nodes:
            node = model.getnodes(node)

            # Continue only if the node was found and it has a 'validate' method
            if not node.empty() and hasattr(node, 'validate'):
                val = node.validate()
                done.append(node.nid)
                if not val:
                    return False

                pnid = node.parent().nid
                if pnid not in done and pnid in allnodes:
                    parents.append(pnid)

        if parents:
            return _walk_ancestors(set(parents), success=success)
        return success

    # Recursively walk the tree from leaves up to root.
    return _walk_ancestors(leaves)
009c629fe80af65f574c698567cb6b5213e9c888
3,128
import os


def __modules_with_root_module_path(path):
    """
    Returns all modules beneath the root module path. This treats all
    directories as packages regardless of whether or not they include
    a __init__.py.
    """
    modules = []
    if os.path.isfile(path) and os.path.splitext(path)[1] == '.py' and os.path.basename(path) != '__init__.py':
        name = os.path.splitext(os.path.basename(path))[0]
        modules.append(name)
    elif os.path.isdir(path):
        pkg_name = os.path.basename(path)
        modules.append(pkg_name)
        for ff in os.listdir(path):
            modules.extend(['.'.join([pkg_name, m])
                            for m in __modules_with_root_module_path(os.path.join(path, ff))])
    return modules
96086c8f8e7a277033086c2da6a3bdad16c41756
3,129
def is_pull_request_merged(pull_request):
    """Takes a github3.pulls.ShortPullRequest object"""
    return pull_request.merged_at is not None
0fecf82b96f7a46cfb4e9895897bd4998d6f225b
3,130
def format_data_hex(data):
    """Convert the bytes array to a hex representation."""
    # Bytes are separated by spaces.
    return ' '.join('%02X' % byte for byte in data)
27239052d9ca0b12c19977e79d512e0cab04182e
3,134
def sort_dict(original):
    """Recursively sorts dictionary keys and dictionary values in alphabetical order"""
    if isinstance(original, dict):
        # Make a new "ordered" dictionary. No need for Collections in Python 3.7+
        res = dict()
        for k, v in sorted(original.items()):
            res[k] = v
        d = res
    else:
        d = original
    for k in d:
        if isinstance(d[k], str):
            continue
        if isinstance(d[k], list) and len(d[k]) > 1 and isinstance(d[k][0], str):
            d[k] = sorted(d[k])
        if isinstance(d[k], dict):
            d[k] = sort_dict(d[k])
        if isinstance(d[k], list) and len(d[k]) >= 1 and isinstance(d[k][0], dict):
            for i in range(len(d[k])):
                d[k][i] = sort_dict(d[k][i])
    return d
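# Example: keys come back in alphabetical order and lists of strings are sorted.
_d = sort_dict({"b": ["y", "x"], "a": "z"})
assert list(_d) == ["a", "b"] and _d["b"] == ["x", "y"]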
8c194af76160b0e4d3bad135720e051a4d4622b0
3,135
def train_classifier(classifier, features, labels):
    """This function must concern itself with training the classifier
    on the specified data."""
    return classifier.fit(features, labels)
ef74548aeb6e245d8728caf3205163c249046aae
3,136
from typing import Dict
from typing import List
import math


def best_broaders(supers_for_all_entities: Dict,
                  per_candidate_links_and_supers: List[Dict],
                  num_best: int = 5,
                  super_counts_field: str = "broader_counts",
                  doprint=False,
                  representativeness_threshold=0.1):
    """
    Returns the best matching super for a candidate class, according to a list of supers for
    entities in the class and entities in the whole corpus. If comparing to a taxonomy, a super
    is a broader.

    @param supers_for_all_entities: a dictionary that has, for every possible entity, the supers it belongs to
    @param per_candidate_links_and_supers: a list of dictionaries, one per candidate. For each,
        at least two fields are expected: "entities", containing the list of entities, and the one
        named by super_counts_field, which is, in turn, a dictionary whose keys are supers and
        whose values are the number of entities in that candidate having this broader
    @param num_best: maximum number of best matching supers to be returned
    @param super_counts_field:
    @return: for every candidate class, the num_best best matching supers and their log odds ratio
    """
    result = []
    global_counts = dict()
    for ent, bros in supers_for_all_entities.items():
        for bro in bros:
            global_counts[bro] = global_counts.get(bro, 0) + 1

    onlytopmost = []
    for can in per_candidate_links_and_supers:

        # For this entity, the following dictionaries have an element for every possible super
        # Using notation from the paper
        # T_cc : The number of entities narrower to a candidate which are tagged with NER type T
        T_cc = {x: y for x, y in can[super_counts_field].items()
                if y > representativeness_threshold * len(can["entities"])}
        if len(T_cc) == 0:
            T_cc = {x: y for x, y in can[super_counts_field].items()}
        # T_w : the number of entities in the whole corpus tagged with T
        T_w = {y: global_counts[y] for y in T_cc.keys()}
        # w : the total number of entities in the whole corpus
        w = float(len(supers_for_all_entities))
        # cc : the total number of entities in this candidate
        cc = float(len(can["entities"]))

        # dict of the form super : log_odds
        log_odds_per_super = {x: math.log((T_cc[x] / cc) / (T_w[x] / w))
                              for x in T_cc.keys()}

        logslist = list(log_odds_per_super.items())
        logslist.sort(key=lambda x: x[1])
        logslist.reverse()

        maxbroads = min(len(logslist), num_best)
        logodds = []
        for bi in range(maxbroads):
            logodds.append({"candidatesbroader": logslist[bi][0],
                            "loggods": logslist[bi][1]})
        can["log_odds"] = logodds
        if doprint:
            print("\t\t---", ", ".join([str(x[1]) for x in logslist[:maxbroads]]))
        if len(logslist) > 0:
            onlytopmost.append(logslist[0][1])
            can["best_match_broader"] = logslist[0][0]
        else:
            onlytopmost.append(None)
            can["best_match_broader"] = None

    return onlytopmost
9aa9826c43e67a28eeca463b107296e093709246
3,137
import os


def _NormalizedSource(source):
    """Normalize the path.

    But not if that gets rid of a variable, as this may expand to something
    larger than one directory.

    Arguments:
        source: The path to be normalized.
    Returns:
        The normalized path.
    """
    normalized = os.path.normpath(source)
    if source.count('$') == normalized.count('$'):
        source = normalized
    return source
5ecaeddf2c3941bbfb0d89ee902a961f7aeab838
3,138
import re


def is_heading(line):
    """Determine whether a given line is a section header
    that describes subsequent lines of a report.
    """
    # `steers?` (rather than the original `steer?`) matches "steer"/"steers",
    # consistent with the plural-optional `s?` on the other alternatives
    has_cattle = re.search(r'steers?|hfrs?|calves|cows?|bulls?', line, re.IGNORECASE)
    has_price = re.search(r'\$[0-9]+\.[0-9]{2}', line)
    return bool(has_cattle) and not bool(has_price)
ccbc80f7db61f7ba82aa88e54112d1995d457764
3,139
def merge_dicts(dict1, dict2):
    """
    _merge_dicts

    Merges two dictionaries into one.

    INPUTS
        @dict1 [dict]: First dictionary to merge.
        @dict2 [dict]: Second dictionary to merge.

    RETURNS
        @merged [dict]: Merged dictionary
    """
    merged = {**dict1, **dict2}
    return merged
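# Example: on key collision, the value from dict2 wins.
assert merge_dicts({"a": 1, "b": 2}, {"b": 3}) == {"a": 1, "b": 3}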
67e96ba9c9831e6e2aa4bbd6cd8b8d1d5edb93c4
3,140
import itertools


def gen_positions(n, n_boulders):
    """Generates state codes for boulders. Includes empty rows

    Parameters:
        n: number of rows/columns
        n_boulders: number of boulders per row

    return value: Possible boulder and alien states
    """
    alien_positions_with_0 = ["{}1{}".format('0' * (n - i - 1), '0' * i) for i in range(n)] + ['0' * n]
    if n_boulders == 1:
        return alien_positions_with_0, alien_positions_with_0[0:n]
    else:
        positions = []
        position_index = list(itertools.combinations(range(n), n_boulders))
        for tup in position_index:
            pos = ''
            for i in range(n):
                if i in tup:
                    pos += '1'
                else:
                    pos += '0'
            positions.append(pos)
        # The original tested membership in an always-empty helper list;
        # checking `positions` makes the empty row actually get appended once.
        if '0' * n not in positions:
            positions.append('0' * n)
        return positions, alien_positions_with_0[0:n]
0a20594f2e021bf8e190f6c7c726159fde0b8367
3,141
def put_path(components, value):
    """Recursive function to put value in component"""
    if len(components) > 1:
        new = components.pop(0)
        value = put_path(components, value)
    else:
        new = components[0]
    return {new: value}
77db4064a77cf1cdcde1d74d901410525722b66e
3,143
def lookup_material_probase(information_extractor, query, num):
    """Lookup material in Probase"""
    material_params = {
        'instance': query,
        'topK': num
    }
    result = information_extractor.lookup_probase(material_params)
    rank = information_extractor.rank_probase_result_material(result)
    return rank
9cecf99e3a9689f85788df21ef01d4e86c9a392d
3,144
import traceback
import sys


def get_error_info():
    """Return info about last error."""
    msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
    return msg
539a26f0a6bd6b733aa6e6ff1325faac6a32be12
3,146
import os


def append_source_filess(index_filename, source_files, driver):
    """This appends the paths to different source files to the temporary index file

    For example

    SRCSRV: source files ---------------------------------------
    c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\pdo_dbh.cpp*pdo_sqlsrv/pdo_dbh.cpp
    c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\pdo_init.cpp*pdo_sqlsrv/pdo_init.cpp
    ...
    ...
    c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\shared\core_stream.cpp*shared/core_stream.cpp
    c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\shared\core_util.cpp*shared/core_util.cpp
    SRCSRV: end ------------------------------------------------
    """
    failed = False
    with open(index_filename, 'a') as idx_file:
        idx_file.write('SRCSRV: source files ---------------------------------------' + os.linesep)
        with open(source_files, 'r') as src_file:
            for line in src_file:
                pos = line.find('shared')
                if pos > 0:
                    # it's a nested folder, so it must be positive
                    relative_path = line[pos:]
                    src_line = line[:-1] + '*' + relative_path.replace('\\', '/')
                else:
                    # not a file in the shared folder
                    pos = line.find(driver)
                    if pos <= 0:
                        print('ERROR: Expected to find', driver, 'in', line)
                        failed = True
                        break
                    else:
                        relative_path = line[pos:]
                        src_line = line[:-1] + '*' + relative_path.replace('\\', '/')
                idx_file.write(src_line)
        idx_file.write('SRCSRV: end ------------------------------------------------' + os.linesep)

    return failed
e26252740c2c64b581d4be56e25086216bc36e1b
3,147
def an(pos=5):
    """
    Text alignment.

    @pos:
        1: Bottom left
        2: Bottom center
        3: Bottom right
        4: Middle left
        5: Middle center
        6: Middle right
        7: Top left
        8: Top center
        9: Top right
    """
    apos = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    if pos not in apos:
        raise ValueError('\n\nan(pos):\n<pos> only accepts the '
                         'following values: ' + str(apos))
    else:
        return '\\an{:d}'.format(pos)
fbe1e89282ebdf7b4977bee295e2cac7735bd652
3,148
import logging


def create_logger(logfile=r"/tmp/tomoproc.log"):
    """Default logger for exception tracking"""
    logger = logging.getLogger("tomoproc_logger")
    logger.setLevel(logging.INFO)

    # create the logging file handler
    fh = logging.FileHandler(logfile)
    fh.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
    )

    # add handler to logger object
    logger.addHandler(fh)
    return logger
a0c005c39af9d24d7198790cf0cfe31a1b6395a0
3,149
import torch


def masks_empty(sample, mask_names):
    """ Tests whether a sample has any non-masked values """
    return any(not torch.any(sample[name] != 0) for name in mask_names)
4c13b123fe6f5a17c3cd2ee673c54de331af7b23
3,152
def listSplit(aList, n):
    """Split a list evenly into sublists of n elements each; return the nested list."""
    return [aList[i:i + n] for i in range(0, len(aList), n)]
936d4ff5b3bbbc39c57c01dc6a12e42b7dc6e0de
3,153
def _add_left_zeros(number, iteration_digits):
    """Add zeros to the left side of the experiment run number.

    Zeros will be added according to missing spaces until iterations_digits
    are reached.
    """
    number = str(number)
    return f'{"0" * (iteration_digits - len(number))}{number}'
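# Example: pad a run number to a fixed width (equivalent to str.zfill).
assert _add_left_zeros(7, 3) == "007"
assert _add_left_zeros(7, 3) == str(7).zfill(3)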
e3f86a7e7f276ceff4eb662a3f5bc364b4d10ea3
3,154
def default_data_to_device(
    input, target=None, device: str = "cuda", non_blocking: bool = True
):
    """Sends data output from a PyTorch Dataloader to the device."""
    input = input.to(device=device, non_blocking=non_blocking)
    if target is not None:
        target = target.to(device=device, non_blocking=non_blocking)
    return input, target
8dafddbd52b54a576ddc67d7d79af4372fbd57dc
3,155
def vec_add(iter_a, iter_b):
    """element wise addition"""
    if len(iter_a) != len(iter_b):
        raise ValueError
    return (a + b for a, b in zip(iter_a, iter_b))
f3e5bf50d61cfe518ee8b0eb838503a7f054baa8
3,156
def get_info(obj):
    """
    get info from account obj

    :type obj: account object
    :param obj: the object of account
    :return: dict of account info
    """
    if obj:
        return dict(db_instance_id=obj.dbinstance_id,
                    account_name=obj.account_name,
                    account_status=obj.account_status,
                    account_type=obj.account_type,
                    account_description=obj.account_description,
                    database_privileges=obj.database_privileges)
    return {}
c654ab1bdb4b4bf20223172dae450e1e7e6a52b9
3,157
import collections
import random


def random_sample_with_weight_and_cost(population, weights, costs, cost_limit):
    """
    Like random_sample_with_weight but with the addition of a cost and limit.
    While performing random samples (with priority for higher weight) we'll keep track of cost.
    If cost exceeds the cost limit, we stop selecting.
    Basically the knapsack problem, but with deliberately random selection rather than dynamic optimization.
    """
    population_weights = {request: weight for (request, weight) in zip(population, weights)}
    population_costs = {request: cost for (request, cost) in zip(population, costs)}
    selected = []
    not_selected = []
    cost = 0

    # Create a Counter from the population, assigning count by weight
    counter = collections.Counter(population_weights)

    while counter:
        # Turn the Counter into a list for random selection from
        # The list will have n repetitions of an element with weight n
        choice = random.choice(list(counter.elements()))
        choice_cost = population_costs[choice]
        # If the cost would cause us to exceed our limit it shouldn't be selected
        if cost + choice_cost > cost_limit:
            not_selected.append(choice)
        else:
            cost += choice_cost
            selected.append(choice)

        # When chosen (whether selected or not), remove the element from the population
        # Effectively removes all repetitions of the element
        counter.pop(choice)
    return selected, not_selected
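# Illustrative call (the draw order is random): heavier items tend to be drawn
# earlier, and anything that would push total cost past the limit is rejected.
random.seed(0)
picked, rejected = random_sample_with_weight_and_cost(
    ["a", "b", "c"], weights=[5, 1, 1], costs=[3, 2, 2], cost_limit=4)
assert set(picked) | set(rejected) == {"a", "b", "c"}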
637afd1c0e83bbda879f41bd15feb0f65b238fb3
3,159
def _client_row_class(client: dict) -> str:
    """
    Set the row class depending on what's in the client record.
    """
    required_cols = ['trust_balance', 'refresh_trigger']
    for col in required_cols:
        if col not in client:
            return 'dark'
    try:
        if client['trust_balance'] > client['refresh_trigger']:
            return 'success'
    except TypeError:
        return 'dark'
    return 'danger'
cd5ebd8fd64c7d994d6803df473cd317af65e9ac
3,161
from typing import Iterable
from typing import List


def flatten(l: Iterable) -> List:
    """Return a list of all non-list items in l

    :param l: list to be flattened
    :return:
    """
    rval = []
    for e in l:
        if not isinstance(e, str) and isinstance(e, Iterable):
            # NOTE: len(list(e)) assumes e is a re-iterable container;
            # a one-shot iterator would be exhausted before the recursive call.
            if len(list(e)):
                rval += flatten(e)
        else:
            rval.append(e)
    return rval
2d2202c21e6da7064491d55d5519c259d10f42c0
3,163
def create_vector_clock(node_id, timeout):
    """This method builds the initial vector clock for a new key.

    Parameters
    ----------
    node_id : int
        the id of one node in the cluster
    timeout : int
        the expire timeout of the key

    Returns
    -------
    dict
        the vector clock as dictionary
    """
    if node_id is not None and timeout is not None:
        return {
            "versions": [{"nodeId": node_id, "version": 1}],
            "timestamp": timeout
        }
    else:
        raise ValueError("You must give the node id and the timeout.")
ed6df0e7e493d448f52e5fe47b55df8a1de94543
3,164
def stellar_radius(M, logg):
    """Calculate stellar radius given mass and logg"""
    if not isinstance(M, (int, float)):
        raise TypeError('Mass must be int or float. {} type given'.format(type(M)))
    if not isinstance(logg, (int, float)):
        raise TypeError('logg must be int or float. {} type given'.format(type(logg)))
    if M < 0:
        raise ValueError('Only positive stellar masses allowed.')
    M = float(M)
    return M / (10 ** (logg - 4.44))
2afbd991c7461d7861370f18d90df840569da857
3,166
def set_plus_row(sets, row):
    """Update each set in list with values in row."""
    for i in range(len(sets)):
        sets[i].add(row[i])
    return sets
87f448dc3199c8d3137d5811dd184b3d2bd7cbe3
3,167
from typing import List
from typing import Union


def bytes_to_string(
    bytes_to_convert: List[int], strip_null: bool = False
) -> Union[str, None]:
    """
    Literal bytes to string

    :param bytes_to_convert: list of bytes in integer format
    :return: resulting string
    """
    try:
        value = "".join(chr(i) for i in bytes_to_convert)
        if strip_null:
            return value.strip("\x00")
        return value
    # AttributeError when None object has no strip attribute
    except (ValueError, TypeError, AttributeError):
        return None
a04dee89fb8aed33b6069a7ff0ca8c497d0a6062
3,168
import math


def gauss_distribution(x, mu, sigma):
    """
    Calculate value of gauss (normal) distribution

    Parameters
    ----------
    x : float
        Input argument
    mu :
        Mean of distribution
    sigma :
        Standard deviation

    Returns
    -------
    float
        Probability, values from range [0-1]
    """
    # Normal density: 1 / (sigma * sqrt(2 * pi)) * exp(-((x - mu) / sigma)**2 / 2).
    # The original divided by 2 * sqrt(pi), which mis-scales the density.
    return 1 / (math.sqrt(2 * math.pi) * sigma) * math.exp(-(1 / 2) * ((x - mu) / sigma) ** 2)
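# Sanity check: the standard-normal density peaks at 1 / sqrt(2 * pi) ~= 0.3989.
assert abs(gauss_distribution(0.0, 0.0, 1.0) - 0.39894) < 1e-4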
05cf2c14b337b45a81ddbe7655b4d7cf21e352cd
3,169
def manhatten(type_profile, song_profile):
    """
    Calculate the Manhattan distance between the profile of a specific
    output_colums value (e.g. a specific composer) and the profile of a song
    """
    # Sort profiles by frequency
    type_profile = type_profile.most_common()
    song_profile = song_profile.most_common()
    flat_type_profile = [ngram for (ngram, freq) in type_profile]
    flat_song_profile = [ngram for (ngram, freq) in song_profile]
    manhatten = 0
    for i in range(len(flat_song_profile)):
        ngram = flat_song_profile[i]
        if ngram in flat_type_profile:
            manhatten += abs(flat_type_profile.index(ngram) - i)
        else:
            manhatten += abs(len(flat_type_profile) - i)
    return manhatten
4703585f9f60551bf2a5e2762612d45efb47a453
3,171
def _mark_untranslated_strings(translation_dict):
    """Marks all untranslated keys as untranslated by surrounding them
    with lte and gte symbols.

    This function modifies the translation dictionary passed into it in-place
    and then returns it.
    """
    # This was a requirement when burton was written, but may be an unwanted
    # side effect for other projects that adopt burton. We should replace it
    # with something more flexible.
    for key in translation_dict:
        if key is not None and translation_dict[key] is None:
            translation_dict[key] = u"\u2264" + key + u"\u2265"

    return translation_dict
d15ac2d0fe8d50d5357bcc1e54b9666f7076aefd
3,172
from typing import Dict


def serialize(name: str, engine: str) -> Dict:
    """Get dictionary serialization for a dataset locator.

    Parameters
    ----------
    name: string
        Unique dataset name.
    engine: string
        Unique identifier of the database engine (API).

    Returns
    -------
    dict
    """
    return {'name': name, 'database': engine}
9ab11318050caf3feb4664310e491ed48e7e5357
3,175
def support_acctgroup_acctproject(version):
    """
    Whether this Lustre version supports acctgroup and acctproject
    """
    if version.lv_name == "es2":
        return False
    return True
858ec772a90e66431731ffcdd145fa7e56daad02
3,177
from typing import AnyStr
import unicodedata


def normalize_nfc(txt: AnyStr) -> bytes:
    """
    Normalize message to NFC and return bytes suitable for protobuf.
    This seems to be bitcoin-qt standard of doing things.
    """
    str_txt = txt.decode() if isinstance(txt, bytes) else txt
    return unicodedata.normalize("NFC", str_txt).encode()
12b6e037225878e0bbca1d52d9f58d57abb35746
3,180
def set_or_none(list_l):
    """Function to avoid list->set transformation to return set={None}."""
    if list_l == [None]:
        res = None
    else:
        res = set(list_l)
    return res
ee5fb4539e63afc7fd8013610229d9ab784b88c5
3,184
import re


def case_mismatch(vm_type, param):
    """Return True if vm_type matches a portion of param in a case
    insensitive search, but does not equal that portion;
    return False otherwise.

    The "portions" of param are delimited by "_".
    """
    re_portion = re.compile(
        "(^(%(x)s)_)|(_(%(x)s)_)|(_(%(x)s)$)" % dict(x=vm_type), re.IGNORECASE
    )
    found = re_portion.search(param)
    if found:
        param_vm_type = [x for x in found.groups()[1::2] if x][0]
        return param_vm_type != vm_type
    else:
        return False
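# Example: "VM" matches the leading portion of "VM_instance" case-insensitively
# but differs in case from "vm", so it counts as a mismatch.
assert case_mismatch("vm", "VM_instance") is True
assert case_mismatch("vm", "vm_instance") is False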
e7fb565ac6e10fd15dd62a64fbf7f14a8bcfde6b
3,185
import math


def perm(x, y=None):
    """Return the number of ways to choose k items from n items
    without repetition and with order."""
    if not isinstance(x, int) or (not isinstance(y, int) and y is not None):
        raise ValueError(f"Expected integers. Received [{type(x)}] {x} and [{type(y)}] {y}")
    return math.perm(x, y)
c9ad65c6ce3cc3e5ba488c5f2ddd1aabbdc7da6a
3,186
def _padwithzeros(vector, pad_width, iaxis, kwargs):
    """Pad with zeros"""
    vector[:pad_width[0]] = 0
    vector[-pad_width[1]:] = 0
    return vector
1a3a9fc4fd3b0fc17a905fa9ecd283d60310655d
3,187
from collections import deque
from typing import Iterable
from typing import Deque


def array_shift(data: Iterable, shift: int) -> Deque:
    """
    left(-) or right(+) shift of array

    >>> arr = range(10)
    >>> array_shift(arr, -3)
    deque([3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
    >>> array_shift(arr, 3)
    deque([7, 8, 9, 0, 1, 2, 3, 4, 5, 6])
    """
    deq = deque(data)
    deq.rotate(shift)
    return deq
c14e115808592808bc9b0cf20fa8bc3d5ece7768
3,189
import math


def parents(level, idx):
    """
    Return all the (grand-)parents of the Healpix pixel idx at level (in nested format)

    :param level: Resolution level
    :param idx: Pixel index
    :return: All the parents of the pixel
    """
    assert idx < 12 * 2 ** (2 * level)

    plpairs = []
    for ind in range(level, 0, -1):
        idx = int(math.floor(idx / 4))
        plpairs.append(tuple((ind - 1, idx)))
        level -= 1

    return plpairs[::-1]
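# Example: the nested scheme packs 4 children per pixel at each level, so the
# ancestors of pixel 37 at level 2 are pixel 9 at level 1 and pixel 2 at level 0.
assert parents(2, 37) == [(0, 2), (1, 9)]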
355c3acffa07065de10049059ef064abefdd7ca0
3,190
def compare_data_identifiers(a, b):
    """Checks if all the identifiers match, besides those that are not in both lists"""
    a = {tuple(key): value for key, value in a}
    b = {tuple(key): value for key, value in b}
    matching_keys = a.keys() & b.keys()
    a = {k: v for k, v in a.items() if k in matching_keys}
    b = {k: v for k, v in b.items() if k in matching_keys}
    return a == b
f0f5f08e4cc685b62b2af19e0c724561988ed1b9
3,191
def canonical_order(match):
    """
    It does not make sense to define a separate bond between atoms 1 and 2,
    and between atoms 2 and 1. This function will swap the atoms in the bond
    if the first atom > second atom.
    """
    # match[0][0:2] contains the ID numbers for the 2 atoms in the match
    atom0 = match[0][0]
    atom1 = match[0][1]
    # match[1][0:1] contains the ID numbers for the 1 bond
    bond0 = match[1][0]
    if atom0 < atom1:
        # return ((atom0, atom1), (bond0))  same thing as:
        return match
    else:
        return ((atom1, atom0), (bond0))
ea268fedaa365e0fad3ea49944cc1d1bb5fa7a51
3,192
from typing import List
from pathlib import Path


def files_filter_ext(files: List[Path], ext: str) -> List[Path]:
    """Filter files from a list matching an extension.

    Args:
        files: List of files.
        ext: Extension to filter.

    Returns:
        List of files that have the extension.
    """
    return [f for f in files if f.suffix == ext]
0ed134583f9fa4868111d1475b8be4d67ba4feb7
3,193
import os


def input_files_exist(paths):
    """Ensure all the input files actually exist.

    Args:
        paths (list): List of paths.

    Returns:
        bool: True if they all exist, False otherwise.
    """
    for path in paths:
        if not os.path.isfile(path):
            return False
    return True
7feba57335cdf435950bef1e01b9ab09c1b5f9c1
3,195
import requests


def get_qid_for_title(title):
    """
    Gets the best Wikidata candidate from the title of the paper.
    """
    api_call = f"https://www.wikidata.org/w/api.php?action=wbsearchentities&search={title}&language=en&format=json"
    api_result = requests.get(api_call).json()
    if api_result["success"] == 1:
        return api_result["search"][0]["id"]
663db71c7a1bbf1617941ba81c5fa3b7d359e00b
3,196
def get_ind_sphere(mesh, ind_active, origin, radius):
    """Retrieves the indices of a sphere object's coordinates in a mesh."""
    return (
        (mesh.gridCC[ind_active, 0] <= origin[0] + radius)
        & (mesh.gridCC[ind_active, 0] >= origin[0] - radius)
        & (mesh.gridCC[ind_active, 1] <= origin[1] + radius)
        & (mesh.gridCC[ind_active, 1] >= origin[1] - radius)
        & (mesh.gridCC[ind_active, 2] <= origin[2] + radius)
        & (mesh.gridCC[ind_active, 2] >= origin[2] - radius)
    )
9e246c3c0d3d7750a668476f0d0d90b28c46fc27
3,197
def tidy_output(differences):
    """Format the output given by other functions properly."""
    out = []
    if differences:
        out.append("--ACLS--")
        out.append("User Path Port Protocol")
        for item in differences:
            # if item[2] != None:  # In some cases processes showed up with port None
            out.append("%s %s %s %s" % item)
            # item still contains an element holding the protocol;
            # it is not used in the normal output
    return out
2a7007ae16e91b111f556ea95eedc466a8606494
3,198
import os


def find_files(top, exts):
    """Return a list of file paths with one of the given extensions.

    Args:
        top (str): The top level directory to search in.
        exts (tuple): a tuple of extensions to search for.

    Returns:
        a list of matching file paths.
    """
    return [os.path.join(dirpath, name)
            for dirpath, dirnames, filenames in os.walk(top)
            for name in filenames
            if name.endswith(exts)]
bb13d91b234b7411fd51e66fda96d5622ec11a1d
3,199
def add_hp_label(merged_annotations_column, label_type):
    """Adds prefix to annotation labels that identify the annotation as
    belonging to the provided label_type (e.g. 'h@' for host proteins).

    Parameters
    ----------
    merged_annotations_column : array-like (pandas Series)
        An array containing sets of annotations that need to be labeled.
        e.g.
        0       {GO:0010008, GO:0070062, IPR036865, GO:0048471...
        1       {GO:0006351, GO:0070062, GO:0007623, GO:004851...
        2       {GO:0019888, GO:0006470, GO:0001754, GO:009024...
    label_type : str
        The prefix to be appended (without the "@" separator).

    Returns
    -------
    labeled_annotations : array-like (pandas Series)
        A new pandas Series where all annotations have received a prefix.
    """
    labeled_annotations = merged_annotations_column.map(
        lambda x: set([label_type + '@' + i for i in x]))
    return labeled_annotations
648f548931a1fae5d19291d81f2355a0a00877c3
3,200
def return_union_close():
    """union of statements, close statement"""
    return " return __result"
c1a1b6b6b1164a641a7f9e598eec346af13f2aa7
3,201
def get_val(tup):
    """Get the value from an index-value pair"""
    return tup[1]
5966bbbb28006c46eaf11afaef152573aaaa8d2a
3,202
import os
import subprocess


def repoinit(testconfig, profiler=None):
    """Determines revision and sets up the repo. If given the profiler
    optional argument, will init the profiler repo instead of the default one."""
    revision = ''
    # Update the repo
    if profiler == "gnu-profiler":
        if testconfig.repo_prof is not None:
            os.chdir(testconfig.repo_prof)
        else:
            raise ValueError('Profiling repo is not defined')
    elif profiler == "google-profiler":
        if testconfig.repo_gprof is not None:
            os.chdir(testconfig.repo_gprof)
        else:
            raise ValueError('Profiling repo is not defined')
    else:
        os.chdir(testconfig.repo)
    # Checkout specific branch, else maintain main branch
    if testconfig.branch != 'master':
        subprocess.call(['git', 'checkout', testconfig.branch])
        rev, _ = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                  stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        revision = str(rev).replace("\\n'", '').replace("b'", '')
    else:
        subprocess.call(['git checkout master'], shell=True)
    # Check a specific revision. Else checkout master.
    if testconfig.revision:
        subprocess.call(['git', 'checkout', testconfig.revision])
        revision = testconfig.revision
    elif testconfig.branch == 'master':
        subprocess.call(['git pull'], shell=True)
        rev, _ = subprocess.Popen(['git rev-parse HEAD'], stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE, shell=True).communicate()
        revision = str(rev).replace("\\n'", '').replace("b'", '')
    return revision
b2348bb6ba6eb8284119a9b7bcbe162702338946
3,203
import argparse


def parse_args():
    """Parse input arguments

    Return: parsed arguments structure
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='type')
    subparsers.required = True

    parser_file = subparsers.add_parser('file')
    parser_file.add_argument("-i", "--input", help="Input file name.", required=True)
    parser_file.add_argument("-d", "--database", help="Token database.", required=True)
    parser_file.add_argument("-o", "--output", help="Output file name.", required=True)

    parser_file = subparsers.add_parser('serial')
    parser_file.add_argument("-i", "--input", help="Input serial port name.", required=True)
    parser_file.add_argument("-d", "--database", help="Token database.", required=True)
    parser_file.add_argument("-o", "--output", help="Output file name. Write to stdout and to file.")

    return parser.parse_args()
bb2fe92206bd786492dda2668ffa9c2d0d54004e
3,204
import re
from typing import OrderedDict


def read_avg_residuemap(infile):
    """ Read sequence definition from PSN avg file, returning sequence Map

    :param infile: File handle pointing to WORDOM avgpsn output file
    :return: Returns an internal.map.Map object mapping the .pdb residues
             to WORDOM id's from "Seq" section of the avgpsn-file
    """
    m_start = re.compile(r"^\*\*\* Seq \*\*\*")
    m_end = re.compile(r"^============")
    m_entry = re.compile(r"^\s*\d+\s+.:.\d+\s+\d+\.\d+\s*$")
    residuemap = OrderedDict()
    reading = False
    for line in infile:
        if reading:
            # Stop reading if end of interaction strength section
            if m_end.search(line):
                break
            else:
                if m_entry.search(line):
                    [num, resname, normfact] = line.split()
                    residuemap[resname] = int(num)
        # Start reading when header found
        elif m_start.search(line):
            reading = True
    return residuemap
92c4cbe53edcd3d894a038d7cb9308c653e37146
3,206
import re


def check_playlist_url(playlist_url):
    """Check if a playlist URL is well-formatted.

    Parameters
    ----------
    playlist_url : str
        URL to a YouTube playlist.

    Returns
    -------
    str
        If the URL is well-formatted, return the playlist ID.
        Else raise a `ValueError`.
    """
    match = re.match(
        r"https?://www\.youtube\.com/playlist\?list=(.+)",
        playlist_url.strip()
    )
    if match is None:
        raise ValueError("Incorrect URL: %s" % playlist_url)
    return match.group(1)
b14808e3dc25fcb7f91e9b66ec5f31ae869c6ae5
3,212
def f(t, T):
    """
    returns -1, 0, or 1 based on relationship between t and T
    throws IndexError
    """
    if t > 0 and t < float(T / 2):
        return 1
    elif t == float(T / 2):
        return 0
    elif t > float(T / 2) and t < T:
        return -1
    raise IndexError("Out of function domain")
f2365094d41d2a151322ad640dcf4b290dd1de79
3,213
def maxsubarray(list):
    """
    Find a maximum subarray following this idea:

    Knowing a maximum subarray of list[0..j]
    find a maximum subarray of list[0..j+1] which is either
    (I) the maximum subarray of list[0..j]
    (II) or is a maximum subarray list[i..j+1] for some 0 <= i <= j

    We can determine (II) in constant time by keeping a max
    subarray ending at the current j.

    This is done in the first if of the loop, where the max
    subarray ending at j is max(previousSumUntilJ + array[j], array[j])
    This works because if array[j] + sum so far is less than array[j]
    then the sum of the subarray so far is negative (and less than array[j]
    in case it is also negative) so it has a bad impact on the
    subarray until J sum and we can safely discard it and start anew
    from array[j]

    Complexity (n = length of list)
    Time complexity: O(n)
    Space complexity: O(1)
    """
    if len(list) == 0:
        return (-1, -1, 0)

    # keep the max sum of subarray ending in position j
    maxSumJ = list[0]
    # keep the starting index of the maxSumJ
    maxSumJStart = 0
    # keep the sum of the maximum subarray found so far
    maxSum = list[0]
    # keep the starting index of the current max subarray found
    maxStart = 0
    # keep the ending index of the current max subarray found
    maxEnd = 0
    for j in range(1, len(list)):
        if maxSumJ + list[j] >= list[j]:
            maxSumJ = maxSumJ + list[j]
        else:
            maxSumJ = list[j]
            maxSumJStart = j

        if maxSum < maxSumJ:
            maxSum = maxSumJ
            maxStart = maxSumJStart
            maxEnd = j

    return (maxSum, maxStart, maxEnd)
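# Example (Kadane's algorithm on the classic test case): the best slice is
# [4, -1, 2, 1], summing to 6 and spanning indices 3..6.
assert maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == (6, 3, 6)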
a991ca09c0594b0d47eb4dd8be44d093d593cd36
3,215
def sort_cipher_suites(cipher_suites, ordering):
    """Sorts the given list of CipherSuite instances in a specific order."""
    if ordering == 'asc':
        return cipher_suites.order_by('name')
    elif ordering == 'desc':
        return cipher_suites.order_by('-name')
    else:
        return cipher_suites
5a554ba1e2e4d82f53f29c5a1c2f4d311f538889
3,216
import torch


def reward(static, tour_indices):
    """
    Euclidean distance between all cities / nodes given by tour_indices
    """
    # Convert the indices back into a tour
    idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
    tour = torch.gather(static.data, 2, idx).permute(0, 2, 1)

    # Ensure we're always returning to the depot - note the extra concat
    # won't add any extra loss, as the euclidean distance between consecutive
    # points is 0
    start = static.data[:, :, 0].unsqueeze(1)
    y = torch.cat((start, tour, start), dim=1)

    # Euclidean distance between each consecutive point
    tour_len = torch.sqrt(torch.sum(torch.pow(y[:, :-1] - y[:, 1:], 2), dim=2))

    return tour_len.sum(1)
f7197bcfb3699cafa4df3c1430b4f9ee1bf53242
3,217
def abc19():
    """Solution to exercise C-1.19.

    Demonstrate how to use Python's list comprehension syntax to produce
    the list [ a , b , c , ..., z ], but without having to type all 26
    such characters literally.
    """
    a_idx = 97
    return [chr(a_idx + x) for x in range(26)]
c9bb948ad57ddbc138dfbc0c481fabb45de620ba
3,218
import os
import json


def _get_corpora_json_contents(corpora_file):
    """
    Get the contents of corpora.json, or an empty dict
    """
    exists = os.path.isfile(corpora_file)
    if not exists:
        print("Corpora file not found at {}!".format(corpora_file))
        return dict()
    with open(corpora_file, "r") as fo:
        return json.loads(fo.read())
d2a0dcf8bbbca573121237190b96c8d372d027b4
3,219
def moveTo(self, parent):
    """Move this element to new parent, as last child"""
    self.getParent().removeChild(self)
    parent.addChild(self)
    return self
40caa9681346db9a6cfb5c95fdb761a9f98e607a
3,220
def is_num_idx(k):
    """This key corresponds to """
    return k.endswith("_x") and (k.startswith("tap_x") or k.startswith("sig"))
bd4ed2c9c4a24ae423ec6c738d99b31ace6ec267
3,221
def conditional(condition, decorator):
    """Decorator for a conditionally applied decorator.

    Example:

        @conditional(get_config('use_cache'), ormcache)
        def fn():
            pass
    """
    if condition:
        return decorator
    else:
        return lambda fn: fn
7c17ad3aaacffd0008ec1cf66871ea6755f7869a
3,222
import statistics


def variance(data, mu=None):
    """Compute variance over a list."""
    if mu is None:
        mu = statistics.mean(data)
    return sum([(x - mu) ** 2 for x in data]) / len(data)
92f89d35c2ae5abf742b10ba838a381d6f74e92c
3,223
def IMF_N(m, a=.241367, b=.241367, c=.497056):
    """
    returns number of stars with mass m
    """
    # a,b,c = (.241367,.241367,.497056)
    # a=b=c=1/3.6631098624
    if .1 <= m <= .3:
        res = c * (m ** (-1.2))
    elif .3 < m <= 1.:
        res = b * (m ** (-1.8))
    elif 1. < m <= 100.:
        # res = a*( m**(-1.3)-100**(-1.3) )/1.3
        res = a * (m ** (-2.3))
    else:
        res = 0
    return res
4d120af2840a793468335cddd867f6d29940d415
3,224
import random


def get_random_tcp_start_pos():
    """
    reachability area:
        x = [-0.2; 0.4]
        y = [-0.28; -0.1]
    """
    z_up = 0.6
    tcp_x = round(random.uniform(-0.2, 0.4), 4)
    tcp_y = round(random.uniform(-0.28, -0.1), 4)
    start_tcp_pos = (tcp_x, tcp_y, z_up)
    # start_tcp_pos = (-0.2, -0.28, z_up)
    return start_tcp_pos
adf87dec45bf5a81c321f94c93d45a67f0aeff0d
3,226
def _to_system(abbreviation):
    """Converts an abbreviation to a system identifier.

    Args:
        abbreviation: a `pronto.Term.id`

    Returns:
        a system identifier
    """
    try:
        return {
            'HP': 'http://www.human-phenotype-ontology.org/'
        }[abbreviation]
    except KeyError:
        raise RuntimeError(
            'system abbreviation \'%s\' is not supported' % abbreviation)
f43942b242e67866028a385e6614133dc25b31b0
3,227
def find_edges(mesh, key):
    """
    Temp replacement for mesh.findEdges(). This is painfully slow.
    """
    for edge in mesh.edges:
        v = edge.vertices
        if key[0] == v[0] and key[1] == v[1]:
            return edge.index
98247b64a0e5671a7dbbf314f314cef2c5c8aae3
3,228
def _process_create_group(event: dict) -> list:
    """
    Process CreateGroup event. This function doesn't set tags.
    """
    return [event['responseElements']['group']['groupName']]
978b3ffc3c4aa72165914b79dc06cb7691c5c5a5
3,230
def egg_translator(cell):
    """If the cell has the DNA for harboring its offspring inside it,
    granting it additional food and protection at the risk of the parent
    cell, it is an egg.

    Active DNA: x,A,(C/D),x,x,x
    """
    dna = cell.dna.split(',')
    if dna[1] == 'A' and dna[2] == 'C':
        return True
    elif dna[1] == 'A' and dna[2] == 'D':
        return True
    else:
        return False
af0d9097c8a0b5002722c79d6ec8262a66cc375d
3,231
import random


def randbit():
    """Returns a random bit."""
    return random.randrange(2)
4b47101df7368b7cb423920e6a5338b76ab4ecaa
3,232
def calc_points(goals, assists):
    """
    Calculate the total traditional and weighted points for all players,
    grouped by player id.

    Author: Rasmus Säfvenberg

    Parameters
    ----------
    goals : pandas.DataFrame
        A data frame with total goals and weighted goals per player.
    assists : pandas.DataFrame
        A data frame with total assists and weighted assists per player.

    Returns
    -------
    points : pandas.DataFrame
        A data frame with total points and weighted points per player.
    """
    # Specify columns to keep for merging
    goals = goals[["PlayerId", "PlayerName", "Position", "Goals", "WeightedGoals"]]
    assists = assists[["PlayerId", "PlayerName", "Position", "Assists", "WeightedAssists"]]

    # Combine goals and assists
    points = goals.merge(assists, on=["PlayerId", "PlayerName", "Position"], how="outer")

    # Fill missing values with 0 (some players only score goals etc.)
    points.fillna(0, inplace=True)

    # Calculate points = goals + assists
    points["Points"] = points["Goals"] + points["Assists"]

    # Calculate weighted points = weighted goals + weighted assists
    points["WeightedPoints"] = points["WeightedGoals"] + points["WeightedAssists"]

    # Sort by weighted points
    points.sort_values("WeightedPoints", ascending=False, inplace=True)

    return points
1801cf2602a473bdf532e1c0ee58b883dc3e79d1
3,233
def rosenbrock_grad(x, y):
    """Gradient of Rosenbrock function."""
    return (-400 * x * (-(x ** 2) + y) + 2 * x - 2, -200 * x ** 2 + 200 * y)
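# Sanity check: the Rosenbrock function f(x, y) = (1 - x)^2 + 100(y - x^2)^2
# has its global minimum at (1, 1), where the gradient vanishes.
assert rosenbrock_grad(1, 1) == (0, 0)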
c7acf0bbe11a6d1cbb38b6853eb1b508e3846657
3,236
import os
import glob


def get_patient_dirs(root_dir):
    """
    Function used to get the root directory for all patients

    :param root_dir: root directory of all image data
    :return patient_paths: list of all patient paths, one for each patient
    """
    search_path = os.path.join(root_dir, '[0-1]', '*')
    patient_paths = glob.glob(search_path)
    return patient_paths
d0d38f02214175b867fd8bf8b1e13db8ee8a83f2
3,237
def digitize(n):
    """Convert a number to a reversed array of digits."""
    l = list(str(n))
    n_l = []
    for d in l:
        n_l.append(int(d))
    n_l.reverse()
    return n_l
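# Example: least-significant digit first.
assert digitize(1234) == [4, 3, 2, 1]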
e4355b68da41e4be87ce18b53afb2a406eb120c7
3,238