content (string, lengths 35 – 416k) · sha1 (string, length 40) · id (int64, 0 – 710k)
def read_csv_to_lol(full_path, sep=";"):
    """Read a CSV file into a list of lists.

    Make sure the file has an empty line at the bottom.
    """
    with open(full_path, 'r') as ff:
        # read from CSV
        data = ff.readlines()
    # the newline at the end of each line is removed
    data = [i.replace("\n", "") for i in data]
    # split each line into a list of fields
    data = [i.split(sep) for i in data]
    return data
e53c46c6a8eabaece788111530fbf859dd23133f
708,118
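A minimal usage sketch for read_csv_to_lol above; the file name and sample values are invented for illustration:

with open('demo.csv', 'w') as fh:
    fh.write('a;b;c\n1;2;3\n')

print(read_csv_to_lol('demo.csv'))  # [['a', 'b', 'c'], ['1', '2', '3']]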
import json
import sys
import os


def json_loads(data):
    """Load json data, allowing - to represent stdin."""
    if data is None:
        return ""
    if data == "-":
        return json.load(sys.stdin)
    elif os.path.exists(data):
        with open(data, 'r') as handle:
            return json.load(handle)
    else:
        return json.loads(data)
f5bdad826578108adccc32ca93ecd474954bfb7d
708,119
def make_predictions(clf_object, predictors_str, data_source):
    """make_predictions comes up with predictions from given input data.

    Input:
        clf_object      object        constructed classification model
        predictors_str  nd str array  string array containing names of predictors
        data_source     ndarray       source of data, either from valid or test

    Output:
        preds           ndarray       prediction classes based on given input data
    """
    preds = clf_object.predict(data_source[predictors_str])
    return preds
ed5f29e65ddf3d7f7081b89e6f747925de944567
708,120
import random
import string


def generate_random_string(N):
    """Generate a random string.

    Parameters
    -------------
    N : length of the string

    Returns
    -------------
    random_string : Random string
    """
    return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
3e2e672140e18546260a0882fa6cf06073bdf8e7
708,121
import re


def extract_charm_name_from_url(charm_url):
    """Extract the charm name from the charm url.

    E.g. Extract 'heat' from local:bionic/heat-12

    :param charm_url: Name of model to query.
    :type charm_url: str
    :returns: Charm name
    :rtype: str
    """
    charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1])
    return charm_name.split(':')[-1]
9905d6b5c7a2f5047bc939d1b6e23d128ee8984d
708,122
def class_name(service_name: str) -> str:
    """Map service name to .pyi class name."""
    return f"Service_{service_name}"
b4bed8a677f9eedfcd66d6d37078075b0967ea20
708,123
import re


def select_devices(devices):
    """Select devices."""
    device_count = len(devices)
    print("Device list:")
    print("0) All devices")
    for i, d in enumerate(devices, start=1):
        print("%d) %s\t%s" % (i, d['serial'], d['model']))
    print("q) Exit this operation")
    selected = input("\nselect: ")
    nums = None
    if selected == '0':
        nums = range(0, device_count)
    elif selected == 'q':
        print("Exit this operation")
        exit(-1)
    else:
        nums = []
        for i in re.split(r'[\s+,]', selected):
            if i.isdigit():
                seq = int(i) - 1
                if 0 <= seq < device_count:
                    nums.append(seq)
                    continue
            print("error input: %s, retry again\n" % i)
            return select_devices(devices)
    return nums
91c405c8a198deb01e8abecc592ac2286dc712fd
708,124
def is_number(s):
    """Check if it is a number.

    Args:
        s: The variable that needs to be checked.

    Returns:
        bool: True if float, False otherwise.
    """
    try:
        float(s)
        return True
    except ValueError:
        return False
071aeac26a5a907caf1764dc20d7de1c6408714b
708,125
import itertools


def combineSets(listOfSets):
    """Combines sets of strings by taking the cross product of the sets
    and concatenating the elements in the resulting tuples.

    :param listOfSets: 2-D list of strings
    :returns: a list of strings
    """
    totalCrossProduct = ['']
    for i in range(len(listOfSets)):
        currentProduct = []
        for crossProduct in itertools.product(totalCrossProduct, listOfSets[i]):
            currentProduct.append((crossProduct[0].strip() + ' ' + crossProduct[1].strip()).strip())
        totalCrossProduct = currentProduct
    return totalCrossProduct
26a383d224716fd8f4cf8589607e2df1ccb82a7e
708,126
def _GetTombstoneData(device, tombstone_file):
    """Retrieve the tombstone data from the device.

    Args:
        device: An instance of DeviceUtils.
        tombstone_file: the tombstone to retrieve

    Returns:
        A list of lines
    """
    return device.old_interface.GetProtectedFileContents(
        '/data/tombstones/' + tombstone_file)
99322ea3d67e150f4433c713159eb7bc8069271f
708,127
import time


def _strTogYear(v):
    """Test gYear value

    @param v: the literal string
    @return v
    @raise ValueError: invalid value
    """
    try:
        time.strptime(v + "-01-01", "%Y-%m-%d")
        return v
    except (ValueError, TypeError):  # narrowed from a bare except
        raise ValueError("Invalid gYear %s" % v)
a65e04c2d3790d3d55bbc8788d6802e1aae1b78c
708,128
import random


def giveHint(indexValue, myBoard):
    """Return a random matching card given the index of a card and a game board."""
    validMatches = []
    card = myBoard[indexValue]
    # use enumerate rather than list.index(), which always returns the first
    # occurrence and therefore mishandles duplicate cards
    for i, c in enumerate(myBoard):
        if (card[0] == c[0]) and (i != indexValue):
            validMatches.append(i)
    return random.choice(validMatches)
e578f40e7d7e2e17ddac53f9cfdc219e47c861cd
708,129
async def make_getmatch_embed(data):
    """Generate the embed description and other components for a getmatch() command.

    As with its parent, remember that this currently does not support non team-vs.
    `data` is expected to be the output of `get_individual_match_data()`.

    The following `dict` is returned:
    ```
    {
        "embed_description": str,
        "footer": str,
        "embed_color": int (as color hex),
    }
    ```
    """
    scores = data["individual_scores"]
    team_1_score_strings = []
    team_2_score_strings = []

    for individual_score in scores:
        # unpack the fields first to keep the actual score_string readable
        player_name = individual_score["user_name"]
        score_val = individual_score["score"]
        maxcombo = individual_score["combo"]
        accuracy = individual_score["accuracy"]
        count_300 = individual_score["hits"]["300_count"]
        count_100 = individual_score["hits"]["100_count"]
        count_50 = individual_score["hits"]["50_count"]
        count_miss = individual_score["hits"]["miss_count"]

        accuracy = '{:.2%}'.format(accuracy)
        score_val = "{:,}".format(score_val)
        maxcombo = "{:,}".format(maxcombo)

        score_string = (f'**{player_name}** - {score_val} ({maxcombo}x) '
                        f'({accuracy} - {count_300}/{count_100}/{count_50}/{count_miss})')
        if individual_score["team"] == "1":
            team_1_score_strings.append(score_string)
        else:
            team_2_score_strings.append(score_string)

    team_1_score_string = "\n".join(team_1_score_strings)
    team_2_score_string = "\n".join(team_2_score_strings)

    winner_string = {
        "Blue": f"Blue team wins by {'{:,}'.format(data['score_difference'])}!",
        "Red": f"Red team wins by {'{:,}'.format(data['score_difference'])}!",
        "Tie": "Tie!"}
    winner_color = {
        "Blue": 0x0000FF,
        "Red": 0xFF0000,
        "Tie": 0x808080}

    embed_desc = (
        f'**{winner_string[data["winner"]]}**\n\n'
        f'__Blue Team__ ({"{:,}".format(data["team_1_score"])} points, '
        f'{"{:,}".format(data["team_1_score_avg"])} average)\n'
        f'{team_1_score_string}\n\n'
        f'__Red Team__ ({"{:,}".format(data["team_2_score"])} points, '
        f'{"{:,}".format(data["team_2_score_avg"])} average)\n'
        f'{team_2_score_string}')

    # footer stuff
    scoring_types = {
        '0': 'Score',
        '1': 'Accuracy',
        '2': 'Combo',
        '3': 'Score v2'}
    team_types = {
        '0': 'Head-to-head',
        '1': 'Tag Co-op',
        '2': 'Team VS',
        '3': 'Tag Team VS'}
    play_modes = {
        '0': 'osu!',
        '1': 'Taiko',
        '2': 'CTB',
        '3': 'osu!mania'}
    embed_footer = (f'Played at {data["start_time"]} UTC | '
                    f'Win condition: {scoring_types[data["scoring_type"]]} | '
                    f'{team_types[data["team_type"]]} | '
                    f'{play_modes[data["play_mode"]]}')

    final = {
        "embed_description": embed_desc,
        "footer": embed_footer,
        "embed_color": winner_color[data["winner"]],
    }
    return final
c37e0d6ee948259e4ad898d3cafb8e13b6452d80
708,130
import torch


def compute_inverse_interpolation_img(weights, indices, img, b, h_i, w_i):
    """
    weights: [b, h*w]
    indices: [b, h*w]
    img: [b, h*w, a, b, c, ...]
    """
    w0, w1, w2, w3 = weights
    ff_idx, cf_idx, fc_idx, cc_idx = indices

    # number of trailing dimensions to broadcast the weights over
    k = len(img.size()) - len(w0.size())
    img_0 = w0[(...,) + (None,) * k] * img
    img_1 = w1[(...,) + (None,) * k] * img
    img_2 = w2[(...,) + (None,) * k] * img
    img_3 = w3[(...,) + (None,) * k] * img

    img_out = torch.zeros(b, h_i * w_i, *img.shape[2:]).type_as(img)

    ff_idx = torch.clamp(ff_idx, min=0, max=h_i * w_i - 1)
    cf_idx = torch.clamp(cf_idx, min=0, max=h_i * w_i - 1)
    fc_idx = torch.clamp(fc_idx, min=0, max=h_i * w_i - 1)
    cc_idx = torch.clamp(cc_idx, min=0, max=h_i * w_i - 1)

    img_out.scatter_add_(1, ff_idx[(...,) + (None,) * k].expand_as(img_0), img_0)
    img_out.scatter_add_(1, cf_idx[(...,) + (None,) * k].expand_as(img_1), img_1)
    img_out.scatter_add_(1, fc_idx[(...,) + (None,) * k].expand_as(img_2), img_2)
    img_out.scatter_add_(1, cc_idx[(...,) + (None,) * k].expand_as(img_3), img_3)
    return img_out
6b69aa5ca372a9c8f976512191d4626919d71311
708,131
def decode(value):
    """Decode utf-8 value to string.

    Args:
        value: String to decode

    Returns:
        result: decoded value
    """
    # Initialize key variables
    result = value

    # Start decode
    if value is not None:
        if isinstance(value, bytes):
            result = value.decode('utf-8')

    # Return
    return result
9704678f6ff96de3b711758922c28f5ecbd11bc7
708,133
def _parse_none(arg, fn=None):
    """Parse arguments with support for conversion to None.

    Args:
        arg (str): Argument to potentially convert.
        fn (func): Function to apply to the argument if not converted to None.

    Returns:
        Any: Arguments that are "none" or "0" are converted to None;
        otherwise, returns the original value.
    """
    if arg.lower() in ("none", "0"):
        return None
    return arg if fn is None else fn(arg)
4ebd283eb9e2218e523ba185c4500c9879d5719d
708,135
def generate_constraint(category_id, user):
    """Generate the proper basic data structure to express a constraint
    based on the category string."""
    return {'year': category_id}
f55151a5b4b17bbf6eb697e1b1489ee4897f5db0
708,136
import os


def input_file_exists(filepath):
    """Return True if the file path exists, or is the stdin marker."""
    return (filepath == '-') or os.path.exists(filepath)
5f6a0c2195ce90ba551679d516ebee0e593184c8
708,137
from typing import List
from typing import Set


def ladder_length(beginWord: str, endWord: str, wordList: List[str]) -> int:
    """Bidirectional BFS: alternately advance from both ends toward the target
    level, always letting the level with more nodes act as the target level.

    :param beginWord:
    :param endWord:
    :param wordList:
    :return:
    >>> ladder_length('hit', 'cog', ["hot","dot","dog","lot","log","cog"])
    5
    >>> ladder_length('hit', 'cog', ["hot","dot","dog","lot","log"])
    0
    >>> ladder_length("hit","cog",["hot","dot","dog","lot","log"])
    0
    """
    if not beginWord or not endWord or endWord not in wordList:
        return 0
    all_chars: List[str] = [chr(i) for i in range(ord('a'), ord('z') + 1)]
    curr_word_set: Set[str] = {beginWord}  # nodes of the current level
    end_word_set: Set[str] = {endWord}  # nodes of the target level
    word_set: Set[str] = set(wordList)  # speeds up membership tests against the word list
    level: int = 1
    while curr_word_set:
        # avoid revisiting nodes of an already-expanded level
        level += 1
        for cw in curr_word_set:
            # beginWord never reappears in wordList (word_set)
            if cw != beginWord:
                word_set.remove(cw)
        tmp_set: Set[str] = set()
        for curr_word in curr_word_set:
            for i, w in enumerate(curr_word):
                for letter in all_chars:
                    if w == letter:
                        continue
                    changed: str = curr_word[:i] + letter + curr_word[i + 1:]
                    if changed in end_word_set:
                        return level
                    if changed in word_set:
                        tmp_set.add(changed)
        # let the level with more nodes be the target level
        if len(tmp_set) <= len(end_word_set):
            curr_word_set = tmp_set
        else:
            # reverse the search direction
            curr_word_set = end_word_set
            end_word_set = tmp_set
    return 0
020f3ffd2e009b682a47ff9aad8d1d6025c29f37
708,138
def setup_option(request):
    """Create an object for convenient access to variables in test methods."""
    setup_parameters = {}
    if request.config.getoption('--site_url'):
        setup_parameters['site_url'] = request.config.getoption('--site_url')
    return setup_parameters
49908ee8e1422cc4fd05c6d93a96c00d734cf6d1
708,139
def get_unique_tokens(texts):
    """Returns a set of unique tokens.

    >>> get_unique_tokens([['oeffentl', 'ist', 'oeffentl']])
    {'oeffentl', 'ist'}
    """
    unique_tokens = set()
    for text in texts:
        for token in text:
            unique_tokens.add(token)
    return unique_tokens
f9c174b264082b65a328fd9edf9421e7ff7808a2
708,140
def moved_in(nn_orig, nn_proj, i, k):
    """Determine points that are neighbours in the projection space,
    but were not neighbours in the original space.

    nn_orig  neighbourhood matrix for original data
    nn_proj  neighbourhood matrix for projection data
    i        index of the point considered
    k        size of the neighbourhood considered

    Return a list of indices for points which are 'moved in' to point i
    """
    pp = list(nn_proj[i, 1:k + 1])
    oo = list(nn_orig[i, 1:k + 1])
    for j in oo:
        # j is drawn from oo, so only membership in pp needs checking
        if j in pp:
            pp.remove(j)
    return pp
b63a9b0f53554032fc920aeaf6d3d76b93dd8ab3
708,141
def getStatic():
    """These are "static" params for a smoother application flow and fine
    tuning of some params. Not all functions are implemented yet.

    Returns the necessary params to run this application.
    """
    VISU_PAR = {
        # =====================================================================
        # More general params
        # =====================================================================
        # do not consider samples which are longer than this value in [s]
        "delteSampleAbove[s]": 5,
        # flag for extracting/considering long samples
        "extractLongs": False,
        # do not consider samples which are shorter than this value in [s]
        "toShort[s]": 0.003,
        # flag for extracting/considering too short samples
        "extractShort": False,
        # this might indicate a loop !!
        "bpmConfidence": 1,
        # flag for extracting/considering potential loops
        "extractLoops": False,
        # compress all features to a range from (0,..,1) -> getFeatureStack()
        "compress": True,
        # invert all negative feature values with a total negative correlation
        # -> getPandasCorrelation()
        "invNegative": True,

        # =====================================================================
        # Application modes
        # =====================================================================
        # scriptMode := ("clustering", "get_N_Closest", "analyseWithGT", "optimizer")
        # "clustering"    := group samples into 'n Cluster' not regarding their GT
        # "get_N_Closest" := select the N most similar samples to a reference
        #                    sample, not regarding their GT; requires the path of
        #                    a JSON file which contains the features of one sample
        #                    (compareFilePath) and a number N (n_mostSimilar)
        # "analyseWithGT" := analyse a set of features and evaluate with GT labels;
        #                    it is still possible to cluster within this option and
        #                    save a landmap and restructure files
        # "optimizer"     := tries a new subset of features and saves the new
        #                    subset; needs GTs
        #
        # The hierarchy of the application modes is:
        #   analyseWithGT (when true, most params below are useful)
        #   clustering (there will be no option to select features compared to GT)
        #   get_N_Closest (there will be no option to select features compared to GT)
        #   -> the best features calculated and saved will be used
        #      -> (getBestFile, getBestFeatureSelektion)
        "scriptMode": "get_N_Closest",
        # for get_N_Closest -> this should only contain one file and only the
        # features for one sample
        "compareFilePath": "../json_data/singleFile/Dirt-SamplesSingle2020-10-06.17:26:55.json",
        "n_mostSimilar": 25,
        # path to json files
        "dirName": "../json_data/",
        # saved features of a sample library
        "fileName": "Dirt-Samples2020-09-14.20:53:18.json",

        # =====================================================================
        # Feature selection and feature subset creation modes
        # =====================================================================
        # a fixed set of features to select by (the names may vary from old
        # JSON files to new ones)
        "predefinedFeatures": False,
        # You can select features by yourself if you want. It refers to the
        # predefined features; the default set can be generated from the
        # Dirt-Samples with suboptimalSearch's default values.
        "defineYoureOwnFeatureSet": ['Har-Log_-FACM_10', 'MFCC-4', 'MFCC-7',
                                     'Har-RecChr_-FACM_12', 'TriChr_Centroid',
                                     'ZeroCrossingRate', 'MFCC-8'],
        # "defineYoureOwnFeatureSet": ["Har-TriChr_-FACM_12", "MFCC-10"],
        # select all features with correlation > suboptimalSearch.second to GT
        # labels, and discard all features with cross correlation
        # > suboptimalSearch.third
        "suboptimalSearch": (True, 0.3, 0.8),
        # only take the nBest features from suboptimalSearch (-1 := all)
        "nBest": 7,
        # consider all features, or take an approach from above
        "calcAllFeatures": False,
        # ("HillClimber", "Random") optimize features with a) hill climber
        # b) totally random;
        # maxHill is the maximum iteration of the hill climber / max repeats
        # for Random; probHill is the probability for each individual feature
        # to get selected
        # modeHill := ("small", "big", "medium") affects the HillClimber
        #   small     -> small steps (1-2 changes at a time)
        #   big       -> every permutation has equal probability
        #   bigChoice -> bigger steps than "small" but not every permutation
        #                like "big"
        "optimizer": "HillClimber",
        "maxHill": 500,
        "probHill": 0.0000001,
        "modeHill": "medium",
        # amount of clusters to consider with Hierarch
        "nCluster": 40,
        # (Hierarch/OPTICS/AffinityPropagation/SpectralClustering) 1st is
        # hierarchical clustering, 2nd is density based -> getClusteringLabels()
        "clusterAlgo": "Hierarch",
        # the mode for hierarchical clustering: ward = minimum variance,
        # average = minimum of average, complete = maximum of each cluster,
        # single = minimum of each cluster
        "hierarchMode": "average",

        # =====================================================================
        # Output params (save files to folder | draw landmap)
        # =====================================================================
        # save folder for copying all audio files
        "saveFolder": '../estimateSongs/',
        # restructure all files within their newly assigned cluster group;
        # if mode is n_mostSimilar, it is a folder which contains the
        # n_mostSimilar samples
        "copyFilesToFolder": True,
        # draw a distance landmap with graphviz
        "graphviz": False,
        # graphvizMode := ("clusterBased", "oneFilePerCluster", "minimalSpan"):
        # "minimalSpan"       = draw one big landmap without clusters as a minimal
        #                       span tree (not recommended for all files)
        # "clusterBased"      = draw separate clusters in one big landmap
        # "oneFilePerCluster" = generate one landmap file per cluster
        "graphvizMode": "minimalSpan"
    }

    # Same params for spectral clustering. This approach will not be taken further.
    SpectralClusterParam = {
        "assign_labels": "kmeans",  # {'kmeans', 'discretize'} default kmeans
        "eigen_solver": "amg",
    }
    VISU_PAR = {**VISU_PAR, **SpectralClusterParam}
    return VISU_PAR
f82ed9c4156b8199be924fc1ed62398fcbad9e0c
708,142
def pagenav(object_list, base_url, order_by, reverse, cur_month, is_paginated, paginator):
    """Display page navigation for given list of objects."""
    return {'object_list': object_list,
            'base_url': base_url,
            'order_by': order_by,
            'reverse': reverse,
            'cur_month': cur_month,
            'is_paginated': is_paginated,
            'paginator': paginator}
eb61fb76dd32b8d0b3e264e77ce912766d3e38da
708,143
def scale(val, src, dst):
    """Scale the given value from the scale of src to the scale of dst.

    val: float or int
    src: tuple
    dst: tuple

    example: print(scale(99, (0.0, 99.0), (-1.0, +1.0)))
    """
    return (float(val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
26cfaccaeea861ccecb36697838710c0ab706520
708,145
def add(c1, c2):
    """Add two encrypted counters."""
    a1, b1 = c1
    a2, b2 = c2
    return (a1 + a2, b1 + b2)
d3e519524fac558622f692a46ffb8fed9899176f
708,146
def error_message(error, text):
    """Gives default or custom text for the error.

    --------------------
    Inputs <datatype>:
        - error <Error Object>: The error code
        - text <string>: Custom error text if error has no message

    Returns <datatype>:
        - error description <string>: The custom error description or default
    """
    try:
        return error.description['message']
    except TypeError:
        return text
466fec2d2abefc9f05a3f0adf569fba1c63ea4c1
708,147
def no_rbac_suffix_in_test_filename(filename):
    """Check that RBAC filenames end with "_rbac" suffix.

    P101
    """
    if "patrole_tempest_plugin/tests/api" in filename:
        if filename.endswith('rbac_base.py'):
            return
        if not filename.endswith('_rbac.py'):
            return 0, "RBAC test filenames must end in _rbac suffix"
6ebfcede8b6e30f24f5ecc1f9d3f0985bd4c44fa
708,148
def minimax(just_mapping, mapping):
    """Scale the mapping to minimize the maximum error from just intonation."""
    least_error = float("inf")
    best_mapping = mapping
    for i in range(len(just_mapping)):
        for j in range(i + 1, len(just_mapping)):
            candidate = mapping / (mapping[i] + mapping[j]) * (just_mapping[i] + just_mapping[j])
            error = abs(just_mapping - candidate).max()
            if error < least_error:
                least_error = error
                best_mapping = candidate
    return best_mapping
b2226de7a916e3075327cd30c64e7412e186027d
708,151
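A minimal usage sketch for minimax above, assuming both mappings are numpy arrays (the function relies on elementwise arithmetic and .max(), which plain lists do not provide); the sample values are invented:

import numpy as np

just = np.array([1.0, 1.25, 1.5, 2.0])          # just-intonation targets
tempered = np.array([1.0, 1.26, 1.498, 2.003])  # a slightly detuned mapping
print(minimax(just, tempered))  # rescaled mapping with a smaller worst-case error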
def transform_color(color1, color2, skipR=1, skipG=1, skipB=1):
    """transform_color(color1, color2, skipR=1, skipG=1, skipB=1)

    This function takes 2 color1 and color2 RGB color arguments, and then
    returns a list of colors in-between color1 and color2, e.g.
    tj.transform_color([0,0,0],[10,10,20]) returns a list:
    [[0, 0, 0], [1, 1, 1], [2, 2, 2] ... [9, 9, 9], [10, 10, 10],
    [10, 10, 11] ... [10, 10, 20]]

    This function is very useful for creating color fade or color transition
    effects in pygame. There are 3 optional arguments, which are skip
    arguments set to 1 by default.
    """
    L = []
    if color1[0] < color2[0]:
        i = list(range(color1[0], color2[0] + 1, skipR))
    else:
        i = list(range(color2[0], color1[0] + 1, skipR))[::-1]
    if i == []:
        i = [color1[0]]
    if color1[1] < color2[1]:
        j = list(range(color1[1], color2[1] + 1, skipG))
    else:
        j = list(range(color2[1], color1[1] + 1, skipG))[::-1]
    if j == []:
        j = [color1[1]]
    if color1[2] < color2[2]:
        k = list(range(color1[2], color2[2] + 1, skipB))
    else:
        k = list(range(color2[2], color1[2] + 1, skipB))[::-1]
    if k == []:
        k = [color1[2]]
    # pad the shorter channel lists with their last value so all three
    # channels have the same length
    x = max(len(i), len(j), len(k))
    for m in range(len(i), x):
        i += [i[-1]]
    for m in range(len(j), x):
        j += [j[-1]]
    for m in range(len(k), x):
        k += [k[-1]]
    for m in range(x):
        L += [[i[m], j[m], k[m]]]
    return L
5f04daa951c59b0445387b2dc988ab7efb98aff4
708,152
import os


def create_experiment_dirs(exp_dir):
    """Create the directories of a regular tensorflow experiment directory.

    :param exp_dir:
    :return experiment_dir, summary_dir, checkpoint_dir, output_dir, test_dir:
    """
    experiment_dir = os.path.realpath(os.path.join(os.path.dirname(__file__))) + "/experiments/" + exp_dir + "/"
    summary_dir = experiment_dir + 'summaries/'
    checkpoint_dir = experiment_dir + 'checkpoints/'
    output_dir = experiment_dir + 'output/'
    test_dir = experiment_dir + 'test/'
    dirs = [summary_dir, checkpoint_dir, output_dir, test_dir]
    try:
        for dir_ in dirs:
            if not os.path.exists(dir_):
                os.makedirs(dir_)
        print("Experiment directories created")
        return experiment_dir, summary_dir, checkpoint_dir, output_dir, test_dir
    except Exception as err:
        print("Creating directories error: {0}".format(err))
        exit(-1)
9707152a57a322ad2d935c2614386218c2c66ee9
708,153
def get_go_module_path(package):
    """Assumption: package name starts with <host>/org/repo."""
    return "/".join(package.split("/")[3:])
1443d59391a36c7b9ba1d72ade9fd51f11cc1cc3
708,154
def generate_paths(data, path=''):
    """Iterate the json schema file and generate a list of all of the
    XPath-like expressions for each primitive value. An asterisk *
    represents an array of items."""
    paths = []
    if isinstance(data, dict):
        if len(data) == 0:
            paths.append(f'{path}')
        else:
            for key, val in data.items():
                if key == 'type':
                    if isinstance(val, list):
                        types = set(val)
                    else:
                        types = {val}
                    if types.isdisjoint({'object', 'array'}):
                        paths.append(f'{path}')
                elif key == 'properties':
                    paths.extend(generate_paths(val, path))
                else:
                    if key == 'items':
                        key = '*'
                    paths.extend(generate_paths(val, f'{path}/{key}'))
    return paths
367f244b44c254b077907ff8b219186bd820fccd
708,155
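A small usage sketch for generate_paths above, run on a hypothetical two-field JSON Schema fragment (the schema itself is invented for illustration):

schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "tags": {"type": "array", "items": {"type": "string"}},
    },
}
print(generate_paths(schema))  # ['/name', '/tags/*']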
def order_rep(dumper, data):
    """YAML Dumper to represent OrderedDict."""
    return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items(),
                                    flow_style=False)
6b455d49cd5702324f4b1e825dabb4af90734730
708,157
def make1d(u, v, num_cols=224):
    """Make a 2D image index linear."""
    return (u * num_cols + v).astype("int")
1f37c7ae06071ce641561eadc1d0a42a0b74508d
708,158
from datetime import datetime


def header_to_date(header):
    """Return the initial date based on the header of an ascii file."""
    try:
        starttime = datetime.strptime(header[2], '%Y%m%d_%H%M')
    except ValueError:
        try:
            starttime = datetime.strptime(
                header[2] + '_' + header[3], '%Y%m%d_%H'
            )
        except ValueError:
            print("Warning: could not retrieve starttime from header, "
                  "setting to default value")
            starttime = datetime(1970, 1, 1)
    return starttime
3e2757ae39a2a9008a5f0fb8cd8fe031770c83ad
708,159
import copy


def permutationwithparity(n):
    """Returns a list of all permutations of n integers, with the first
    element of each entry being the parity."""
    if n == 1:
        return [[1, 1]]
    result = permutationwithparity(n - 1)
    newresult = []
    for shorterpermutation in result:
        for position in range(1, n + 1):
            parity = shorterpermutation[0]
            # every swap needed to move n into place flips the parity
            for swaps in range(n - position):
                parity = -parity
            newpermutation = copy.deepcopy(shorterpermutation)
            newpermutation.insert(position, n)
            newpermutation[0] = parity
            newresult.append(newpermutation)
    return newresult
218b728c2118a8cca98c019dff036e0ae2593974
708,161
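A quick check of permutationwithparity above: for n = 3 it returns all 3! = 6 entries, each of the form [parity, p1, p2, p3]:

for perm in permutationwithparity(3):
    print(perm)  # e.g. [1, 3, 1, 2] -> parity +1 for the even permutation (3, 1, 2)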
def gravitationalPotentialEnergy(mass, gravity, y):
    """1 J = 1 N*m = 1 kg*m**2/s**2

    Variables:
        m = mass
        g = gravity constant
        y = height

    Usage: Energy stored by springs
    """
    U = mass * gravity * y
    return U
f4fcfc9e7ddac8b246b2200e3886b79f6706936e
708,162
def to_list(obj):
    """List Converter

    Takes any object and converts it to a `list`.
    If the object is already a `list` it is just returned,
    if the object is None an empty `list` is returned,
    else a `list` is created with the object as its first element.

    Args:
        obj (any object): the object to be converted

    Returns:
        A list containing the given object
    """
    if isinstance(obj, list):
        return obj
    elif isinstance(obj, tuple):
        return list(obj)
    elif obj is None:
        return []
    else:
        return [obj, ]
3ca373867ea3c30edcf7267bba69ef2ee3c7722e
708,163
def function():
    """
    >>> function()
    'decorated function'
    """
    return 'function'
46b892fb70b5672909d87efcf76ffd3f96f9cf7f
708,164
def load_stopwords(file_path):
    """
    :param file_path: Stop word file path
    :return: Stop word list
    """
    # use a context manager so the file handle is closed after reading
    with open(file_path, 'r', encoding='utf-8') as f:
        stopwords = [line.strip() for line in f.readlines()]
    return stopwords
9cb6578b5cbc608bc72da7c4f363b4f84d0adbb7
708,165
from datetime import datetime
import uuid


def versioneer():
    """Function used to generate a new version string when saving a new
    Service bundle. Users can also override this function to get a
    customized version format.
    """
    date_string = datetime.now().strftime("%Y%m%d")
    random_hash = uuid.uuid4().hex[:6].upper()

    # Example output: '20191009_D246ED'
    return date_string + "_" + random_hash
7c5123d28e3bee45f2c9f7d519e830cf80e9fea8
708,167
import subprocess


def get_git_revision_hash():
    """Returns the git version of this project."""
    return subprocess.check_output(
        ["git", "describe", "--always"], universal_newlines=True
    )
51c76a0e814cd8336d488c6d66a17ba3422c5c66
708,168
def train_test_split(df, frac):
    """Create a train/test split for a dataframe and return both the
    training and testing sets. frac refers to the fraction of the data
    you would like to set aside for training.
    """
    frac = round(len(df) * frac)
    train = df[:frac]
    test = df[frac:]
    return train, test
8e233e017a261141f57f7b2bff9a527e275d2ed9
708,169
def filter_imgs(df, properties=[], values=[]):
    """Filters a pandas dataframe according to properties and a range of values.

    Input:
        df - pandas dataframe
        properties - array of column names to be filtered
        values - array of tuples containing bounds for each filter

    Output:
        df - filtered dataframe
    """
    for i, val in enumerate(properties):
        df = df.loc[(df[val] > values[i][0]) & (df[val] < values[i][1])]
    return df
cdc5c8bfef10fae60f48cee743df049581a0df04
708,170
from typing import Dict
from typing import Any
from typing import Optional


def _add_extra_kwargs(
    kwargs: Dict[str, Any], extra_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Safely add additional keyword arguments to an existing dictionary

    Parameters
    ----------
    kwargs : dict
        Keyword argument dictionary
    extra_kwargs : dict, default None
        Keyword argument dictionary to add

    Returns
    -------
    dict
        Keyword dictionary with added keyword arguments

    Notes
    -----
    There is no checking for duplicate keys
    """
    if extra_kwargs is None:
        return kwargs
    else:
        kwargs_copy = kwargs.copy()
        kwargs_copy.update(extra_kwargs)
        return kwargs_copy
cfc4c17f608c0b7fe1ae3046dc220d385c890caa
708,171
import random
import math


def Hiker(n, xLst, yLst, dist):
    """Hiker is a function to generate lists of x and y coordinates of n steps
    for a random walk of n steps, along with the distance between the first
    and last point.
    """
    x0 = 0
    y0 = 0
    x = x0
    y = y0
    # store the start point at index 0 so the loop does not overwrite it
    xLst[0] = x0
    yLst[0] = y0
    for i in range(n - 1):
        rnum = random.random()
        if rnum <= 0.19:
            y = y + 1
        elif rnum <= 0.43:
            y = y + 1
            x = x + 1
        elif rnum <= 0.60:
            x = x + 1
        elif rnum <= 0.70:
            y = y - 1
            x = x + 1
        elif rnum <= 0.72:
            y = y - 1
        elif rnum <= 0.75:
            y = y - 1
            x = x - 1
        elif rnum <= 0.85:
            x = x - 1
        elif rnum <= 1.00:
            y = y + 1
            x = x - 1
        xLst[i + 1] = x
        yLst[i + 1] = y
    # use ** for exponentiation; ^ is bitwise XOR in Python
    dist = math.sqrt((x - x0) ** 2 + (y - y0) ** 2)
    return (xLst, yLst, dist)
abe341c8ecdc579de2b72f5af1ace3f07dd40dc3
708,172
def correct_repeat_line():
    """Matches repeat spec above."""
    return "2|1|2|3|4|5|6|7"
b9c1e48c5043a042b9f6a6253cba6ae8ce1ca32c
708,173
def char_decoding(value):
    """Decode from 'UTF-8' string to unicode.

    :param value:
    :return:
    """
    if isinstance(value, bytes):
        return value.decode('utf-8')
    # return directly if unicode or exc happens
    return value
b8054b4a5012a6e23e2c08b6ff063cf3f71d6863
708,174
def get_relationship_length_fam_mean(data):
    """Calculate the mean length of relationship for families.

    DataDef 43

    Arguments:
        data - data frames to fulfill definition id

    Modifies:
        Nothing

    Returns:
        mean_relationship_length - mean relationship length of families
    """
    families = data[1]
    return families['max_days_since_first_service'].mean()
4d9b76c4dca3e1f09e7dd2684bd96e25792177fd
708,176
import os


def read_envs():
    """Function will read in all environment variables into a dictionary.

    :returns: Dictionary containing all environment variables or defaults
    :rtype: dict
    """
    envs = {}
    envs['QUEUE_INIT_TIMEOUT'] = os.environ.get('QUEUE_INIT_TIMEOUT', '3600')
    envs['VALIDATION_TIMEOUT'] = os.environ.get('VALIDATION_TIMEOUT', '28800')
    envs['VALIDATION_HOME'] = os.environ.get('VALIDATION_HOME', '/opt/aif-validator')
    envs['VALIDATION_FLAGS'] = os.environ.get('VALIDATION_FLAGS')
    envs['S3_VALIDATION_BUCKET'] = os.environ.get('S3_VALIDATION_BUCKET')
    envs['S3_VALIDATION_PREFIX'] = os.environ.get('S3_VALIDATION_PREFIX')
    envs['AWS_BATCH_JOB_ID'] = os.environ.get('AWS_BATCH_JOB_ID')
    envs['AWS_BATCH_JOB_NODE_INDEX'] = os.environ.get('AWS_BATCH_JOB_NODE_INDEX')
    envs['AWS_DEFAULT_REGION'] = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
    return envs
2d357796321d561a735461d425fa7c703082434c
708,177
import numpy


def load_catalog_npy(catalog_path):
    """Load a numpy catalog (extension ".npy").

    @param catalog_path: str
    @return record array
    """
    return numpy.load(catalog_path)
912281ad17b043c6912075144e6a2ff3d849a391
708,178
import torch


def exp2(input, *args, **kwargs):
    """Computes the base two exponential function of ``input``.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.exp2(ttorch.tensor([-4.0, -1.0, 0, 2.0, 4.8, 8.0]))
        tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])

        >>> ttorch.exp2(ttorch.tensor({
        ...     'a': [-4.0, -1.0, 0, 2.0, 4.8, 8.0],
        ...     'b': {'x': [[-2.0, 1.2, 0.25],
        ...                 [16.0, 3.75, -2.34]]},
        ... }))
        <Tensor 0x7ff90a4c3af0>
        ├── a --> tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
        └── b --> <Tensor 0x7ff90a4c3be0>
            └── x --> tensor([[2.5000e-01, 2.2974e+00, 1.1892e+00],
                              [6.5536e+04, 1.3454e+01, 1.9751e-01]])
    """
    return torch.exp2(input, *args, **kwargs)
17cbc0917acf19932ec4d3a89de8d78545d02e31
708,179
import pprint
import json


def tryJsonOrPlain(text):
    """Return json formatted, if possible. Otherwise just return."""
    try:
        return pprint.pformat(json.loads(text), indent=1)
    except ValueError:  # not valid JSON; fall back to plain text
        return text
2431479abf6ab3c17ea63356ec740840d2d18a74
708,180
def get_pool_health(pool):
    """Get ZFS list info."""
    pool_name = pool.split()[0]
    pool_capacity = pool.split()[6]
    pool_health = pool.split()[9]
    return pool_name, pool_capacity, pool_health
1a9dbb8477d8735b225afc2bdd683f550602b36e
708,181
def sum_fib_dp(m, n):
    """A dynamic programming version."""
    if m > n:
        m, n = n, m
    large, small = 1, 0
    # a running sum for Fibonacci numbers m ~ n + 1
    running = 0
    # dynamically update the two variables
    for i in range(n):
        large, small = large + small, large
        # note that (i + 1) -> small is basically mapping m -> F[m]
        if m <= i + 1 <= n:
            running += small
    return running
5be6e57ddf54d185ca6d17adebd847d0bc2f56fc
708,183
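A quick cross-check sketch for sum_fib_dp above, comparing it against a naive recursive Fibonacci (with F[1] = F[2] = 1, matching the convention the loop uses):

def fib(k):  # naive reference implementation, F[1] = F[2] = 1
    return 1 if k < 3 else fib(k - 1) + fib(k - 2)

assert sum_fib_dp(3, 7) == sum(fib(k) for k in range(3, 8))
print(sum_fib_dp(3, 7))  # 2 + 3 + 5 + 8 + 13 = 31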
def fibo_dyn2(n):
    """Return the n-th fibonacci number."""
    if n < 2:
        return 1
    else:
        a, b = 1, 1
        for _ in range(1, n):
            a, b = b, a + b
        return b
e8483e672914e20c6e7b892f3dab8fb299bac6fc
708,184
import io


def parse_file(fname, is_true=True):
    """Parse file to get labels."""
    labels = []
    with io.open(fname, "r", encoding="utf-8", errors="ignore") as fin:
        for line in fin:
            label = line.strip().split()[0]
            if is_true:
                assert label[:9] == "__label__"
                label = label[9:]
            labels.append(label)
    return labels
ea6cbd4b1a272f472f8a75e1cc87a2209e439205
708,185
import optparse


def _OptionParser():
    """Returns the options parser for run-bisect-perf-regression.py."""
    usage = ('%prog [options] [-- chromium-options]\n'
             'Used by a try bot to run the bisection script using the parameters'
             ' provided in the auto_bisect/bisect.cfg file.')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-w', '--working_directory',
                      type='str',
                      help='A working directory to supply to the bisection '
                           'script, which will use it as the location to checkout '
                           'a copy of the chromium depot.')
    parser.add_option('-p', '--path_to_goma',
                      type='str',
                      help='Path to goma directory. If this is supplied, goma '
                           'builds will be enabled.')
    parser.add_option('--path_to_config',
                      type='str',
                      help='Path to the config file to use. If this is supplied, '
                           'the bisect script will use this to override the default '
                           'config file path. The script will attempt to load it '
                           'as a bisect config first, then a perf config.')
    parser.add_option('--extra_src',
                      type='str',
                      help='Path to extra source file. If this is supplied, '
                           'bisect script will use this to override default behavior.')
    parser.add_option('--dry_run',
                      action="store_true",
                      help='The script will perform the full bisect, but '
                           'without syncing, building, or running the performance '
                           'tests.')
    return parser
7485db294d89732c2c5223a3e3fe0b7773444b49
708,186
import json
import os


def load_config(filename):
    """
    Returns:
        dict
    """
    # use a context manager so the config file handle is closed after loading
    with open(filename, 'r') as fh:
        config = json.load(fh)

    # back-compat
    if 'csvFile' in config:
        config['modelCategoryFile'] = config['csvFile']
        del config['csvFile']

    required_files = ["prefix", "modelCategoryFile", "colorFile"]
    for f in required_files:
        assert f in config, 'Invalid config! key <{}> is missing!'.format(f)
        assert os.path.exists(config[f]), 'Invalid config! path <{}> not exists!'.format(config[f])
        if 'File' in f:
            assert os.path.isfile(config[f]), 'Invalid config! <{}> is not a valid file!'.format(config[f])
    return config
af9f6cb02925d38077652703813b9fec201f12f7
708,187
def convert(chinese):
    """Converts Chinese numbers to int.

    in: string
    out: int
    """
    numbers = {'零': 0, '一': 1, '二': 2, '三': 3, '四': 4, '五': 5, '六': 6,
               '七': 7, '八': 8, '九': 9, '壹': 1, '贰': 2, '叁': 3, '肆': 4,
               '伍': 5, '陆': 6, '柒': 7, '捌': 8, '玖': 9, '两': 2, '廿': 20,
               '卅': 30, '卌': 40, '虚': 50, '圆': 60, '近': 70, '枯': 80, '无': 90}
    units = {'个': 1, '十': 10, '百': 100, '千': 1000, '万': 10000,
             '亿': 100000000, '万亿': 1000000000000, '拾': 10, '佰': 100, '仟': 1000}
    number, pureNumber = 0, True
    for i in range(len(chinese)):
        if chinese[i] in units or chinese[i] in ['廿', '卅', '卌', '虚', '圆', '近', '枯', '无']:
            pureNumber = False
            break
        if chinese[i] in numbers:
            number = number * 10 + numbers[chinese[i]]
    if pureNumber:
        return number
    number = 0
    for i in range(len(chinese)):
        if chinese[i] in numbers or chinese[i] == '十' and (i == 0 or chinese[i - 1] not in numbers or chinese[i - 1] == '零'):
            base, currentUnit = 10 if chinese[i] == '十' and (i == 0 or chinese[i] == '十' and chinese[i - 1] not in numbers or chinese[i - 1] == '零') else numbers[chinese[i]], '个'
            for j in range(i + 1, len(chinese)):
                if chinese[j] in units:
                    if units[chinese[j]] >= units[currentUnit]:
                        base, currentUnit = base * units[chinese[j]], chinese[j]
            number = number + base
    return number
cf2ece895698e2d99fde815efa0339687eadda97
708,188
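A few spot checks for convert above; pure digit strings take the fast path, while unit characters such as 十 and 百 take the positional path:

print(convert('三五'))      # 35  (pure digits)
print(convert('十三'))      # 13
print(convert('两百三十'))  # 230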
def getjflag(job):
    """Returns flag if job is in a finished state."""
    return 1 if job['jobstatus'] in ('finished', 'failed', 'cancelled', 'closed') else 0
bf0c0a85cb1af954d25f4350e55b9e3604cf7c79
708,189
def construct_pos_line(elem, coor, tags):
    """Do the opposite of the parse_pos_line."""
    line = "{elem} {x:.10f} {y:.10f} {z:.10f} {tags}"
    return line.format(elem=elem, x=coor[0], y=coor[1], z=coor[2], tags=tags)
21ca509131c85a2c7bc24d00a28e7d4ea580a49a
708,191
def convert_time(time):
    """Convert given time (in seconds) to srt format."""
    stime = '%(hours)02d:%(minutes)02d:%(seconds)02d,%(milliseconds)03d' % \
            {'hours': time // 3600,
             'minutes': (time % 3600) // 60,
             'seconds': time % 60,
             'milliseconds': (time % 1) * 1000}
    return stime
948e6567c8bc17ccb5f98cf8c8eaf8fe6e8d0bec
708,192
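A quick example of convert_time above, using a fractional seconds value:

print(convert_time(3661.5))  # '01:01:01,500'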
def Returns1(target_bitrate, result):
    """Score function that returns a constant value."""
    # pylint: disable=W0613
    return 1.0
727e58e0d6d596cf4833ca3ca1cbcec6b9eedced
708,193
import re


def remove_repeats(msg):
    """This function removes repeated characters from text.

    :param/return msg: String
    """
    # twitter-specific repeats: characters repeated 3 or more times
    msg = re.sub(r"(.)\1{2,}", r"\1\1\1", msg)
    # laughs
    msg = re.sub(r"(ja|Ja)(ja|Ja)+(j)?", r"jaja", msg)  # Spanish
    msg = re.sub(r"(rs|Rs)(Rs|rs)+(r)?", r"rsrs", msg)  # Portuguese
    msg = re.sub(r"(ha|Ha)(Ha|ha)+(h)?", r"haha", msg)  # English
    return msg
590ab42f74deaa9f8dc1eb9c8b11d81622db2e6d
708,194
def _legend_main_get(project, row):
    """Builds the legend of the chart's main series.

    input
        project: the 'project' tag of the project selected in the file
            XYplus_parameters.f_xml (in XYplus_main.py)
        row: the active row returned by select_master(), from which the
            chart title is extracted
    return: a str with the legend of the chart's main point
    """
    legend_master = project.find('graph/legend_master').text.strip()
    columns_master = project.findall('graph/legend_master/column')
    if len(columns_master) == 0:
        return legend_master
    subs = [row[int(col1.text) - 1] for col1 in columns_master]
    return legend_master.format(*subs)
3938d723bd44a67313b86f956464fd186ef25386
708,195
def repr_should_be_defined(obj):
    """Checks the obj.__repr__() method is properly defined."""
    obj_repr = repr(obj)
    assert isinstance(obj_repr, str)
    assert obj_repr == obj.__repr__()
    assert obj_repr.startswith("<")
    assert obj_repr.endswith(">")
    return obj_repr
28537f4f48b402a2eba290d8ece9b765eeb9fdc3
708,196
def is_char_token(c: str) -> bool:
    """Return true for single character tokens."""
    return c in ["+", "-", "*", "/", "(", ")"]
3d5691c8c1b9a592987cdba6dd4809cf2c410ee8
708,197
import numpy


def _float_arr_to_int_arr(float_arr):
    """Try to cast array to int64. Return original array if data is not
    representable."""
    int_arr = float_arr.astype(numpy.int64)
    if numpy.any(int_arr != float_arr):
        # we either have a float that is too large or NaN
        return float_arr
    else:
        return int_arr
73643757b84ec28ed721608a2176b292d6e90837
708,198
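Two spot checks for _float_arr_to_int_arr above: an exactly-representable array converts, while NaN forces the original float array to be returned unchanged (numpy may emit a cast warning for the NaN case):

import numpy

print(_float_arr_to_int_arr(numpy.array([1.0, 2.0])).dtype)        # int64
print(_float_arr_to_int_arr(numpy.array([1.0, numpy.nan])).dtype)  # float64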
import re


def error_038_italic_tag(text):
    """Fix the error and return (new_text, replacements_count) tuple."""
    backup = text
    (text, count) = re.subn(r"<(i|em)>([^\n<>]+)</\1>", "''\\2''", text, flags=re.I)
    if re.search(r"</?(?:i|em)>", text, flags=re.I):
        return (backup, 0)
    else:
        return (text, count)
b0c2b571ade01cd483a3ffdc6f5c2bbb873cd13c
708,199
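A short demo of error_038_italic_tag above: wiki-style italics replace the HTML tags, and any leftover unmatched tag reverts the whole text:

print(error_038_italic_tag("some <i>italic</i> text"))  # ("some ''italic'' text", 1)
print(error_038_italic_tag("broken <i>italic text"))    # ('broken <i>italic text', 0)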
def families_horizontal_correctors():
    """."""
    return ['CH']
a3f8de3e0d44ea72d2fb98733050b7a2d598c142
708,200
def __load_txt_resource__(path):
    """Load a txt file template; the caller is responsible for closing
    the returned file object.

    :param path:
    :return:
    """
    txt_file = open(path, "r")
    return txt_file
9e3632098c297d1f6407559a86f0d8dc7b68ea75
708,201
import torch


def policy_improvement(env, V, gamma):
    """Obtain an improved policy based on the values.

    @param env: OpenAI Gym environment
    @param V: policy values
    @param gamma: discount factor
    @return: the policy
    """
    n_state = env.observation_space.n
    n_action = env.action_space.n
    policy = torch.zeros(n_state)
    for state in range(n_state):
        v_actions = torch.zeros(n_action)
        for action in range(n_action):
            for trans_prob, new_state, reward, _ in env.env.P[state][action]:
                v_actions[action] += trans_prob * (reward + gamma * V[new_state])
        policy[state] = torch.argmax(v_actions)
    return policy
10587e5d4fb08158eff06a4305de6c02fc2d878c
708,202
import sys


def func(back=2):
    """Returns the function name."""
    return "{}".format(sys._getframe(back).f_code.co_name)
97332c32195418e4bf6dd6427adabbc5c4360580
708,203
def get_A2_const(alpha1, alpha2, lam_c, A1):
    """Function to compute the constant A2.

    Args:
        alpha1 (float): The alpha1 parameter of the WHSCM.
        alpha2 (float): The alpha2 parameter of the WHSCM.
        lam_c (float): The switching point between the two exponents
            of the double power-laws in the WHSCM.
        A1 (float): The A1 constant of the WHSCM.

    Returns:
        A2 (float): The A2 constant of the WHSCM.
    """
    A2 = A1 * (lam_c ** (alpha2 - alpha1))
    return A2
16fe12e9ef9d72cfe7250cf840e222512409d377
708,205
def unique_list(a_list, unique_func=None, replace=False):
    """Unique a list-like object.

    - a_list: list-like object
    - unique_func: the filter function used to return a hashable sign for unique
    - replace: later items replace earlier ones with the same sign

    Return the unique subcollection of a_list.

    Example:
        data = [(1, 2), (2, 1), (2, 3), (1, 2)]
        unique_func = lambda x: tuple(sorted(x))
        unique(data) -> [(1, 2), (2, 1), (2, 3)]
        unique(data, unique_func) -> [(1, 2), (2, 3)]
        unique(data, unique_func, replace=True) -> [(2, 1), (2, 3)]
    """
    unique_func = unique_func or (lambda x: x)
    result = {}
    for item in a_list:
        hashable_sign = unique_func(item)
        if hashable_sign not in result or replace:
            result[hashable_sign] = item
    return list(result.values())
8d7957a8dffc18b82e8a45129ba3634c28dd0d52
708,206
def gap2d_cx(cx):
    """Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
    cx["h"] = 1
    cx["w"] = 1
    return cx
28f6ba5f166f0b21674dfd507871743243fb4737
708,207
from typing import Sequence
from typing import Any


def find(sequence: Sequence, target_element: Any) -> int:
    """Find the index of the first occurrence of target_element in sequence.

    Args:
        sequence: A sequence to search through
        target_element: An element to search for in the sequence

    Returns:
        The index of target_element's first occurrence, -1 if it was not
        found or the sequence is empty
    """
    if not sequence:
        return -1
    try:
        return sequence.index(target_element)
    except ValueError:
        return -1
20edfae45baafa218d8d7f37e0409e6f4868b75d
708,209
from pathlib import Path
from typing import List
from typing import Dict
import json


def read_nli_data(p: Path) -> List[Dict]:
    """Read dataset which has been converted to nli form."""
    with open(p) as f:
        data = json.load(f)
    return data
2218d8dc06e3b9adfe89cb780a9ef4e7cb111d14
708,210
def prepare_data_from_stooq(df, to_prediction=False, return_days=5):
    """Prepare data in X, y format from a pandas dataframe downloaded from
    stooq. y is created as the closing price in return_days minus the
    opening price.

    Keyword arguments:
    df -- data frame containing data from stooq
    return_days -- number of days in the frame over which to calculate y
    """
    if 'Wolumen' in df.columns:
        df = df.drop(['Data', 'Wolumen', 'LOP'], axis=1)
    else:
        df = df.drop('Data', axis=1)
    y = df['Zamkniecie'].shift(-return_days) - df['Otwarcie']
    if not to_prediction:
        df = df.iloc[:-return_days, :]
        y = y[:-return_days] / df['Otwarcie']
    return df.values, y
4b5bc45529b70ed1e8517a1d91fb5a6c2ff0b504
708,211
def represents_int_above_0(s: str) -> bool:
    """Returns a value evaluating whether a string is an integer > 0.

    Args:
        s: A string to check.

    Returns:
        True if it converts to an int greater than 0, False otherwise.
    """
    try:
        val = int(s)
        return val > 0
    except ValueError:
        return False
e39c4afeff8f29b86ef2a80be0af475223654449
708,212
def sydney():
    """Import most recent Sydney dataset."""
    d = {
        'zip': 'Sydney_geol_100k_shape',
        'snap': -1,
    }
    return d
f79a5002ef548769096d3aeb1ad2c7d77ac5ce68
708,213
def format_non_date(value):
    """Return non-date value as string."""
    return_value = None
    if value:
        return_value = value
    return return_value
9a7a13d7d28a14f5e92920cfef7146f9259315ec
708,214
import functools
import math


def gcd_multiple(*args) -> int:
    """Return greatest common divisor of integers in args."""
    return functools.reduce(math.gcd, args)
c686b9495cd45ff047f091e31a79bedcd61f8842
708,215
from typing import Counter


def chars_to_family(chars):
    """Takes a list of characters and constructs a family from them.
    So, A1B2 would be created from ['B', 'A', 'B'] for example."""
    counter = Counter(chars)
    return "".join(sorted([char + str(n) for char, n in counter.items()]))
e78de779599f332045a98edde2aa0a0edc5a653b
708,216
import configparser


def get_config_properties(config_file="config.properties", sections_to_fetch=None):
    """
    Returns the list of properties as a dict of key/value pairs in the
    file config.properties.

    :param config_file: filename (string).
    :param sections_to_fetch: name of sections to fetch properties from
        (if specified); all sections are returned by default (iterable).
    :return: A flat (no sections) Python dictionary of properties.
    """
    cf = configparser.ConfigParser()
    try:
        cf.read(config_file)
    except Exception as e:
        print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
    properties = {}
    for section in cf.sections():
        # only include the section if requested
        if not sections_to_fetch or (section in sections_to_fetch):
            for item in cf.items(section):
                properties[item[0]] = item[1]
    return properties
627d21327560595bb4c2905c98604926f03ca655
708,217
from typing import Dict


def merge(source: Dict, destination: Dict) -> Dict:
    """Deep merge two dictionaries.

    Parameters
    ----------
    source: Dict[Any, Any]
        Dictionary to merge from
    destination: Dict[Any, Any]
        Dictionary to merge to

    Returns
    -------
    Dict[Any, Any]
        New dictionary with fields in destination overwritten
        with values from source
    """
    new_dict = {**destination}
    for key, value in source.items():
        if isinstance(value, dict):
            # get the existing node or create one
            node = new_dict.get(key, {})
            new_dict[key] = merge(value, node)
        else:
            new_dict[key] = value
    return new_dict
4ffba933fe1ea939ecaa9f16452b74a4b3859f40
708,218
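A small demo of merge above, showing that nested keys are merged rather than overwritten wholesale; the config values are invented for illustration:

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 6543}}
print(merge(overrides, defaults))
# {'db': {'host': 'localhost', 'port': 6543}, 'debug': False}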
import ast


def is_string_expr(expr: ast.AST) -> bool:
    """Check that the expression is a string literal."""
    return (
        isinstance(expr, ast.Expr)
        and isinstance(expr.value, ast.Constant)
        and isinstance(expr.value.value, str)
    )
f61418b5671c5e11c1e90fce8d90c583659d40e3
708,220
import subprocess


def get_current_commit_id() -> str:
    """Get current commit id.

    Returns:
        str: current commit id.
    """
    command = "git rev-parse HEAD"
    commit_id = (
        subprocess.check_output(command.split()).strip().decode("utf-8")  # noqa: S603
    )
    return commit_id
978bd35fc3cfe71fcc133a6e49fbbe0e27d4feda
708,221
import re


def get_raw_code(file_path):
    """Removes empty lines, leading and trailing whitespaces, single and
    multi line comments.

    :param file_path: path to .java file
    :return: list with raw code
    """
    raw_code = []
    multi_line_comment = False
    with open(file_path, "r") as f:
        for row in f:
            # remove leading and trailing whitespaces
            line = row.strip()

            # remove '/* comments */'
            line = re.sub(r'''
                ^       # start of string
                /\*     # "/*" string
                .*      # any character (except line break) zero or more times
                \*/     # "*/" string
                \s*     # zero or many whitespaces
                ''', '', line, 0, re.VERBOSE)

            # remove '//comments'
            line = re.sub(r'''
                ^       # start of string
                //      # "//" string
                .*      # any character (except line break) zero or more times
                $       # end of string
                ''', '', line, 0, re.VERBOSE)

            # ignore empty lines
            if line != '':
                # skip multi-line comments (/*)
                if re.search(r'''
                        ^       # start of string
                        /\*     # "/*" string
                        .*      # any character (except line break) zero or more times
                        ''', line, re.VERBOSE):
                    multi_line_comment = True
                    continue
                # check if multi-line comment was closed (*/)
                elif re.search(r'''
                        .*      # any character (except line break) zero or more times
                        \*/     # "*/" string
                        $       # end of string
                        ''', line, re.VERBOSE):
                    multi_line_comment = False
                    line = re.sub(r'''
                        .*      # any character (except line break) zero or more times
                        \*/     # "*/" string
                        \s*     # zero or many whitespaces
                        ''', '', line, 0, re.VERBOSE)
                    if line == '':
                        continue
                # add line if it's not inside a multi-line comment
                if not multi_line_comment:
                    raw_code.append(line)
    return raw_code
6654a0423f024eaea3067c557984c3aa5e9494da
708,222
from typing import Pattern
import re


def _yaml_comment_regex() -> Pattern:
    """
    From https://yaml-multiline.info/, it states that `#` cannot appear *after* a
    space or a newline, otherwise it will be a syntax error (for multiline
    strings that don't use a block scalar). This applies to single lines as
    well: for example, `a#b` will be treated as a single value, but `a #b` will
    only capture `a`, leaving `#b` as a comment.

    For lines that *do* use a block scalar, the YAML parser will throw a syntax
    error if there is additional text on the same line as the block scalar.
    Comments however, are fine. e.g.

        key: |    # this is ok
            blah

        key: |    but this is not
            blah

    Given that we've made it to this stage, we can assume the YAML file is
    syntactically correct. Therefore, if we add whitespace before the comment
    character, we can know that everything else *after* the comment character
    is a comment for a given line.
    """
    return re.compile(r'(\s+#[\S ]*)')
3b5739f460c3d2c66f802dd46e061d2d07030525
708,223
import re


def format_ipc_dimension(number: float, decimal_places: int = 2) -> str:
    """Format a dimension (e.g. lead span or height) according to IPC rules."""
    # use the decimal_places argument instead of a hard-coded precision
    formatted = '{:.{}f}'.format(number, decimal_places)
    stripped = re.sub(r'^0\.', '', formatted)
    return stripped.replace('.', '')
60001f99b5f107faba19c664f90ee2e9fb61fe68
708,224
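A couple of examples for format_ipc_dimension above: a leading '0.' is dropped first, then any remaining decimal point is removed:

print(format_ipc_dimension(0.65))  # '65'
print(format_ipc_dimension(1.27))  # '127'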
def num_in_row(board, row, num):
    """True if num is already in the row, False otherwise."""
    return num in board[row]
ca9ab9de4514740e25e0c55f3613d03b2844cdb8
708,225
def factorial_3(n, acc=1):
    """
    Replace all recursive tail calls f(x=x1, y=y1, ...) with
    (x, y, ...) = (x1, y1, ...); continue
    """
    while True:
        if n < 2:
            return 1 * acc
        (n, acc) = (n - 1, acc * n)
        continue
        break
e067cf4564056bf488e56fe58bbd5b998b0175f3
708,226
def mod(a1, a2):
    """Function to give the remainder."""
    return a1 % a2
f5c03a952aed373e43933bafe37dbc75e796b74d
708,227
def encode_string(s):
    """Simple utility function to make sure a string is proper to be used
    in a SQL query.

    EXAMPLE:
        That's my boy! -> N'That''s my boy!'
    """
    res = "N'" + s.replace("'", "''") + "'"
    res = res.replace("\\''", "''")
    res = res.replace("\''", "''")
    return res
814822b9aa15def24f98b2b280ab899a3f7ea617
708,228
import subprocess


def get_sha_from_ref(repo_url, reference):
    """Returns the sha corresponding to the reference for a repo.

    :param repo_url: location of the git repository
    :param reference: reference of the branch
    :returns: utf-8 encoded string of the SHA found by the git command
    """
    # Using subprocess instead of convoluted git libraries.
    # Any rc != 0 will be throwing an exception, so we don't have to care
    out = subprocess.check_output(
        ["git", "ls-remote", "--exit-code", repo_url, reference]
    )
    # out is a b'' type string always finishing up with a newline
    # construct list of (ref, sha)
    refs = [
        (line.split(b"\t")[1], line.split(b"\t")[0])
        for line in out.split(b"\n")
        if line != b"" and b"^{}" not in line
    ]
    if len(refs) > 1:
        raise ValueError(
            "More than one ref for reference %s, please be more explicit %s"
            % (reference, refs)
        )
    return refs[0][1].decode("utf-8")
d7ab3e98217fa57e0831a6df94d34f1cf45e3d97
708,230
def get_motif_proteins(meme_db_file):
    """Hash motif_id's to protein names using the MEME DB file."""
    motif_protein = {}
    # use a context manager so the file handle is closed after parsing
    with open(meme_db_file) as fh:
        for line in fh:
            a = line.split()
            if len(a) > 0 and a[0] == 'MOTIF':
                if a[2][0] == '(':
                    motif_protein[a[1]] = a[2][1:a[2].find(')')]
                else:
                    motif_protein[a[1]] = a[2]
    return motif_protein
88e42b84314593a965e7dd681ded612914e35629
708,231
import sys


def in_ipython() -> bool:
    """Try to detect whether we are in an ipython shell, e.g., a jupyter notebook."""
    ipy_module = sys.modules.get("IPython")
    if ipy_module:
        return bool(ipy_module.get_ipython())
    else:
        return False
7a6804b964bd7fbde6d5795da953954343575413
708,232