Dataset schema: content — string (lengths 35 to 416k) · sha1 — string (length 40) · id — int64 (0 to 710k)
def val2str(val):
    """Writes values to a string.

    Args:
        val (any): Any object that should be represented by a string.

    Returns:
        valstr (str): String representation of `val`.
    """
    # Return the input if it's a string
    if isinstance(val, str):
        valstr = val
    # Handle types where spaces are added
    elif isinstance(val, tuple):
        valstr = repr(val).replace(', ', ',')
    elif isinstance(val, list):
        valstr = repr(val).replace(', ', ',')
    elif isinstance(val, dict):
        valstr = repr(val).replace(', ', ',').replace(': ', ':')
    # Otherwise use repr()
    else:
        valstr = repr(val)
    # Return output
    return valstr
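A brief usage sketch (hypothetical values, hand-checked against repr's spacing):

assert val2str("abc") == "abc"
assert val2str((1, 2)) == "(1,2)"
assert val2str({"a": 1}) == "{'a':1}"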
c8f26553ceeeef841239c534815f86293f91086a
2,542
import glob


def getFiles(regex, camera, mjdToIngest=None, mjdthreshold=None, days=None,
             atlasroot='/atlas/', options=None):
    """Collect diff files for a camera: a single MJD, a range of days, or everything.

    Args:
        regex: filename glob pattern (e.g. '*.ddc')
        camera: camera name used to build the directory path
        mjdToIngest: if given, only this MJD directory is searched
        mjdthreshold: starting MJD when searching a range of days
        days: number of days after mjdthreshold to search
        atlasroot: root of the ATLAS directory tree
        options: optional options object; options.difflocation overrides the
            directory template (CAMERA and MJD are substituted)
    """
    # If mjdToIngest is defined, ignore mjdThreshold. If neither
    # are defined, grab all the files.
    # Don't use find, use glob. It treats the whole argument as a regex.
    # e.g. directory = "/atlas/diff/" + camera + "/5[0-9][0-9][0-9][0-9]", regex = *.ddc
    if mjdToIngest:
        if options is not None and options.difflocation is not None:
            directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdToIngest))
        else:
            directory = atlasroot + "diff/" + camera + "/" + str(mjdToIngest)
        fileList = glob.glob(directory + '/' + regex)
    else:
        if mjdthreshold and days:
            fileList = []
            for day in range(days):
                if options is not None and options.difflocation is not None:
                    directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdthreshold + day))
                else:
                    directory = atlasroot + "diff/" + camera + "/%d" % (mjdthreshold + day)
                files = glob.glob(directory + '/' + regex)
                if files:
                    fileList += files
        else:
            if options is not None and options.difflocation is not None:
                directory = options.difflocation.replace('CAMERA', camera).replace('MJD', '/[56][0-9][0-9][0-9][0-9]')
            else:
                directory = atlasroot + "diff/" + camera + "/[56][0-9][0-9][0-9][0-9]"
            fileList = glob.glob(directory + '/' + regex)
    fileList.sort()
    return fileList
8d61d2e1900413d55e2cfc590fb6c969dd31b441
2,544
def return_limit(x):
    """Return the regulatory limit value for the given pollutant."""
    dizionario_limite = {'BENZENE': 5, 'NO2': 200, 'O3': 180,
                         'PM10': 50, 'PM2.5': 25}
    return dizionario_limite[x]
92d40eaef7b47c3a20b9bcf1f7fd72510a05d9b2
2,545
def npaths(x, y):
    """
    Count paths recursively. Memoizing makes this efficient.
    """
    if x > 0 and y > 0:
        return npaths(x - 1, y) + npaths(x, y - 1)
    if x > 0:
        return npaths(x - 1, y)
    if y > 0:
        return npaths(x, y - 1)
    return 1
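The docstring mentions memoization, but none is applied above; a minimal sketch (a hypothetical variant, not the original API) using the standard library:

from functools import lru_cache

@lru_cache(maxsize=None)
def npaths_memo(x, y):
    # same recursion as npaths, cached so each (x, y) is computed once
    if x > 0 and y > 0:
        return npaths_memo(x - 1, y) + npaths_memo(x, y - 1)
    if x > 0:
        return npaths_memo(x - 1, y)
    if y > 0:
        return npaths_memo(x, y - 1)
    return 1

assert npaths_memo(3, 3) == 20  # C(6, 3) lattice paths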
487a1f35b1bf825ffaf6bbf1ed86eb51f6cf18e9
2,546
import re


def joinAges(dataDict):
    """Merges columns by county, dropping ages"""
    popColumns = list(dataDict.values())[0].columns.tolist()
    popColumns = [re.sub("[^0-9]", "", column) for column in popColumns]
    dictOut = dict()
    for compartmentName, table in dataDict.items():
        table.columns = popColumns
        dictOut[compartmentName] = table.sum(axis=1, level=0)
    return dictOut
d83ee4883ba58f7090141c131c4e111a4805f15d
2,547
def alias(*alias):
    """Select a (list of) alias(es)."""
    valias = [t for t in alias]
    return {"alias": valias}
b2ff51f33b601468b1ba4d371bd5abd6d013a188
2,549
import json


def read_json_info(fname):
    """
    Parse info from the video information file.

    Returns: Dictionary containing information on podcast episode.
    """
    with open(fname) as fin:
        return json.load(fin)
1eed945ce2917cbca1fb807a807ab57229622374
2,550
import os


def create_output_directory(input_directory):
    """Creates new directory and returns its path"""
    output_directory = ''
    increment = 0
    done_creating_directory = False
    while not done_creating_directory:
        try:
            if input_directory.endswith('/'):
                output_directory = input_directory + 'converted'
            else:
                output_directory = input_directory + '/converted'
            if increment != 0:  # '!=' rather than 'is not': identity on ints is unreliable
                output_directory += str(increment)
            os.makedirs(output_directory, exist_ok=False)
            done_creating_directory = True
        except FileExistsError:
            increment += 1
    return output_directory
b2496045e8c9fbd627c32ea40a7b77181b7f4c1d
2,551
def change_box(base_image, box, change_array):
    """
    Assumption 1: Contents of box are as follows [x1, y1, width, height]
    """
    height, width, _ = base_image.shape
    new_box = [0, 0, 0, 0]
    for i, value in enumerate(change_array):
        if value != 0:
            new_box[i] = box[i] + value
        else:
            new_box[i] = box[i]
    assert new_box[0] >= 0
    assert new_box[1] >= 0
    assert new_box[0] + new_box[2] <= width
    assert new_box[1] + new_box[3] <= height
    return new_box
960b9f2c3ab1b65e9c7a708eac700dfaf65c67ac
2,552
import hashlib


def get_hash_name(feed_id):
    """
    Generate a unique identifier for a user-submitted feed from its hash.
    """
    return hashlib.md5(feed_id.encode('utf8')).hexdigest()
edd1caf943635a091c79831cc6151ecfa840e435
2,555
def trip2str(trip):
    """
    Pretty-printing.
    """
    header = "{} {} {} - {}:".format(trip['departureTime'], trip['departureDate'],
                                     trip['origin'], trip['destination'])
    output = [header]
    for subtrip in trip['trip']:
        originstr = u'{}....{}'.format(subtrip['departureTime'], subtrip['origin'])
        output.append(originstr)
        for subsubtrip in subtrip['trip']:
            t = subsubtrip['arrivalTime']
            d = subsubtrip['stop']
            intermediatestr = t + u'.' * 8 + d
            output.append(intermediatestr)
        destinationstr = u'{}....{}'.format(subtrip['arrivalTime'], subtrip['destination'])
        output.append(destinationstr)
    return "\n".join(output)
67daf3feb6b81d40d3102a8c610b20e68571b131
2,557
def station_suffix(station_type):
    """ Simple switch, map specific types on to single letter. """
    suffix = ' (No Dock)'
    if 'Planetary' in station_type and station_type != 'Planetary Settlement':
        suffix = ' (P)'
    elif 'Starport' in station_type:
        suffix = ' (L)'
    elif 'Asteroid' in station_type:
        suffix = ' (AB)'
    elif 'Outpost' in station_type:
        suffix = ' (M)'
    elif 'Carrier' in station_type:
        suffix = ' (C)'
    return suffix
c28c4d3f0da8401ffc0721a984ec2b2e2cd50b24
2,558
import csv


def import_capitals_from_csv(path):
    """Imports a dictionary that maps country names to capital names.

    @param string path: The path of the CSV file to import this data from.
    @return dict: A dictionary of the format
        {"Germany": "Berlin", "Finland": "Helsinki", ...}
    """
    capitals = {}
    with open(path) as capitals_file:
        reader = csv.reader(capitals_file)
        for row in reader:
            country, capital = row[0], row[1]
            capitals[country] = capital
    return capitals
3c6a9c91df455cb8721371fe40b248fb7af8d866
2,559
import os
import configparser


def read_config(config_file='config.ini'):
    """
    Read the configuration file.

    :param str config_file: Path to the configuration file.
    :return: Parsed configuration object.
    """
    if not os.path.isfile(config_file):
        raise NameError(config_file, 'not found')
    config = configparser.ConfigParser()
    config.read(config_file)
    return config
0cafb2f280e30467e833d404ac86a8cea03f050a
2,560
import pathlib


def example_data():
    """Example data setup"""
    tdata = (
        pathlib.Path(__file__).parent.absolute()
        / "data"
        / "ident-example-support.txt"
    )
    return tdata
a8c9a88f8850fecc7cc05fb8c9c18e03778f3365
2,562
def add_ending_slash(directory: str) -> str:
    """add_ending_slash function

    Args:
        directory (str): directory that you want to add ending slash

    Returns:
        str: directory name with slash at the end

    Examples:
        >>> add_ending_slash("./data")
        "./data/"
    """
    if directory[-1] != "/":
        directory = directory + "/"
    return directory
2062a55b59707dd48e5ae56d8d094c806d8a2c1d
2,563
def pre_arrange_cols(dataframe):
    """
    Push the original column name down into the data as the first row and
    rename that column to 'all'.

    :param dataframe: single-column pandas DataFrame
    :return: re-indexed DataFrame with its column renamed to 'all'
    """
    col_name = dataframe.columns.values[0]
    dataframe.loc[-1] = col_name
    dataframe.index = dataframe.index + 1
    dataframe = dataframe.sort_index()
    dataframe = dataframe.rename(index=str, columns={col_name: 'all'})
    return dataframe
522c0f4ca29b10d4a736d27f07d8e9dc80cafba5
2,564
import re


def extractCompositeFigureStrings(latexString):
    """
    Returns a list of latex figures as strings stripping out captions.
    """
    # extract figures
    figureStrings = re.findall(r"\\begin{figure}.*?\\end{figure}", latexString, re.S)
    # filter composite figures only and remove captions (preserving captions in subfigures)
    figureStrings = [
        re.findall(r"\\begin{figure}.*(?=\n.*\\caption)", figureString, re.S)[0] + "\n\\end{figure}"
        for figureString in figureStrings
        if "\\begin{subfigure}" in figureString
    ]
    return figureStrings
83a80c91890d13a6a0247745835e1ffb97d579f7
2,565
import re


def BCA_formula_from_str(BCA_str):
    """
    Get chemical formula string from BCA string

    Args:
        BCA_str: BCA ratio string (e.g. 'B3C1A1')
    """
    if len(BCA_str) == 6 and BCA_str[:3] == 'BCA':
        # format: BCAxyz. suitable for single-digit integer x, y, z
        funits = BCA_str[-3:]
    else:
        # format: BxCyAz. suitable for multi-digit or non-integer x, y, z
        funits = re.split('[BCA]', BCA_str)
        funits = [u for u in funits if len(u) > 0]
    components = ['BaO', 'CaO', 'Al2O3']
    formula = ''.join([f'({c}){n}' for c, n in zip(components, funits)])
    return formula
36375e62d70995628e253ba68ba8b777eb88d728
2,570
import argparse
import os


def arg_parse(dataset, view, num_shots=2, cv_number=5):
    """
    arguments definition method
    """
    parser = argparse.ArgumentParser(description='Graph Classification')
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    parser.add_argument('--v', type=str, default=1)
    parser.add_argument('--data', type=str, default='Sample_dataset',
                        choices=[f.path[5:] for f in os.scandir("data") if f.is_dir()])
    parser.add_argument('--dataset', type=str, default=dataset, help='Dataset')
    parser.add_argument('--view', type=int, default=view, help='view index in the dataset')
    parser.add_argument('--num_epochs', type=int, default=1,  # 50
                        help='Training Epochs')
    parser.add_argument('--num_shots', type=int, default=num_shots,  # 100
                        help='number of shots')
    parser.add_argument('--cv_number', type=int, default=cv_number,
                        help='number of validation folds.')
    parser.add_argument('--NormalizeInputGraphs', default=False, action='store_true',
                        help='Normalize Input adjacency matrices of graphs')
    parser.add_argument('--evaluation_method', type=str, default='model assessment',
                        help='evaluation method, possible values : model selection, model assessment')
    parser.add_argument('--threshold', dest='threshold', default='mean',
                        help='threshold the graph adjacency matrix. Possible values: no_threshold, median, mean')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='Disables CUDA training.')
    parser.add_argument('--num-classes', dest='num_classes', type=int, default=2,
                        help='Number of label classes')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Initial learning rate.')
    parser.add_argument('--weight_decay', type=float, default=5e-4,
                        help='Weight decay (L2 loss on parameters).')
    parser.add_argument('--hidden', type=int, default=8,
                        help='Number of hidden units.')
    parser.add_argument('--nb_heads', type=int, default=8,
                        help='Number of head attentions.')
    parser.add_argument('--dropout', type=float, default=0.8,
                        help='Dropout rate (1 - keep probability).')
    parser.add_argument('--alpha', type=float, default=0.2,
                        help='Alpha for the leaky_relu.')
    return parser.parse_args()
4758fb939584f0433fb669fd03939d54c498f375
2,571
import optparse


def ParseArgs():
    """Parses command line options.

    Returns:
        An options object as from optparse.OptionsParser.parse_args()
    """
    parser = optparse.OptionParser()
    parser.add_option('--android-sdk', help='path to the Android SDK folder')
    parser.add_option('--android-sdk-tools',
                      help='path to the Android SDK platform tools folder')
    parser.add_option('--R-package', help='Java package for generated R.java')
    parser.add_option('--R-dir', help='directory to hold generated R.java')
    parser.add_option('--res-dir', help='directory containing resources')
    parser.add_option('--crunched-res-dir',
                      help='directory to hold crunched resources')
    (options, args) = parser.parse_args()
    if args:
        parser.error('No positional arguments should be given.')

    # Check that required options have been provided.
    required_options = ('android_sdk', 'android_sdk_tools', 'R_package',
                        'R_dir', 'res_dir', 'crunched_res_dir')
    for option_name in required_options:
        if getattr(options, option_name) is None:
            parser.error('--%s is required' % option_name.replace('_', '-'))
    return options
492894d0cb4faf004f386ee0f4285180d0a6c37d
2,572
import codecs


def get_text(string, start, end, bom=True):
    """This method correctly accesses slices of strings using character
    start/end offsets referring to UTF-16 encoded bytes.

    This allows for using character offsets generated by Rosette (and other
    software) that use UTF-16 native string representations under Pythons
    with UCS-4 support, such as Python 3.3+ (refer to
    https://www.python.org/dev/peps/pep-0393/).

    The offsets are adjusted to account for a UTF-16 byte order mark (BOM)
    (2 bytes) and also that each UTF-16 logical character consumes 2 bytes.

    'character' in this context refers to logical characters for the purpose
    of character offsets; an individual character can consume up to 4 bytes
    (32 bits for so-called 'wide' characters) and graphemes can consume even
    more.
    """
    if not isinstance(string, str):
        raise ValueError('expected string to be of type str')
    if not any(((start is None), isinstance(start, int))):
        raise ValueError('expected start to be of type int or NoneType')
    if not any(((end is None), isinstance(end, int))):
        raise ValueError('expected end to be of type int or NoneType')
    if start is not None:
        start *= 2
        if bom:
            start += 2
    if end is not None:
        end *= 2
        if bom:
            end += 2
    utf_16, _ = codecs.utf_16_encode(string)
    sliced, _ = codecs.utf_16_decode(utf_16[start:end])
    return sliced
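A small usage sketch (hypothetical text; start/end are UTF-16 character offsets, BOM handling left at the default):

text = "héllo world"
assert get_text(text, 0, 5) == "héllo"   # characters 0..5
assert get_text(text, 6, 11) == "world"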
ffe3c74a248215a82b0e0a5b105f5e4c94c8c2a8
2,573
def merge_date_tags(path, k):
    """called when encountering only tags in an element (no text, nor mixed
    tag and text)

    Arguments:
        path {list} -- path of the element containing the tags
        k {string} -- name of the element containing the tags

    Returns:
        whatever type you want -- the value of the element

    note : if you want
    """
    l = k['#alldata']
    # 2015/01/01 12:10:30
    # if "PubMedPubDate" in path[-1]:
    if "date" in path[-1].lower():
        month = None
        year = None
        day = None
        hour = None
        minute = None
        r = ""
        # it should always be a dict with one key, and a subdict as value,
        # containing an "#alldata" key
        # {'month': {'#alldata': ['09']}}
        for i in l:
            # month
            k = next(iter(i))
            # ['09']
            ad = i[k]['#alldata']
            if k == "Year" and len(ad) == 1 and isinstance(ad[0], str):
                year = ad[0]
            elif k == "Month" and len(ad) == 1 and isinstance(ad[0], str):
                month = ad[0]
            elif k == "Day" and len(ad) == 1 and isinstance(ad[0], str):
                day = ad[0]
            elif k == "Hour" and len(ad) == 1 and isinstance(ad[0], str):
                hour = ad[0]
                if len(hour) == 1:
                    hour = "0" + hour
            elif k == "Minute" and len(ad) == 1 and isinstance(ad[0], str):
                minute = ad[0]
                if len(minute) == 1:
                    minute = "0" + minute
        if year is not None:
            r = r + year
        if month is not None:
            r = r + "/" + month
        if day is not None:
            r = r + "/" + day
        if hour is not None:
            r = r + " " + hour
        if minute is not None:
            r = r + ":" + minute
        # return only if at least "year" is present
        return r
    return k
2ae3bd0dada288b138ee450103c0b4412a841336
2,575
def first_item(iterable, default=None):
    """
    Returns the first item of given iterable.

    Parameters
    ----------
    iterable : iterable
        Iterable
    default : object
        Default value if the iterable is empty.

    Returns
    -------
    object
        First iterable item.
    """
    if not iterable:
        return default
    for item in iterable:
        return item
f5ebbaea7cf4152382fb4b2854f68a3320d21fdc
2,577
def rank(value_to_be_ranked, value_providing_rank):
    """
    Returns the rank of ``value_to_be_ranked`` in set of values, ``values``.
    Works even if ``values`` is a non-orderable collection (e.g., a set).

    A binary search would be an optimized way of doing this if we can
    constrain ``values`` to be an ordered collection.
    """
    num_lesser = [v for v in value_providing_rank if v < value_to_be_ranked]
    return len(num_lesser)
18c2009eb59b62a2a3c63c69d55f84a6f51e5953
2,579
def fixedcase_word(w, truelist=None):
    """Returns True if w should be fixed-case, None if unsure."""
    if truelist is not None and w in truelist:
        return True
    if any(c.isupper() for c in w[1:]):
        # tokenized word with noninitial uppercase
        return True
    if len(w) == 1 and w.isupper() and w not in {'A', 'K', 'N'}:
        # single uppercase letter
        return True
    if len(w) == 2 and w[1] == '.' and w[0].isupper():
        # initial with period
        return True
9047866f7117e8b1e4090c8e217c3063cfd37c38
2,580
def get_salesforce_log_files():
    """Helper function to get a list of available log files"""
    return {
        "totalSize": 2,
        "done": True,
        "records": [
            {
                "attributes": {
                    "type": "EventLogFile",
                    "url": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ"
                },
                "Id": "0ATD000000001bROAQ",
                "EventType": "API",
                "LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ/LogFile",
                "LogDate": "2014-03-14T00:00:00.000+0000",
                "LogFileLength": 2692.0
            },
            {
                "attributes": {
                    "type": "EventLogFile",
                    "url": "/services/data/v32.0/sobjects/EventLogFile/0ATD000000001SdOAI"
                },
                "Id": "0ATD000000001SdOAI",
                "EventType": "API",
                "LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001SdOAI/LogFile",
                "LogDate": "2014-03-13T00:00:00.000+0000",
                "LogFileLength": 1345.0
            }
        ]
    }
1c182898517d73c360e9f2ab36b902afea8c58d7
2,581
def remove_true_false_edges(dict_snapshots, dict_weights, index):
    """
    Remove chosen true edges from the graph so the embedding could be
    calculated without them.

    :param dict_snapshots: Dict where keys are times and values are a list of
        edges for each time stamp.
    :param dict_weights: Dict where keys are times and values are list of
        weights for each edge in the time stamp, order corresponds to the
        order of edges in dict_snapshots.
    :param index: Index of pivot time - until pivot time (including) it is
        train set, afterwards it is test set.
    :return: Updated dict_snapshots and dict_weights.
    """
    times = list(dict_snapshots.keys())
    mapping = {i: times[i] for i in range(len(times))}
    keys = list(mapping.keys())
    for key in keys:
        if key < index:
            continue
        else:
            del dict_snapshots[mapping[key]]
            del dict_weights[mapping[key]]
    return dict_snapshots, dict_weights
3f833fda22710c20703aa7590eae0fd649b69634
2,582
def get_specific_pos_value(img, pos):
    """
    Parameters
    ----------
    img : ndarray
        image data.
    pos : list
        pos[0] is the horizontal coordinate, pos[1] is the vertical coordinate.
    """
    return img[pos[1], pos[0]]
3929b29fa307a7e8b5282783c16639cacb2ab805
2,583
import re


def mrefresh_to_relurl(content):
    """Get a relative url from the contents of a metarefresh tag"""
    urlstart = re.compile('.*URL=')
    _, url = content.split(';')
    url = urlstart.sub('', url)
    return url
90cc3dbace5d4b001698612f9263309fa95aac8b
2,584
import logging


def get_previous_version(versions: dict, app: str) -> str:
    """Looks in the app's .version_history to retrieve the prior version"""
    try:
        with open(f"{app}/.version_history", "r") as fh:
            lines = [line.strip() for line in fh]
    except FileNotFoundError:
        logging.warning(f"No .version_history for {app}")
        return ""
    if versions[app] != lines[-1]:
        logging.warning(
            f"Mismatch in data:\n\tCurrent version is {versions[app]}"
            f" but most recent line in .version_history is {lines[-1]}"
        )
        return ""
    elif len(lines) < 2:
        logging.warning("No prior version recorded")
        return ""
    return lines[-2]
d3a4aec5c3bc842181aa3901971774761866c3e5
2,585
def ToHexStr(num):
    """
    Convert a returned error code to its hexadecimal string representation.

    :param num: error code (int)
    :return: hexadecimal string
    """
    chaDic = {10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f'}
    hexStr = ""
    if num < 0:
        num = num + 2**32
    while num >= 16:
        digit = num % 16
        hexStr = chaDic.get(digit, str(digit)) + hexStr
        num //= 16
    hexStr = chaDic.get(num, str(num)) + hexStr
    return hexStr
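Two hand-checked examples of the two's-complement handling:

assert ToHexStr(255) == 'ff'
assert ToHexStr(-1) == 'ffffffff'  # negative codes wrap at 2**32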
b6cf482defdc9f4fcf9ce64903e7a718e096bacb
2,587
import requests


def getSBMLFromBiomodelsURN(urn):
    """Get SBML string from given BioModels URN.

    Searches for a BioModels identifier in the given urn and retrieves the
    SBML from biomodels. For example:
        urn:miriam:biomodels.db:BIOMD0000000003.xml

    Handles redirects of the download page.

    :param urn:
    :return: SBML string for given model urn
    """
    if ":" not in urn:
        raise ValueError(
            "The URN", urn,
            "is not in the correct format: it must be divided by colons in a "
            "format such as 'urn:miriam:biomodels.db:BIOMD0000000003.xml'.")
    core = urn.split(":")[-1].split(".")[0]
    url = "https://www.ebi.ac.uk/biomodels/model/download/" + core + "?filename=" + core + "_url.xml"
    response = requests.get(url, allow_redirects=True)
    response.raise_for_status()
    sbml = response.content  # bytes array in py3
    try:
        sbml_str = str(sbml.decode("utf-8"))
    except Exception:
        sbml_str = str(sbml)
    return sbml_str
9a28f4a0619ebed6f9e272d84331482442ae9fb8
2,588
from datetime import datetime

from django.template import defaultfilters  # assumed: Django's template filters


def naturalTimeDifference(value):
    """
    Finds the difference between the datetime value given and now() and
    returns appropriate humanize form
    """
    if isinstance(value, datetime):
        delta = datetime.now() - value
        if delta.days > 6:
            return value.strftime("%b %d")  # May 15
        if delta.days > 1:
            return value.strftime("%A")  # Wednesday
        elif delta.days == 1:
            return 'yesterday'  # yesterday
        elif delta.seconds > 3600:
            if delta.seconds < 7200:
                return '1 hour ago'
            else:
                # integer division: '3 hours ago', not '3.0 hours ago'
                return str(delta.seconds // 3600) + ' hours ago'
        elif delta.seconds > 60:
            if delta.seconds < 120:
                return '1 minute ago'
            else:
                return str(delta.seconds // 60) + ' minutes ago'  # 29 minutes ago
        elif delta.seconds > 10:
            return str(delta.seconds) + ' seconds ago'  # 15 seconds ago
        else:
            return 'a moment ago'  # a moment ago
        return defaultfilters.date(value)  # unreachable: every branch above returns
    else:
        return str(value)
ce285358b1b99a4b2df460e6193d2a0970aa4eff
2,589
import json


def process_info(args):
    """
    Process a single json file
    """
    fname, opts = args
    with open(fname, 'r') as f:
        ann = json.load(f)

    examples = []
    skipped_instances = 0
    for instance in ann:
        components = instance['components']
        if len(components[0]['poly']) < 3:
            continue
        if 'class_filter' in opts.keys() and instance['label'] not in opts['class_filter']:
            continue
        # if instance['image_url'].find('Bhoomi') == -1:
        #     continue
        candidates = [c for c in components]
        instance['components'] = candidates
        if candidates:
            examples.append(instance)
    return examples, skipped_instances
8ade5b21db3cca57d9de91311fc57754161673de
2,590
from typing import List


def min_offerings(heights: List[int]) -> int:
    """
    Get the max increasing sequence on the left and the right side of current
    index, leading up to the current index. Current index's value would be
    the max of both + 1.
    """
    length = len(heights)
    if length < 2:
        return length
    left_inc = [0] * length
    right_inc = [0] * length
    for index in range(1, length):
        if heights[index] > heights[index - 1]:
            left_inc[index] = left_inc[index - 1] + 1
        if heights[length - 1 - index] > heights[length - index]:
            right_inc[length - 1 - index] = right_inc[length - index] + 1
    return sum(1 + max(left_inc[index], right_inc[index]) for index in range(length))
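Hand-worked examples: each position gets 1 + max(left run, right run) offerings:

assert min_offerings([1, 2, 2, 1]) == 6
assert min_offerings([1, 2, 3]) == 6  # runs 0, 1, 2 -> 1 + 2 + 3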
952ea82815ecb4db6d4d0347f16b0cf5b299f7d3
2,592
import os


def get_csv_file_path(file_name: str) -> str:
    """
    Get absolute path to csv metrics file

    Parameters
    ----------
    file_name
        Name of metrics file

    Returns
    -------
    file_path
        Full path to csv file
    """
    return os.path.join(os.getcwd(), file_name)
67b80193a75669a0635cf70ab1325e755424d654
2,593
import re


def snake_case(name: str):
    """
    https://stackoverflow.com/a/1176023/1371716
    """
    name = re.sub('(\\.)', r'_', name)
    name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    name = re.sub('__([A-Z])', r'_\1', name)
    name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
    return name.lower()
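Usage sketch (outputs traced through the four regex passes by hand):

assert snake_case("CamelCaseName") == "camel_case_name"
assert snake_case("some.AttrName") == "some_attr_name"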
4696ca3c1a50590aa6617ee3917b8364c11f3910
2,596
import glob


def get_loss_data():
    """
    This function returns a list of paths to all .npy loss files.

    Returns
    -------
    path_list : list of strings
        The list of paths to output files
    """
    path = "./data/*_loss.npy"
    path_list = glob.glob(path, recursive=True)
    return path_list
bc98b0bdf60ac3f7125da82fd68956957e89a777
2,597
import string


def list_zero_alphabet() -> list:
    """Build a list: 0, a, b, c etc."""
    score_dirs = ['0']
    for char in string.ascii_lowercase:
        score_dirs.append(char)
    return score_dirs
6cd9fc9e93257dcc7729235ac3cffa01dbd80c95
2,598
def dim_axis_label(dimensions, separator=', '):
    """
    Returns an axis label for one or more dimensions.
    """
    if not isinstance(dimensions, list):
        dimensions = [dimensions]
    return separator.join([d.pprint_label for d in dimensions])
f03e4eb02fc57890421bdcdaa0aea7d6541b8678
2,599
def _is_camel_case_ab(s, index):
    """Determine if the index is at 'aB', which is the start of a camel
    token. For example, with 'workAt', this function detects 'kA'."""
    return index >= 1 and s[index - 1].islower() and s[index].isupper()
c21ec7d8aa7e786d1ea523106af6f9426fea01d8
2,600
def rgb2hex(rgb: tuple) -> str:
    """
    Converts RGB tuple format to HEX string

    :param rgb:
    :return: hex string
    """
    return '#%02x%02x%02x' % rgb
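One hand-checked example:

assert rgb2hex((255, 0, 128)) == '#ff0080'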
1ecb1ca68fa3dbe7b58f74c2e50f76175e9a0c5a
2,601
import os
import shutil


def update_copy(src, dest):
    """
    Possibly copy `src` to `dest`. No copy unless `src` exists.
    Copy if `dest` does not exist, or mtime of dest is older than of `src`.

    Returns:
        None
    """
    if os.path.exists(src):
        if (not os.path.exists(dest)
                or os.path.getmtime(dest) < os.path.getmtime(src)):
            shutil.copy(src, dest)
    return None
4f83e633d9348cf8273309707713060e5611c277
2,602
def unix_to_windows_path(path_to_convert, drive_letter='C'):
    """
    For a string representing a POSIX compatible path (usually starting with
    either '~' or '/'), returns a string representing an equivalent Windows
    compatible path together with a drive letter.

    Parameters
    ----------
    path_to_convert : string
        A string representing a POSIX path
    drive_letter : string (Default : 'C')
        A single character string representing the desired drive letter

    Returns
    -------
    string
        A string representing a Windows compatible path.
    """
    if path_to_convert.startswith('~'):
        path_to_convert = path_to_convert[1:]
    if path_to_convert.startswith('/'):
        path_to_convert = path_to_convert[1:]
    path_to_convert = '{}{}{}'.format(drive_letter, ':\\', path_to_convert).replace('/', '\\')
    return path_to_convert
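Usage sketch (hypothetical paths):

assert unix_to_windows_path('~/docs/file.txt') == 'C:\\docs\\file.txt'
assert unix_to_windows_path('/tmp/x', drive_letter='D') == 'D:\\tmp\\x'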
d3c23e2c19be4b81be135ae84760430be852da41
2,603
def flatten(iterable):
    """
    Unpacks nested iterables into the root `iterable`.

    Examples:
        ```python
        from flashback.iterating import flatten

        for item in flatten(["a", ["b", ["c", "d"]], "e"]):
            print(item)
        #=> "a"
        #=> "b"
        #=> "c"
        #=> "d"
        #=> "e"

        assert flatten([1, {2, 3}, (4,), range(5, 6)]) == (1, 2, 3, 4, 5)
        ```

    Params:
        iterable (Iterable<Any>): the iterable to flatten

    Returns:
        tuple<Any>: the flattened iterable
    """
    items = []
    for item in iterable:
        if isinstance(item, (list, tuple, set, frozenset, range)):
            for nested_item in flatten(item):
                items.append(nested_item)
        else:
            items.append(item)
    return tuple(items)
8c47de3255906fb114a13ecfec4bf4a1204a0dfd
2,604
import random


def _sample(probabilities, population_size):
    """Return a random population, drawn with regard to a set of probabilities"""
    population = []
    for _ in range(population_size):
        solution = []
        for probability in probabilities:
            # probability of 1.0: always 1
            # probability of 0.0: always 0
            if random.uniform(0.0, 1.0) < probability:
                solution.append(1)
            else:
                solution.append(0)
        population.append(solution)
    return population
ac781075f8437ea02b2dde3b241c21685c259e0c
2,605
def decode_labels(labels):
    """Validate labels."""
    labels_decode = []
    for label in labels:
        if not isinstance(label, str):
            if isinstance(label, int):
                label = str(label)
            else:
                label = label.decode('utf-8').replace('"', '')
        labels_decode.append(label)
    return labels_decode
36b8b10af2cd2868ab1923ccd1e620ccf815d91a
2,606
from pathlib import Path


def _path_to_str(var):
    """Make sure var is a string or Path, return string representation."""
    if not isinstance(var, (Path, str)):
        raise ValueError("All path parameters must be either strings or "
                         "pathlib.Path objects. Found type %s." % type(var))
    else:
        return str(var)
c5ae3ed06be31de3220b5400966866ccda29b9fc
2,607
def form_cleaner(querydict):
    """
    Hacky way to transform form data into readable data by the model constructor

    :param querydict: QueryDict
    :return: dict
    """
    r = dict(querydict.copy())
    # Delete the CSRF token
    del r['csrfmiddlewaretoken']
    for key in list(r):
        # Take first element of array
        r[key] = r[key][0]
        # Delete empty fields
        if r[key] == '' or r[key] is None:
            del r[key]
    return r
83d61f028748132803555da85f0afe0215be2edd
2,611
def has_1080p(manifest):
    """Return True if any of the video tracks in manifest have a 1080p
    profile available, else False"""
    return any(video['width'] >= 1920
               for video in manifest['videoTracks'][0]['downloadables'])
f187ff7fd8f304c0cfe600c4bed8e809c4c5e105
2,612
import os


def get_filename(filePath):
    """get filename without file extension from file path"""
    absFilePath = os.path.abspath(filePath)
    return os.path.basename(os.path.splitext(absFilePath)[0])
e9ccddf29f38f88ccd65764a2914689611b142e8
2,613
def default_k_pattern(n_pattern):
    """the default number of pattern divisions for crossvalidation

    minimum number of patterns is 3*k_pattern. Thus for n_pattern < 12 this
    returns 2. From there it grows gradually until 5 groups are made for 40
    patterns. From this point onwards the number of groups is kept at 5.

    bootstrapped crossvalidation also uses this function to set k, but
    scales n_rdm to the expected proportion of samples retained when
    bootstrapping (1-np.exp(-1))
    """
    if n_pattern < 12:
        k_pattern = 2
    elif n_pattern < 24:
        k_pattern = 3
    elif n_pattern < 40:
        k_pattern = 4
    else:
        k_pattern = 5
    return k_pattern
60d083ffed24987882fa8074d99e37d06748eaf3
2,616
def _cast_wf(wf):
    """Cast wf to a list of ints"""
    if not isinstance(wf, list):
        if str(type(wf)) == "<class 'numpy.ndarray'>":
            # see https://stackoverflow.com/questions/2060628/reading-wav-files-in-python
            wf = wf.tolist()  # list(wf) does not convert int16 to int
        else:
            wf = list(wf)  # fallback
    if len(wf) > 0:
        assert isinstance(wf[0], int), f"first element of wf wasn't an int, but a {type(wf[0])}"
    return wf
cf2bf853b3ac021777a65d5323de6990d8dc4c5c
2,617
def ms(val):
    """
    Turn a float value into milliseconds as an integer.
    """
    return int(val * 1000)
97f7d736ead998014a2026a430bf3f0c54042010
2,619
def order_json_objects(obj):
    """
    Recursively orders all elements in a JSON object.

    Source: https://stackoverflow.com/questions/25851183/how-to-compare-two-json-objects-with-the-same-elements-in-a-different-order-equa
    """
    if isinstance(obj, dict):
        return sorted((k, order_json_objects(v)) for k, v in obj.items())
    if isinstance(obj, list):
        return sorted(order_json_objects(x) for x in obj)
    return obj
5a0459d227b0a98c536290e3e72b76424d29820c
2,621
import torch


def compute_rays_length(rays_d):
    """Compute ray length.

    Args:
        rays_d: [R, 3] float tensor. Ray directions.

    Returns:
        rays_length: [R, 1] float tensor. Ray lengths.
    """
    rays_length = torch.norm(rays_d, dim=-1, keepdim=True)  # [N_rays, 1]
    return rays_length
9b43f9ea79708a690282a04eec65dbabf4a7ae36
2,623
import itertools


def _repeat_elements(arr, n):
    """
    Repeats the elements in the input array, e.g.
    [1, 2, 3] -> [1, 1, 1, 2, 2, 2, 3, 3, 3]
    """
    ret = list(itertools.chain(*[list(itertools.repeat(elem, n)) for elem in arr]))
    return ret
95cf8ebb75505d2704cf957cdd709b8fa735973a
2,624
def atlas_slice(atlas, slice_number):
    """
    A function that pulls the data for a specific atlas slice.

    Parameters
    ----------
    atlas: nrrd
        Atlas segmentation file that has a stack of slices.
    slice_number: int
        The number in the slice that corresponds to the fixed image for
        registration.

    Returns
    -------
    sagittal: array
        Sagittal view being pulled from the atlas.
    coronal: array
        Coronal view being pulled from the atlas.
    horizontal: array
        Horizontal view being pulled from the atlas.
    """
    epi_img_data2 = atlas.get_fdata()
    sagittal = epi_img_data2[140, :, :]
    coronal = epi_img_data2[:, slice_number, :]
    horizontal = epi_img_data2[:, :, 100]
    return sagittal, coronal, horizontal
bafe5d886568203792b0f6178302f3ca5d536e5b
2,627
from typing import Dict

import aiohttp


async def head(url: str) -> Dict:
    """Fetch the headers returned by an HTTP HEAD request.

    :param str url: The URL to perform the HEAD request for.
    :rtype: dict
    :returns: dictionary of lowercase headers
    """
    async with aiohttp.request("HEAD", url) as res:
        response_headers = res.headers
    return {k.lower(): v for k, v in response_headers.items()}
b4decbfb4e92863c07c5202e2c884c02e590943f
2,629
def _create_serialize(cls, serializers):
    """
    Create a new serialize method with extra serializer functions.
    """
    def serialize(self, value):
        for serializer in serializers:
            value = serializer(value)
        value = super(cls, self).serialize(value)
        return value
    serialize.__doc__ = serializers[0].__doc__
    return serialize
522f6a14fe3e2bca70c141f14dc8b400be1ca680
2,630
def determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon):
    """PB should be filtered if NMD, a truncation, or protein classification
    is not likely protein coding (intergenic, antisense, fusion,...)

    Args:
        row (pandas Series): protein classification row
        min_junc_after_stop_codon (int): minimum number of junctions after
            stop codon a protein can have. used in NMD determination

    Returns:
        int: 1 if should be filtered, 0 if should not be filtered
    """
    # filter out pbs that are artifacts or noncoding
    pclass = str(row['protein_classification'])
    num_junc_after_stop_codon = int(row['num_junc_after_stop_codon'])
    pclass_base_to_keep = ['pFSM', 'pNIC']
    pclass_base = str(row['protein_classification_base'])
    if pclass_base not in pclass_base_to_keep and num_junc_after_stop_codon > min_junc_after_stop_codon:
        return 1
    elif 'trunc' in pclass:
        return 1
    elif 'intergenic' in pclass:
        return 1
    elif 'antisense' in pclass:
        return 1
    elif 'fusion' in pclass:
        return 1
    elif 'orphan' in pclass:
        return 1
    elif 'genic' in pclass:
        return 1
    return 0
29ab7ce53ac7569c4d8a29e8e8564eab33b3f545
2,631
def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and train an encoder-decoder model on x and y

    :param input_shape: Tuple of input shape
    :param output_sequence_length: Length of output sequence
    :param english_vocab_size: Number of unique English words in the dataset
    :param french_vocab_size: Number of unique French words in the dataset
    :return: Keras model built, but not trained
    """
    # OPTIONAL: Implement
    return None
47fa1893cc04b491292461db6c8a3418b464ba45
2,632
def close_to_cron(crontab_time, time_struct):
    """Find, within the given cron ranges (crontab_time), the values closest
    to the given time time_struct."""
    close_time = time_struct  # note: aliases the input, which is mutated below
    cindex = 0
    for val_struct in time_struct:
        offset_min = val_struct
        val_close = val_struct
        for val_cron in crontab_time[cindex]:
            offset_tmp = val_struct - val_cron
            if offset_tmp > 0 and offset_tmp < offset_min:
                val_close = val_struct
                offset_min = offset_tmp
        close_time[cindex] = val_close
        cindex = cindex + 1
    return close_time
7ce04d9b4260e7ea1ed7c3e95e7c36928989024e
2,633
def project_to_2D(xyz):
    """Projection to (0, X, Z) plane."""
    return xyz[0], xyz[2]
c6cdb8bd6dce65f6ce39b14b9e56622832f35752
2,634
def create_updated_alert_from_slack_message(payload, time, alert_json):
    """
    Create an updated raw alert (json) from an update request in Slack
    """
    values = payload['view']['state']['values']
    for value in values:
        for key in values[value]:
            if key == 'alert_id':
                continue
            if key == 'severity':
                if values[value][key].get('selected_option'):
                    alert_json[key] = \
                        values[value][key]['selected_option']['text']['text']
            if key == 'active':
                if values[value][key].get('selected_option'):
                    alert_json[key] = \
                        values[value][key]['selected_option']['text']['text']
            else:
                if values[value][key].get('value'):
                    alert_json[key] = values[value][key]['value']
    alert_json['datetime'] = time
    return alert_json
a685a0c0da472f055dc8860bdf09970a1ecc8aff
2,635
def enforce(*types):
    """
    decorator function enforcing, and converting, argument data types
    """
    def decorator(fn):
        def new_function(*args, **kwargs):
            # convert args into something mutable, list in this case
            newargs = []
            for original_argument, type_to_convert in zip(args, types):
                newargs.append(type_to_convert(original_argument))
            return fn(*newargs, **kwargs)
        return new_function
    return decorator
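A small decorator demo (hypothetical function, hand-checked):

@enforce(int, float)
def scale(a, b):
    return a * b

assert scale("3", "1.5") == 4.5  # "3" -> 3, "1.5" -> 1.5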
217ad3adccdaa9fc83ceaf5ef2c0905b8d54f1ed
2,636
from typing import List


def _get_rec_suffix(operations: List[str]) -> str:
    """finished, checked,

    Parameters
    ----------
    operations: list of str,
        names of operations to perform (or has performed),

    Returns
    -------
    suffix: str,
        suffix of the filename of the preprocessed ecg signal
    """
    suffix = "-".join(sorted([item.lower() for item in operations]))
    return suffix
270a1b3749342d05819eafef3fa5175da393b1ad
2,639
def get_A_text(params, func_type=None):
    """
    Get text associated with the fit of A(s)
    """
    line1 = r'$A(s|r)$ is assumed to take the form:'
    line2 = (r'$A(s|r) = s^{-1}\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^a '
             r'exp\bigg{(}{-\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^b}\bigg{)}$')
    a, b = params['a'], params['b']
    line3 = r'where a = {:.4f} and b = {:.4f}'.format(a, b)
    text = '\n'.join([line1, line2, line3])
    return text
ec68c49a7912dc5630e3c96a09d667ce52f89914
2,640
def transform_to_dict(closest_list: list) -> dict:
    """
    Returns dict {(latitude, longitude): {film1, film2, ...}, ...} from
    closest_list [[film1, (latitude, longitude)], ...], where film1, film2
    are titles of films, and (latitude, longitude) are the coordinates of
    the place where those films were shot.

    >>> transform_to_dict([["film1", (49, 24)]])
    {(49, 24): {'film1'}}
    """
    closest_dict = {}
    for film, coord in closest_list:
        if coord in closest_dict:
            closest_dict[coord].add(film)
        else:
            closest_dict[coord] = {film}
    return closest_dict
e7c6fae73792a828d85db03e794bfb69c7b1fe87
2,641
def dms2dd(s):
    """convert lat and long to decimal degrees"""
    direction = s[-1]
    degrees = s[0:4]
    dd = float(degrees)
    if direction in ('S', 'W'):
        dd *= -1
    return dd
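Usage sketch; note the slice s[0:4] assumes the degree field occupies the first four characters:

assert dms2dd("49.5N") == 49.5
assert dms2dd("23.5S") == -23.5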
cb76efbf8c3b6a75bcc26593fab81a8ef3e16bbf
2,643
import signal


def _signal_exit_code(signum: signal.Signals) -> int:
    """
    Return the exit code corresponding to a received signal.

    Conventionally, when a program exits due to a signal its exit code is
    128 plus the signal number.
    """
    return 128 + int(signum)
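Two conventional values, as a sanity check:

import signal

assert _signal_exit_code(signal.SIGINT) == 130   # 128 + 2
assert _signal_exit_code(signal.SIGTERM) == 143  # 128 + 15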
050eee98632216fddcbd71e4eb6b0c973f6d4144
2,645
def is_contained(target, keys):
    """Check whether the target json object contains the specified keys

    :param target: target json object
    :param keys: keys
    :return: True if all keys are contained, False if any is missing.
        Invalid parameters always return False.
    """
    if not target or not keys:
        return False
    # if keys is just a string convert it to a list
    if type(keys) == str:
        keys = [keys]
    # traverse the list to check the json object
    # if a key does not exist or its value is None then return False
    try:
        for key in keys:
            if target[key] is None:
                return False
    except KeyError:
        return False
    # All seems to be going well
    return True
948196d4b470788199506bd7768e03554fa67b40
2,646
def map(x, in_min, in_max, out_min, out_max):
    """
    Map a value from one range to another

    :param x: the value to scale
    :param in_min: minimum of input range
    :param in_max: maximum of input range
    :param out_min: minimum of output range
    :param out_max: maximum of output range
    :return: The value scaled to the new range
    :rtype: int
    """
    return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)
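Hand-computed examples; note this shadows the built-in map wherever it is imported:

assert map(5, 0, 10, 0, 100) == 50
assert map(512, 0, 1023, 0, 255) == 127  # truncated by int()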
4117af35b0061df1fd271306accf198692442dac
2,647
import time
import sys


def mock_tensorboard(logdir, host, port, print_nonsense, print_nothing,
                     address_in_use, sleep_time):
    """Run fake TensorBoard."""
    if logdir is None:
        print('A logdir must be specified. Run `tensorboard --help` for '
              'details and examples.')
        return -1
    elif print_nothing:
        time.sleep(sleep_time)
    elif print_nonsense:
        for i in range(0, 150):
            print('Lorem ipsum %d' % i, file=sys.stderr)
            time.sleep(0.1)
    elif address_in_use:
        print('TensorBoard attempted to bind to port %d, but it was already in use' % 1234,
              file=sys.stderr)
    else:
        time.sleep(1)
        print('TensorBoard 1.8.0 at http://ntbthinkpad:%d' % 6006, file=sys.stderr)
26a793264fa9561fabc9fa9d2fcb1377a6b60783
2,648
def get_chord_type(chord):
    """'Parses' input for a chord and returns the type of chord from it"""
    cleaned_chord = chord[1:]
    cleaned_chord = cleaned_chord.replace('b', '')
    cleaned_chord = cleaned_chord.replace('#', '')
    mapping = {
        '7': 'seven',
        '9': 'nine',
        'm7': 'minor7',
        'm9': 'minor9',
        'm': 'minor',
        'M7': 'major7',
        'M9': 'major9',
        '': 'major',
    }
    return mapping[cleaned_chord]
4a753eb31f1e33340a7aa4df6942c4752b208fdd
2,649
def file_base_features(path, record_type):
    """Return values for BASE_SCHEMA features."""
    base_feature_dict = {
        "record_id": path,
        "record_type": record_type,
        # "utc_last_access": os.stat(path).st_atime,
        "utc_last_access": 1600000000.0,
    }
    return base_feature_dict
12f16684002892d7af59a1e26e8a40501098ca4f
2,650
import itertools


def node_extractor(dataframe, *columns):
    """
    Extracts the set of nodes from a given dataframe.

    :param dataframe: dataframe from which to extract the node list
    :param columns: list of column names that contain nodes
    :return: list of all unique nodes that appear in the provided dataset
    """
    data_list = [dataframe[column].unique().tolist() for column in columns]
    return list(set(itertools.chain.from_iterable(data_list)))
7a4ab889257a0f2c5ddfe18e65d0a7f5f35d8d98
2,651
def _apply_attention_constraint(e, last_attended_idx, backward_window=1,
                                forward_window=3):
    """Apply monotonic attention constraint.

    **Note** This function is copied from
    espnet.nets.pytorch_backend.rnn.attention.py
    """
    if e.size(0) != 1:
        raise NotImplementedError(
            "Batch attention constraining is not yet supported.")
    backward_idx = last_attended_idx - backward_window
    forward_idx = last_attended_idx + forward_window
    if backward_idx > 0:
        e[:, :backward_idx] = -float("inf")
    if forward_idx < e.size(1):
        e[:, forward_idx:] = -float("inf")
    return e
213ef514a9cff31134185e38c57d46921eba763a
2,652
def _prepare_memoization_key(args, kwargs):
    """
    Make a tuple of arguments which can be used as a key for a memoized
    function's lookup_table. If some object can't be hashed then use its
    __repr__ instead.
    """
    key_list = []
    for arg in args:
        try:
            hash(arg)
            key_list.append(arg)
        except TypeError:  # unhashable object
            key_list.append(repr(arg))
    for (k, v) in kwargs.items():
        try:
            hash(k)
            hash(v)
            key_list.append((k, v))
        except TypeError:
            key_list.append((repr(k), repr(v)))
    return tuple(key_list)
c83e08c42886ba0e7f6e4defe5bc8f53f5682657
2,655
def day_log_add_id(day_log):
    """
    Assign an ID (day_id) to each entry in the day's log.

    :param day_log:
    :return:
    """
    for v in range(len(day_log)):
        day_log[v]['day_id'] = v + 1
    return day_log
c4608b07e86c074a11cf78d171490ec152092eeb
2,656
def brillance(p, g, m=255):
    """
    p < 0 : decrease the brightness
    p > 0 : increase the brightness
    """
    if (p + g < m + 1) and (p + g > 0):
        return int(p + g)
    elif p + g <= 0:
        return 0
    else:
        return m
b40169e487521c146c4c0777517492205951cf16
2,657
def by_tag(articles_by_tag, tag):
    """ Filter a list of (tag, articles) to list of articles by tag"""
    for a in articles_by_tag:
        if a[0].slug == tag:
            return a[1]
642472a89cb624ed02a6e8ec488b72856ac231a9
2,658
def dp_port_id(switch: str, port: str) -> str:
    """
    Return a unique id of a DP switch port based on switch name and port name

    :param switch:
    :param port:
    :return:
    """
    return 'port+' + switch + ':' + port
479891e41b51114744dcbb2b177180c19cd1bfd5
2,659
import csv


def read_csv(file_path, delimiter=",", encoding="utf-8"):
    """
    Reads a CSV file

    Parameters
    ----------
    file_path : str
    delimiter : str
    encoding : str

    Returns
    -------
    collection
    """
    with open(file_path, encoding=encoding) as file:
        data_in = list(csv.reader(file, delimiter=delimiter))
    return data_in
a4f1da219b0e5d752ff606614e93abbfc3d30597
2,660
import importlib


def import_activity_class(activity_name, reload=True):
    """
    Given an activity subclass name as activity_name, attempt to lazy load
    the module when needed. Returns True on success, False on ImportError.
    """
    try:
        module_name = "activity." + activity_name
        importlib.import_module(module_name)
        return True
    except ImportError:
        return False
b4cea3fad1f08a5758972847d3e03a41f89f223c
2,661
def extract_arguments(start, string):
    """
    Return the list of arguments in the upcoming function parameter closure.

    Example:
        string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
        arguments (output): '[{'start': 1, 'end': 7}, {'start': 8, 'end': 16},
                              {'start': 17, 'end': 19}, {'start': 20, 'end': 53}]'
    """
    arguments = []
    closures = {
        "<": 0,
        "(": 0
    }
    current_position = start
    argument_start_pos = current_position + 1

    # Search for final parenthesis
    while current_position < len(string):
        if string[current_position] == "(":
            closures["("] += 1
        elif string[current_position] == ")":
            closures["("] -= 1
        elif string[current_position] == "<":
            closures["<"] += 1
        elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
            closures["<"] -= 1

        # Finished all arguments
        if closures["("] == 0 and closures["<"] == 0:
            # Add final argument
            arguments.append({"start": argument_start_pos, "end": current_position})
            break

        # Finished current argument
        if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
            arguments.append({"start": argument_start_pos, "end": current_position})
            argument_start_pos = current_position + 1

        current_position += 1

    return arguments
8e6e3fecc0643aa3f55108916a7c6892a96f13aa
2,662
def tuple_list_to_lua(tuple_list):
    """Given a list of tuples, return a lua table of tables"""
    def table(it):
        return "{" + ",".join(map(str, it)) + "}"
    return table(table(t) for t in tuple_list)
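One hand-checked example:

assert tuple_list_to_lua([(1, 2), (3, 4)]) == "{{1,2},{3,4}}"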
71ec1a29f5e23b8bf82867617fe157fbba4a2332
2,664
def fancy_vector(v):
    """
    Returns a given 3-vector or array in a cute way on the shell, if you
    use 'print' on the return value.
    """
    return "\n / %5.2F \\\n" % (v[0]) + \
           " | %5.2F |\n" % (v[1]) + \
           " \\ %5.2F /\n" % (v[2])
2340f22aa87da00abad30b9946c374f34b38496d
2,665
def any_of(elements):
    """
    Check to see if the argument is contained in a list of possible elements.

    :param elements: The elements to check the argument against in the predicate.
    :return: A predicate to check if the argument is a constituent element.
    """
    def predicate(argument):
        return argument in elements
    return predicate
adacf8fd632d25452d22dab0a8a439021083ec83
2,666
def find_year(films_lst: list, year: int):
    """
    Filter list of films by given year
    """
    filtered_films_lst = [line for line in films_lst if line[1] == str(year)]
    return filtered_films_lst
f4c11e09e76831afcf49154234dd57044536bce1
2,667
import torch


def batch_eye_like(X: torch.Tensor):
    """Return batch of identity matrices like given batch of matrices `X`."""
    return torch.eye(*X.shape[1:], out=torch.empty_like(X))[None, :, :].repeat(X.size(0), 1, 1)
266ee5639ce303b81e2cb82892e64f37a09695ff
2,668
def cal_occurence(correspoding_text_number_list):
    """
    Calculate the number of occurrences of each number in a list.
    """
    di = dict()
    for i in correspoding_text_number_list:
        i = str(i)
        s = di.get(i, 0)
        if s == 0:
            di[i] = 1
        else:
            di[i] = di[i] + 1
    return di
aafabc6abdf4bf1df1b8d9e23a4af375df3ac75b
2,669
def booleans(key, val):
    """returns ucsc formatted boolean"""
    if val in (1, True, "on", "On", "ON"):
        val = "on"
    else:
        val = "off"
    return val
f210a2ce6b998e65d2e5934f1318efea0f96c709
2,670
def ConvertVolumeSizeString(volume_size_gb):
    """Converts the volume size defined in the schema to an int."""
    volume_sizes = {
        "500 GB (128 GB PD SSD x 4)": 500,
        "1000 GB (256 GB PD SSD x 4)": 1000,
    }
    return volume_sizes[volume_size_gb]
b1f90e5ded4d543d88c4f129ea6ac03aeda0c04d
2,671
def get_snps(x: str) -> tuple:
    """Parse a SNP line and return name, chromosome, position."""
    snp, loc = x.split(' ')
    chrom, position = loc.strip('()').split(':')
    return snp, chrom, int(position)
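Usage sketch for the expected 'name (chrom:pos)' input shape (hypothetical SNP):

assert get_snps("rs123 (7:117559590)") == ("rs123", "7", 117559590)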
52672c550c914d70033ab45fd582fb9e0f97f023
2,672
import torch


def compute_i_th_moment_batches(input, i):
    """
    compute the i-th moment for every feature map in the batch

    :param input: tensor
    :param i: the moment to be computed
    :return:
    """
    n, c, h, w = input.size()
    input = input.view(n, c, -1)
    mean = torch.mean(input, dim=2).view(n, c, 1, 1)
    eps = 1e-5
    var = torch.var(input, dim=2).view(n, c, 1, 1) + eps
    std = torch.sqrt(var)
    if i == 1:
        return mean
    elif i == 2:
        return std
    else:
        sol = ((input.view(n, c, h, w) - mean.expand(n, c, h, w)) / std).pow(i)
        sol = torch.mean(sol.view(n, c, -1), dim=2).view(n, c, 1, 1)
        return sol
2ab3b7bfd34b482cdf55d5a066b57852182b5b6a
2,673
import argparse


def parse_options():
    """Parses and checks the command-line options.

    Returns:
        A tuple containing the options structure.
    """
    usage = 'Usage: ./update_mapping.py [options]'
    desc = ('Example: ./update_mapping.py -o mapping.json.\n'
            'This script generates and stores a file that gives the\n'
            'mapping between phone serial numbers and BattOr serial numbers\n'
            'Mapping is based on which physical ports on the USB hubs the\n'
            'devices are plugged in to. For instance, if there are two hubs,\n'
            'the phone connected to port N on the first hub is mapped to the\n'
            'BattOr connected to port N on the second hub, for each N.')
    parser = argparse.ArgumentParser(usage=usage, description=desc)
    parser.add_argument('-o', '--output', dest='out_file', default='mapping.json',
                        type=str, action='store', help='mapping file name')
    parser.add_argument('-u', '--hub', dest='hub_types', action='append',
                        choices=['plugable_7port'], help='USB hub types.')
    options = parser.parse_args()
    if not options.hub_types:
        options.hub_types = ['plugable_7port']
    return options
7f7ee6a90e152023dbf6c6e163361a8c327108ae
2,674
def read_plot_pars():
    """
    Parameters are (in this order):
        Minimum box width,
        Maximum box width,
        Box width iterations,
        Minimum box length,
        Maximum box length,
        Box length iterations,
        Voltage difference
    """
    def extract_parameter_from_string(string):
        # returns the part of the string after the ':' sign
        # (the final character, assumed to be a newline, is dropped)
        parameter = ""
        start_index = string.find(':')
        for i in range(start_index + 1, len(string) - 1):
            parameter += string[i]
        return parameter

    with open("input.txt", "r") as f:
        pars = []
        line_counter = 0
        for line in f:
            if (line_counter > 0) and (line_counter < 8):
                pars.append(extract_parameter_from_string(line))
            line_counter += 1
    return pars
c78dc8e2a86b20eb6007850a70c038de5bf9f841
2,675
def get_upper_parentwidget(widget, parent_position: int):
    """This function replaces this:
        self.parentWidget().parentWidget().parentWidget()
    with this:
        get_upper_parentwidget(self, 3)

    :param widget: QWidget
    :param parent_position: Which parent
    :return: Wanted parent widget
    """
    while parent_position > 0:
        widget = widget.parentWidget()
        parent_position -= 1
    return widget
ff010f3d9e000cfa3c58160e150c858490f2412d
2,676