content: string, lengths 35–416k
sha1: string, length 40
id: int64, 0–710k
def gen_filelist(infiles, tmpd):
    """Write all audio files to a temporary text document for ffmpeg

    Returns the path of that text document."""
    filename = tmpd / "files.txt"
    with open(filename, "w") as f:
        for file in infiles:
            # This part ensures that any apostrophes are escaped
            file = str(file).split("'")
            if len(file) > 1:
                file = "'\\''".join(file)
            else:
                file = file[0]
            # Write the file line
            f.write("file '" + file + "'\n")
    return filename
c7d21c62de34fea98725a39fec735836e0cfd3d9
3,656
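A minimal usage sketch for the gen_filelist snippet above, assuming the function is in scope; the file names are invented for illustration:

from pathlib import Path
import tempfile

tmpd = Path(tempfile.mkdtemp())
listfile = gen_filelist(["one.mp3", "it's.mp3"], tmpd)
print(listfile.read_text())
# file 'one.mp3'
# file 'it'\''s.mp3'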
import os


def check_envs():
    """Checks environment variables.

    The MONGODB_PWD is a needed variable to enable mongodb connection.

    Returns:
        bool: If all needed environment variables are set.
    """
    if not os.environ.get('MONGODB_PWD', False):
        return False
    return True
22ac892b0eee827f63f7d8258ca49f0e462452fd
3,657
def getLines(filename):
    """Return list of lines from file"""
    with open(filename, 'r', errors='ignore') as ff:
        return ff.readlines()
36e515decaa3876eed3b5db8363fb81a5db89c84
3,658
import requests


def dog(argv, params):
    """Returns a slack attachment with a picture of a dog from thedogapi"""
    # Print prints logs to cloudwatch
    # Send response to response url
    dogurl = 'https://api.thedogapi.com/v1/images/search?mime_types=jpg,png'
    dogr = requests.get(dogurl)
    url = dogr.json()[0].get('url')
    payload = {
        'statusCode': '200',
        "attachments": [
            {
                "author_name": '@{} /catops dog'.format(
                    params.get('user_name', ['CatOps'])[0]),
                "fallback": "Woof woof.",
                "title": "Woof!",
                "text": "Evil doggo.",
                "image_url": url,
                "color": "#764FA5"
            }
        ],
        'response_type': 'in_channel',
        'headers': {'Content-Type': 'application/json'}
    }
    return payload
cb80426e6cab0aa2fc58b78baa0ff225d654f04a
3,660
import torch


def convert_to_torch_tensors(X_train, y_train, X_test, y_test):
    """
    Function to quickly convert datasets to pytorch tensors
    """
    # convert training data
    _X_train = torch.LongTensor(X_train)
    _y_train = torch.FloatTensor(y_train)
    # convert test data
    _X_test = torch.LongTensor(X_test)
    _y_test = torch.FloatTensor(y_test)
    # return the tensors
    return _X_train, _y_train, _X_test, _y_test
0d40fe19c977b25e3a2571adc98790d7058a77d9
3,661
def get_pybricks_reset_vector():
    """Gets the boot vector of the pybricks firmware."""
    # Extract reset vector from dual boot firmware.
    with open("_pybricks/firmware-dual-boot-base.bin", "rb") as pybricks_bin_file:
        pybricks_bin_file.seek(4)
        return pybricks_bin_file.read(4)
7d504e7e6e6ca444932fd61abb701a010a259254
3,662
def parse_img_name(path):
    """Parse recipe/video/segment/frame ids from an image path

    :param path: image path [str]
    :return: rcp_id, vid_id, seg_id, frm_id
    """
    code = path.split('\\')[-1].split('.')[0]
    vid_id = path.split('\\')[-2]
    rcp_id = path.split('\\')[-3]
    seg_id = int(code[:4])
    frm_id = int(code[4:])
    return rcp_id, vid_id, seg_id, frm_id
6e0a140934c584400365f12feb8a86cfea3bbb2b
3,663
import time


def tokenize_protein(text):
    """
    Tokenizes a protein string into a list of single-letter strings
    """
    aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
          'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    N = len(text)
    seq = list()
    timeout = time.time() + 5
    for i in range(N):
        symbol = text[i]
        if symbol in aa:
            seq.append(symbol)
        else:
            # unknown residues map to 'X'
            seq.append('X')
        if time.time() > timeout:
            break
    return seq
7dba531023aef97dcbfb37af75a9a1459a1e94d2
3,665
from typing import Callable


def read_xml_string() -> Callable[[int, int, str], str]:
    """Read an XML file to a string.

    Subsection string needs to include a prepending '-'."""
    def _read_xml_string(number: int, year: int, subsection: str) -> str:
        xmlfile = f"tests/data/xmls/session-{number:03}-{year}{subsection}.xml"
        with open(xmlfile, "r", encoding="utf-8") as infile:
            lines = infile.readlines()
        return " ".join([line.strip() for line in lines])

    return _read_xml_string
2b4e4c3585e26138e5fecf820699e97e1011a842
3,666
def all_ndcubes(request):
    """
    All the above ndcube fixtures in order.
    """
    return request.getfixturevalue(request.param)
906412ebe9a26de5cfddcb1d1431ab014c8084c6
3,667
def filter_order_by_oid(order, oid):
    """
    :param order:
    :type order: :class:`tests.testapp.testapp.trading.models.Order`
    :param oid: Order ID
    :type oid: int
    """
    return order.tid == oid
bf84e2e9f2fa19dc19e1d42ceef92dd3050d1e89
3,668
def has_poor_grammar(token_strings):
    """
    Returns whether the output has an odd number of double quotes or if it
    does not have balanced parentheses.
    """
    has_open_left_parens = False
    quote_count = 0
    for token in token_strings:
        if token == '(':
            if has_open_left_parens:
                return True
            else:
                has_open_left_parens = True
        elif token == ')':
            if has_open_left_parens:
                has_open_left_parens = False
            else:
                return True
        elif token == '"':
            quote_count += 1
    return quote_count % 2 == 1 or has_open_left_parens
b35c6af0ec771ac22ff66d9ca875f5d916cb9489
3,669
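A few illustrative calls for the has_poor_grammar snippet above (the token lists are made up):

assert has_poor_grammar(['say', '"', 'hi'])      # odd number of double quotes
assert has_poor_grammar([')', 'oops'])           # ')' with no matching '('
assert not has_poor_grammar(['(', 'ok', ')'])    # balanced parens, even quotes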
import subprocess


def countbam(sortedbam, outdir):
    """calculates the raw counts from a BAM index

    parameters
    ----------
    sortedbam
        string, the name of the sorted bam file
    outdir
        string, the path of the output directory

    returns
    ----------
    counts_file
        file containing the counts
    """
    counts_file = f"{sortedbam[:-3]}count"
    try:
        cmd_count = f"samtools idxstats {sortedbam} > {counts_file}"
        subprocess.check_output(cmd_count, shell=True)
    except subprocess.CalledProcessError:
        print('Unable to calculate raw counts from BAM')
    return counts_file
de60c7af2a479d00487a1891a64c926f9a2e0ae0
3,671
def tick2dayfrac(tick, nbTicks):
    """Conversion tick -> day fraction."""
    return tick / nbTicks
50d01778f62203d37e733a6b328455d3ea10e239
3,673
def matrixMultVec(matrix, vector):
    """
    Multiplies a matrix with a vector and returns the result as a new vector.

    :param matrix: Matrix
    :param vector: vector
    :return: vector
    """
    new_vector = []
    for row in matrix:
        x = 0
        for index, number in enumerate(row):
            x += number * vector[index]
        new_vector.append(x)
    return new_vector
8a03b3acfec0d91fcf0d2c85b4e2bdd4f3053dd2
3,674
def url_to_filename(base, url):
    """Return the filename to which the page is frozen.

    base -- path to the file
    url -- web app endpoint of the page
    """
    if url.endswith('/'):
        url = url + 'index.html'
    return base / url.lstrip('/')
35084e8b5978869bf317073c76bafc356a7d9046
3,676
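How the url_to_filename snippet above behaves, assuming base is a pathlib.Path; the paths are hypothetical:

from pathlib import Path

assert url_to_filename(Path('_build'), '/about/') == Path('_build/about/index.html')
assert url_to_filename(Path('_build'), '/css/site.css') == Path('_build/css/site.css')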
def _msd_anom_3d(time, D_alpha, alpha):
    """3d anomalous diffusion function."""
    return 6.0 * D_alpha * time**alpha
e5204c52368202665e4dd4acd7d86096349c0d29
3,677
import os


def tmp_envfile(tmp_path, monkeypatch):
    """Create a temporary environment file."""
    tmp_file_path = tmp_path / "setenv.txt"
    monkeypatch.setenv("GITHUB_ENV", os.fspath(tmp_file_path))
    return tmp_file_path
04deab16ce4b0e115e9fdc9b65a023f7c63f054f
3,679
def url_root():
    """Root path."""
    return """
    <p>Hello ! Welcome to Rabbit's WebServer Platform !</p>
    <a href="http://www.miibeian.gov.cn/" target="_blank" style="">京ICP备 18018365 号</a>&#8195;@2018Rabbit
    """
2e6d1d5301ac67bdec30cdeeaeed3c8638568de9
3,680
def map_keys(func, d):
    """
    Returns a new dict with func applied to keys from d, while values
    remain unchanged.

    >>> D = {'a': 1, 'b': 2}
    >>> map_keys(lambda k: k.upper(), D)
    {'A': 1, 'B': 2}
    >>> assert map_keys(identity, D) == D
    >>> map_keys(identity, {})
    {}
    """
    return dict((func(k), v) for k, v in d.items())
5e9798d208db5e43dad497d64a4b8e469c67eb3b
3,681
def defaults(dictionary, overwriteNone=False, **kwargs):
    """
    Set default values of a given dictionary, option to overwrite None values.

    Returns given dictionary with values updated by kwargs unless they
    already existed.

    :param dict dictionary:
    :param overwriteNone: Whether to overwrite None values.
    :param kwargs:
    """
    for key, value in dictionary.items():
        dictValueIsNone = value is None
        kwargsHasValue = key in kwargs
        if overwriteNone and dictValueIsNone and kwargsHasValue:
            continue
        # Overwrite kwargs with dictionary
        kwargs[key] = value
    return kwargs
6def5bb71839b3b627a5597ea6fa7fa1b48e463b
3,682
import json


def dump_into_json(filename, metrics):
    """Dump the metrics dictionary into a JSON file

    It will automatically dump the dictionary:
    metrics = {'duration': duration,
               'voltage_extremes': voltage_extremes,
               'num_beats': num_beats,
               'mean_hr_bpm': mean_hr_bpm,
               'beats': beats}
    into a JSON file named after the data file.

    :param filename: name of the file being read
    :param metrics: a dictionary containing duration, voltage extremes,
        number of beats, beats per minute, and the time where beats occur
    :returns: successful_JSON - whether the JSON file was created successfully
    """
    successful_JSON = False
    try:
        with open(filename + '.json', 'w') as output_file:
            json.dump(metrics, output_file)
        successful_JSON = True
    except TypeError:
        print("Unsuccessfully output JSON file")
    return successful_JSON
2e6effbcefe7cb3033c4c472cbee3850c00ae06b
3,683
def object_comparator_lookup(src_obj, dst_obj):
    """
    Compare an object with another entry by entry
    """
    dont_match = []
    no_upstream = []
    for i in dst_obj:
        count_name = 0
        count_value = 0
        for j in src_obj:
            if list(j.keys())[0] == list(i.keys())[0]:
                count_name = 1
                if j[list(j.keys())[0]] == i[list(i.keys())[0]]:
                    count_value = 1
        if count_name == 0:
            if list(i.keys())[0] != "last-modified":
                print(i.keys(), list(i.keys())[0])
                no_upstream.append(i)
        else:
            if count_value == 0:
                dont_match.append(i)
    if no_upstream or dont_match:
        return 1
    else:
        return 0
ba5767624255da915d9c07d25b62880c387f6f00
3,686
def is_primitive(v):
    """
    Checks if v is of primitive type.
    """
    return isinstance(v, (int, float, bool, str))
d22607c0e2b93b82b1da6beb50de68668624dd71
3,687
def linkify_only_full_urls(attrs, new=False):
    """Linkify only full links, containing the scheme."""
    if not new:  # This is an existing <a> tag, leave it be.
        return attrs

    # If the original text doesn't contain the scheme, don't linkify.
    if not attrs['_text'].startswith(('http:', 'https:')):
        return None

    return attrs
89fcc7f3fc53353686260779ae8ddb4c0523c57b
3,688
def set_vars(api, file: str, tess_profile: dict):
    """
    Reads the user-specific variables from the tess_profile

    :param api:
    :param file:
    :param tess_profile:
    :return:
    """
    # Set necessary information
    api.SetImageFile(file)
    # Set Variable
    api.SetVariable("save_blob_choices", "T")
    if 'variables' in tess_profile:
        for var in tess_profile['variables']:
            api.SetVariable(var, str(tess_profile['variables'][var]['value']))
    api.Recognize()
    return 0
a7fbe0c5bc584928623e2eadc36240ea3b0f37de
3,691
def serialize_to_jsonable(obj):
    """
    Serialize any object to a JSONable form
    """
    return repr(obj)
c8632b8b475d49b56d47b29afa8b44676b7882a5
3,692
import torch


def fps_and_pred(model, batch, **kwargs):
    """
    Get fingerprints and predictions from the model.

    Args:
        model (nff.nn.models): original NFF model loaded
        batch (dict): batch of data

    Returns:
        results (dict): model predictions and its predicted
            fingerprints, conformer weights, etc.
    """
    model.eval()

    # make the fingerprints
    outputs, xyz = model.make_embeddings(batch, xyz=None, **kwargs)

    # pool to get the learned weights and pooled fingerprints
    pooled_fp, learned_weights = model.pool(outputs)

    # get the final results
    results = model.readout(pooled_fp)

    # add sigmoid if it's a classifier and not in training mode
    if model.classifier:
        keys = list(model.readout.readout.keys())
        for key in keys:
            results[key] = torch.sigmoid(results[key])

    # add any required gradients
    results = model.add_grad(batch=batch, results=results, xyz=xyz)

    # put into a dictionary
    conf_fps = [i.cpu().detach() for i in outputs["conf_fps_by_smiles"]]
    energy = batch.get("energy")
    boltz_weights = batch.get("weights")

    # with operations to de-batch
    n_confs = [(n // m).item()
               for n, m in zip(batch['num_atoms'], batch['mol_size'])]

    for key, val in results.items():
        results[key] = [i for i in val]

    results.update({"fp": [i for i in pooled_fp],
                    "conf_fps": conf_fps,
                    "learned_weights": learned_weights,
                    "boltz_weights": list(torch.split(boltz_weights, n_confs))})

    if energy is not None:
        results.update({"energy": list(torch.split(energy, n_confs))})

    return results
1a8cca3ffe0d386e506ab42f6e77e00b1a5975d1
3,694
import re


def replace_subject_with_object(sent, sub, obj):
    """Replace the subject with object and remove the original subject"""
    sent = re.sub(r'{}'.format(obj), r'', sent, flags=re.IGNORECASE)
    sent = re.sub(r'{}'.format(sub), r'{} '.format(obj), sent, flags=re.IGNORECASE)
    # collapse runs of whitespace left behind by the removals
    return re.sub(r'\s{2,}', r' ', sent, flags=re.IGNORECASE)
1c7f8115968c4e4ef10dcc3b83f0f259433f5082
3,695
import sys


def get_title_count(titles, is_folder):
    """
    Gets the final title count
    """
    final_title_count = 0
    if len(titles.all) == 0:
        if not is_folder:
            sys.exit()
        else:
            return 0
    else:
        for group, disc_titles in titles.all.items():
            for title in disc_titles:
                final_title_count += 1
    return final_title_count
bdd239698f98c845cbecb27924725c38257547b6
3,697
def judgement(seed_a, seed_b):
    """Return amount of times last 16 binary digits of generators match."""
    sample = 0
    count = 0
    while sample <= 40000000:
        new_a = seed_a * 16807 % 2147483647
        new_b = seed_b * 48271 % 2147483647
        bin_a = bin(new_a)
        bin_b = bin(new_b)
        last16_a = bin_a[-16:]
        last16_b = bin_b[-16:]
        if last16_a == last16_b:
            count += 1
        seed_a = new_a
        seed_b = new_b
        sample += 1
    return count
9d778909ba6b04e4ca3adbb542fce9ef89d7b2b7
3,698
def accession(data):
    """
    Get the accession for the given data.
    """
    return data["mgi_marker_accession_id"]
132dcbdd0712ae30ce7929e58c4bc8cdf73aacb2
3,699
def atomic_number(request):
    """
    An atomic number.
    """
    return request.param
6f1a868c94d0a1ee4c84a76f04b4cabc3e0356e0
3,700
import re


def load_mac_vendors():
    """ parses wireshark mac address db and returns dict of mac : vendor """
    entries = {}
    with open('mac_vendors.db', 'r') as f:
        for line in f.readlines():
            entry = line.split()
            # match on first column being the three-byte OUI prefix
            r = re.compile(r'^([0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})$')
            if len(entry) > 0 and r.match(entry[0]):
                # lowercase as convention
                entries[entry[0].lower()] = entry[1]
    return entries
361e9c79de8b473c8757ae63384926d266b68bbf
3,701
def reorganize_data(texts):
    """
    Reorganize data to contain tuples of all signs combined and all trans combined

    :param texts: sentences in format of tuples of (sign, tran)
    :return: data reorganized
    """
    data = []
    for sentence in texts:
        signs = []
        trans = []
        for sign, tran in sentence:
            signs.append(sign)
            trans.append(tran)
        data.append((signs, trans))
    return data
27b4efd99bbf470a9f8f46ab3e34c93c606d0234
3,702
def query_schema_existence(conn, schema_name):
    """Function to verify whether the given database schema exists."""
    with conn.cursor() as cur:
        cur.execute(
            'SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)',
            [schema_name],
        )
        return cur.fetchone().exists
9c556283d255f580fc69a9e41a4d452d15e1eb17
3,703
import mimetypes


def img_mime_type(img):
    """Returns image MIME type or ``None``.

    Parameters
    ----------
    img: `PIL.Image`
        PIL Image object.

    Returns
    -------
    mime_type : `str`
        MIME string like "image/jpg" or ``None``.
    """
    if img.format:
        ext = "." + img.format
        return mimetypes.types_map.get(ext.lower())
    return None
fe46af6e5c03a1ae80cb809c81ab358ac5c085fa
3,704
def get_yourContactINFO(rows2):
    """
    Function that returns your personal contact info details
    """
    yourcontactINFO = rows2[0]
    return yourcontactINFO
beea815755a2e6817fb57a37ccc5aa479455bb81
3,706
def filter_out_nones(data):
    """
    Filter out any falsey values from data.
    """
    return (l for l in data if l)
39eb0fb7aafe799246d231c5a7ad8a150ed4341e
3,707
import itertools


def testBinaryFile(filePath):
    """
    Test if a file is in binary format

    :param filePath(str): File Path
    :return: the file contents if a NUL byte is found in the first 20 lines, else None
    """
    file = open(filePath, "rb")
    # Read only a couple of lines in the file
    binaryText = None
    for line in itertools.islice(file, 20):
        if b"\x00" in line:
            # Return to the beginning of the binary file
            file.seek(0)
            # Read the file in one step
            binaryText = file.read()
            break
    file.close()
    # Return the result
    return binaryText
809a962881335ce0a3a05e341a13b413c381fedf
3,708
def factor_size(value, factor):
    """
    Factors the given thumbnail size. Understands both absolute dimensions
    and percentages.
    """
    if type(value) is int:
        size = value * factor
        return str(size) if size else ''
    if value[-1] == '%':
        value = int(value[:-1])
        return '{0}%'.format(value * factor)
    size = int(value) * factor
    return str(size) if size else ''
41b061fb368d56ba18b52cd7a6a3322292671d83
3,709
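A quick sketch of the factor_size snippet above with invented inputs:

assert factor_size(100, 2) == '200'      # absolute dimension
assert factor_size('50%', 2) == '100%'   # percentage
assert factor_size(0, 3) == ''           # zero sizes collapse to ''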
def temp_database(tmpdir_factory):
    """
    Initialize the Database
    """
    tmpdb = str(tmpdir_factory.mktemp('temp')) + "/testdb.sqlite"
    return tmpdb
5cfcb27e6ac76766e21a1612691dbe79d1713abd
3,710
import ast


def get_classes(pyfile_path):
    """
    Gets the classes defined inside a Python file

    :param str pyfile_path: name of the file to inspect
    :return: a list with all the classes defined inside the Python file
    :rtype: list

    .. code-block:: python

        >> get_classes('./data.py')
        ['Module', 'PythonFile']
    """
    with open(pyfile_path, 'r') as f:
        inspection = ast.parse(f.read())
    return [class_.name for class_ in inspection.body
            if isinstance(class_, ast.ClassDef)]
72f376d10fd02574085a0236e10ea8901033ebd0
3,711
import array


def ordinate(values, maxrange, levels):
    """Ordinate values given a maximum data range and number of levels

    Parameters:
    1. values: an array of continuous values to ordinate
    2. maxrange: the maximum data range. Values larger than this will be saturated.
    3. levels: the number of levels at which values are ordinated
    """
    def quantizer(dist, maxrange, levels):
        return int(1.0 * max(1, dist - 1) * levels / maxrange) + 1

    if isinstance(values, (list, tuple, array.array)):
        ordinated = []
        for v in values:
            if v == 0:
                ordinated.append(v)
            else:
                ordinated.append(quantizer(v, maxrange, levels))
        return ordinated
    else:
        if values == 0:
            return values
        else:
            return quantizer(values, maxrange, levels)
4db4a26579d9208cd90ec630cf82e54a4a7ec3fe
3,713
def get_book_info(book_id, books):
    """Obtain meta data of certain books.

    :param book_id: Books to look up
    :type: int or list of ints
    :param books: Dataframe containing the meta data
    :type: pandas dataframe
    :return: Meta data for the book ids
    :rtype: List[str], List[str], List[str]
    """
    if not isinstance(book_id, list):
        book_id = [book_id]
    book_authors, book_titles, book_img_urls = [], [], []
    for i in book_id:
        book_info = books.loc[books["book_id"] == i].squeeze()
        if book_info.shape[0] == 0:
            raise ValueError(
                "Could not find book_id {} in the dataset.".format(book_id))
        book_authors.append(book_info.authors)
        book_titles.append(book_info.title)
        book_img_urls.append(book_info.image_url)
    return book_authors, book_titles, book_img_urls
64a91a498f9bf9df918d256a7ce705e98dadbbd9
3,719
import functools
import six


def save_error_message(func):
    """
    This decorator will work only if transition_entity is defined in kwargs
    and transition_entity is an instance of ErrorMessageMixin
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exception:
            message = six.text_type(exception)
            transition_entity = kwargs['transition_entity']
            if message:
                transition_entity.error_message = message
                transition_entity.save(update_fields=['error_message'])
            raise exception
    return wrapped
9ac592100445a0232efc4afaa3807b050c8eddff
3,720
import requests
import json


def GetTSAWaitTimes(airportCode):
    """
    Returns data from the TSA Wait Times API for a particular airport shortcode.

    :param airportCode: 3-letter shortcode of airport
    :return: Returns the full parsed json data from TSA Wait Times API
    """
    base_url = "http://apps.tsa.dhs.gov/MyTSAWebService/GetTSOWaitTimes.ashx"
    params_tsa_d = {}
    params_tsa_d['ap'] = airportCode
    params_tsa_d['output'] = 'json'
    try:
        ## Uncomment this line if you want to get with caching for testing purposes
        # tsa_result_diction = json.loads(get_with_caching(base_url, params_tsa_d, saved_cache, cache_fname))

        ## Comment out these two lines if you want to enable caching
        results_tsa = requests.get(base_url, params=params_tsa_d)
        tsa_result_diction = json.loads(results_tsa.text)
        return tsa_result_diction
    except Exception:
        print("Error: Unable to load TSA wait times. Please try again.")
        print("Exception: ")
        # sys.exit(1)
        quit()
bd03be14c95a3892ac75a0396da12ca04b52a59b
3,721
def namedtuple_to_dict(model_params):
    """Transfers model specification from a named tuple class object to dictionary."""
    init_dict = {}

    init_dict["GENERAL"] = {}
    init_dict["GENERAL"]["num_periods"] = model_params.num_periods
    init_dict["GENERAL"]["num_choices"] = model_params.num_choices

    init_dict["CONSTANTS"] = {}
    init_dict["CONSTANTS"]["delta"] = model_params.delta
    init_dict["CONSTANTS"]["mu"] = model_params.mu
    init_dict["CONSTANTS"]["benefits"] = model_params.benefits

    init_dict["INITIAL_CONDITIONS"] = {}
    init_dict["INITIAL_CONDITIONS"]["educ_max"] = model_params.educ_max
    init_dict["INITIAL_CONDITIONS"]["educ_min"] = model_params.educ_min

    init_dict["SIMULATION"] = {}
    init_dict["SIMULATION"]["seed_sim"] = model_params.seed_sim
    init_dict["SIMULATION"]["num_agents_sim"] = model_params.num_agents_sim

    init_dict["SOLUTION"] = {}
    init_dict["SOLUTION"]["seed_emax"] = model_params.seed_emax
    init_dict["SOLUTION"]["num_draws_emax"] = model_params.num_draws_emax

    init_dict["PARAMETERS"] = {}
    init_dict["PARAMETERS"]["optim_paras"] = model_params.optim_paras

    init_dict["DERIVED_ATTR"] = {}
    init_dict["DERIVED_ATTR"]["educ_range"] = model_params.educ_range
    init_dict["DERIVED_ATTR"]["shocks_cov"] = model_params.shocks_cov

    return init_dict
9ac2f23aff3b9c57599eb2c2c6cacd455ac711a5
3,722
from typing import Mapping
from pathlib import Path
import os


def _strip_paths(notebook_json: Mapping, project_root: Path):
    """Strip user paths from given notebook."""
    project_root_string = str(project_root) + os.sep
    mutated = False
    for cell in notebook_json["cells"]:
        if cell["cell_type"] == "code":
            for output in cell["outputs"]:
                for line_number, line in enumerate(output.get("text", [])):
                    if project_root_string in line:
                        output["text"][line_number] = line.replace(
                            project_root_string, ""
                        )
                        mutated = True
    return notebook_json, mutated
318493239c964104b4838ffe2ba2b37a295ef792
3,723
import time


def time_as_int() -> int:
    """
    Syntactic sugar for

    >>> from time import time
    >>> int(time())
    """
    return int(time.time())
f7f6d037d156c09a01c0ff13f8b43418133ab1b0
3,724
def should_retry_http_code(status_code):
    """
    :param status_code: (int) http status code to check for retry eligibility
    :return: (bool) whether or not responses with the status_code should be retried
    """
    return status_code not in range(200, 500)
69acb5bd34b06e1ff1e29630ac93e60a3ccc835c
3,725
def extract_values(inst):
    """
    :param inst: the instance
    :return: python values extracted from the instance
    """
    # inst should already be python
    return inst
087bb00ee6e3666b4a9e682ca420623982a12102
3,726
import re


def eq_portions(actual: str, expected: str):
    """
    Compare whether actual matches portions of expected. The portions to
    ignore are of two types:
    - ***: ignore anything in between the left and right portions, including empty
    - +++: ignore anything in between left and right, but non-empty

    :param actual: string to test
    :param expected: expected string, containing at least one of the two patterns
    :return: a list of the portions ignored; if empty, it means there is no match.

    >>> eq_portions('', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('_1__aaaaaa__2__ccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '__2__', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee')
    ()
    >>> eq_portions('aaaaaa_1__ccccc__2_eeeeeee', '***aaaaaa***ccccc+++eeeeeee***')
    ('', '_1__', '__2_', '')
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', '***aaaaaa')
    ()
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', 'aaaaaa')
    Traceback (most recent call last):
    ...
    ValueError: The 'expected' argument must contain at least one *** OR +++
    """
    re_expect = re.escape(expected)
    ANYTHING = re.escape('\\*' * 3)
    SOMETHING = re.escape('\\+' * 3)
    if not re.search(ANYTHING, re_expect) and not re.search(SOMETHING, re_expect):
        raise ValueError("The 'expected' argument must contain at least one *** OR +++")
    re_expect = re.sub(SOMETHING, '(.+)', re_expect)
    re_expect = re.sub(ANYTHING, '(.*)', re_expect)
    matches = re.fullmatch(re_expect, actual)
    if not matches:
        return ()
    return matches.groups()
704b2a83575347c5143c2dc0aca5227a8fc5bd4b
3,727
import numpy as np


def _organize_arch(fils, pth):
    """Allocate data from each specific type of file (keys from the input
    dict) to a new dict

    Arguments:
        fils {dict} -- Dictionary containing type of files and list of files

    Returns:
        [dict] -- [description]
    """
    imgdata = dict()
    for i in fils.keys():
        images = dict()
        for ii in np.arange(len(fils[i])):
            images[str('img_' + str(ii + 1))] = {
                'path': pth + str('\\') + str(fils[i][ii]),
                'coords': np.loadtxt(pth + str('\\') + str(fils[i][ii]),
                                     skiprows=1, usecols=(-2, -1))
            }
        imgdata[i] = images
    return imgdata
c62c9b23bf4735c2062090d77278ce5a8acbd668
3,728
def sort(request):
    """Boolean sort keyword for concat and DataFrame.append."""
    return request.param
83f35eb41bc0cf7eecea932ae4f14646d9e8732f
3,729
def is_comprehension(leaf):
    """
    Return true if the leaf is the beginning of a list/set/dict comprehension.
    Returns true for generators as well
    """
    if leaf.type != 'operator' or leaf.value not in {'[', '(', '{'}:
        return False
    sibling = leaf.get_next_sibling()
    return (sibling.type in {'testlist_comp', 'dictorsetmaker'}
            and sibling.children[-1].type == 'sync_comp_for')
11fff76ff8ed19b3d57359b56db886c003603a86
3,730
def get_awb_shutter(f):
    """
    Get AWB and shutter speed from file object

    This routine extracts the R and B white balance gains and the shutter
    speed from a jpeg file made using the Raspberry Pi camera. These are
    stored as text in a custom Makernote.

    The autoexposure and AWB white balance values are not available directly
    until a picture is taken and are saved in a Jpeg.

    Returns 0 for the values if they're not found
    """
    f.seek(256)
    s = f.read(512)    # Only part of the header needed
    r_pos = s.find('gain_r=')
    b_pos = s.find('gain_b=')
    s_pos = s.find(' exp=')
    gain_r = eval(s[r_pos + 7:r_pos + 12].split()[0]) if r_pos > -1 else 0
    gain_b = eval(s[b_pos + 7:b_pos + 12].split()[0]) if b_pos > -1 else 0
    shutter = eval(s[s_pos + 5:s_pos + 12].split()[0]) if s_pos > -1 else 0
    return (gain_r, gain_b, shutter)
cfafdf531809729ae0ec96ab90a60a4961b9437a
3,732
def format_rfidcard(rfidcard):
    """
    :type rfidcard: apps.billing.models.RfidCard
    """
    return {
        'atqa': rfidcard.atqa if len(rfidcard.atqa) > 0 else None,
        'sak': rfidcard.sak if len(rfidcard.sak) > 0 else None,
        'uid': rfidcard.uid,
        'registered_at': rfidcard.registered_at.isoformat(),
        'user': rfidcard.user.username,
    }
120ca8e338b01235b2ba12ae3f874fd317ffebe8
3,733
def make_exposure_shares(exposure_levels, geography="geo_nm", variable="rank"):
    """Aggregate shares of activity at different levels of exposure

    Args:
        exposure_levels (df): employment by lad and sector and exposure ranking
        geography (str): geography to aggregate over
        variable (str): variable we want to calculate shares over
    """
    exp_distr = (
        exposure_levels.groupby(["month_year", variable, geography])["value"]
        .sum()
        .reset_index(drop=False)
        .groupby([geography, "month_year"])
        .apply(lambda x: x.assign(share=lambda df: df["value"] / df["value"].sum()))
    ).reset_index(drop=True)
    return exp_distr
02d990f2b08e3acb2a2b8ac01e44848770bdea71
3,734
from collections import OrderedDict


def load_jed(fn):
    """
    JEDEC file generated by 1410/84 from PALCE20V8H-15 06/28/20 22:42:11*
    DM AMD*
    DD PALCE20V8H-15*
    QF2706*
    G0*
    F0*
    L00000 0000000000000000000000000100000000000000*
    """
    ret = {}
    d = OrderedDict()
    with open(fn) as f:
        li = 0
        for l in f:
            li += 1
            # remove *, newline
            l = l.strip()[0:-1]
            if not l:
                continue
            if li == 2:
                ret["description"] = l
                continue
            parts = l.split(" ")
            main_line = " ".join(parts[1:])
            if parts[0] == "DM":
                ret["vendor"] = main_line
            elif parts[0] == "DD":
                ret["part"] = main_line
            elif l[0:2] == "QF":
                ret["len"] = int(l[2:])
            elif l[0] == "L":
                # L00000 0000000000000000000000000100000000000000*
                addr, bits = l.split(" ")
                addr = int(addr[1:], 10)
                d[addr] = bits
            else:
                continue
    ret["data"] = d
    return ret
6570bcdaabb495c13e9419a532c85b15efdf957a
3,739
def load_decoder(autoencoder):
    """
    Gets the decoders associated with the inputted model
    """
    dim = len(autoencoder.get_config()['input_layers'])
    mag_phase_flag = False
    decoders = []
    if dim == 2:
        mag_phase_flag = True
        decoders.append(autoencoder.get_layer('mag_decoder'))
        decoders.append(autoencoder.get_layer('phase_decoder'))
    else:
        decoders.append(autoencoder.get_layer('decoder'))
    return decoders, mag_phase_flag
8e39470e48f5a6c147d93567c0bdb33a588c790d
3,740
def detect_side(start: dict, point: dict, degrees):
    """Detect to which side the robot should rotate"""
    if start['lat'] < point['lat'] and start['lng'] < point['lng']:
        return f'{degrees} degrees right'
    elif start['lat'] < point['lat'] and start['lng'] > point['lng']:
        return f'{degrees} degrees left'
    elif start['lat'] > point['lat'] and start['lng'] < point['lng']:
        return f'{degrees + 90} degrees right'
    elif start['lat'] > point['lat'] and start['lng'] > point['lng']:
        return f'{degrees + 90} degrees left'
    elif degrees == 0:
        return f'{0} degrees'
    elif degrees == 180:
        return f'{180} degrees right'
    elif start['lat'] == point['lat'] and start['lng'] < point['lng']:
        return f'{degrees} degrees right'
    elif start['lat'] == point['lat'] and start['lng'] > point['lng']:
        return f'{degrees} degrees left'
124833bbdcdf36c280cdde8e829f15ae5301e323
3,741
import torch


def moving_sum(x, start_idx: int, end_idx: int):
    """
    From MONOTONIC CHUNKWISE ATTENTION
    https://arxiv.org/pdf/1712.05382.pdf
    Equation (18)

    x = [x_1, x_2, ..., x_N]
    MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n-(start_idx-1)}^{n+end_idx-1} x_m
    for n in {1, 2, 3, ..., N}

    x : src_len, batch_size
    start_idx : start idx
    end_idx : end idx

    Example
    src_len = 5
    batch_size = 3
    x = [[ 0,  5, 10],
         [ 1,  6, 11],
         [ 2,  7, 12],
         [ 3,  8, 13],
         [ 4,  9, 14]]

    MovingSum(x, 3, 1) =
        [[ 0,  5, 10],
         [ 1, 11, 21],
         [ 3, 18, 33],
         [ 6, 21, 36],
         [ 9, 24, 39]]

    MovingSum(x, 1, 3) =
        [[ 3, 18, 33],
         [ 6, 21, 36],
         [ 9, 24, 39],
         [ 7, 17, 27],
         [ 4,  9, 14]]
    """
    assert start_idx > 0 and end_idx > 0
    assert len(x.size()) == 2
    src_len, batch_size = x.size()
    # batch_size, 1, src_len
    x = x.t().unsqueeze(1)
    # batch_size, 1, src_len
    moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])

    moving_sum = (
        torch.nn.functional.conv1d(
            x, moving_sum_weight, padding=start_idx + end_idx - 1
        )
        .squeeze(1)
        .t()
    )
    moving_sum = moving_sum[end_idx:-start_idx]

    assert src_len == moving_sum.size(0)
    assert batch_size == moving_sum.size(1)

    return moving_sum
fa3cb672e23fccad75965da2ca10955134167c7e
3,742
def make_sequence_output(detections, classes):
    """
    Create the output object for an entire sequence

    :param detections: A list of lists of detections. Must contain an entry
        for each image in the sequence
    :param classes: The list of classes in the order they appear in the label
        probabilities
    :return:
    """
    return {
        'detections': detections,
        'classes': classes
    }
019d3b74699af20a9f3cbc43b575e8bae5e15946
3,743
def fix(text):
    """Repairs encoding problems."""
    # NOTE(Jonas): This seems to be fixed on the PHP side for now.
    # import ftfy
    # return ftfy.fix_text(text)
    return text
7fd97db345a604131f52b272a7dd13ab4f3f9153
3,744
def from_url_representation(url_rep: str) -> str:
    """Reconvert url representation of path to actual path"""
    return url_rep.replace("__", "/").replace("-_-", "_")
5cf4e1e8cb284c66449807ea275e4fa6b5a3e3ad
3,745
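A round-trip style example for the from_url_representation snippet above (the path is made up):

assert from_url_representation('docs__guide-_-v2.md') == 'docs/guide_v2.md'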
def encode(value):
    """
    Encode strings in UTF-8.

    :param value: value to be encoded in UTF-8
    :return: encoded value
    """
    return str(u''.join(value).encode('utf-8'))
697f99f028d4b978b591d006273b9d5f688711f3
3,747
def get_season(months, str_='{}'):
    """
    Creates a season string.

    Parameters:
    - months (list of int)
    - str_ (str, optional): Formatter string, should contain exactly one {}
      at the position where the season substring is included.

    Returns:
        str
    """
    if months is None:
        return ''
    elif len(set(months).difference([1, 2, 12])) == 0:
        return str_.format('DJF')
    elif len(set(months).difference([3, 4, 5])) == 0:
        return str_.format('MAM')
    elif len(set(months).difference([6, 7, 8])) == 0:
        return str_.format('JJA')
    elif len(set(months).difference([9, 10, 11])) == 0:
        return str_.format('SON')
    elif len(set(months).difference([11, 12, 1, 2, 3])) == 0:
        return str_.format('NDJFM')
    elif len(set(months).difference([5, 6, 7, 8, 9])) == 0:
        return str_.format('MJJAS')
    else:
        return str_.format('-'.join(map(str, months)))
73b4e8169f08ef286a0b57779d22c3436538fc30
3,748
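Illustrative calls for the get_season snippet above; the month lists are invented:

assert get_season([12, 1, 2]) == 'DJF'
assert get_season([6, 7, 8], 'season: {}') == 'season: JJA'
assert get_season([1, 7]) == '1-7'   # no named season matches
assert get_season(None) == ''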
def data_availability(tags):
    """
    Get availability based on the validation tags

    Args:
        tags (pandas.DataFrame): errors tagged as true (see function data_validation)

    Returns:
        pandas.Series: availability
    """
    return ~tags.any(axis=1)
240bed8f169d23610f11c214d3644f02e5435412
3,749
def is_empty_config(host):
    """
    Check if any services should be configured to run on the given host.
    """
    return host.AS is None
c4ec3861c497ac49ed69ecd1d6da31ab8fe2829c
3,751
def total_value(metric):
    """Given a time series of values, sum the values"""
    total = 0
    for i in metric:
        total += i
    return total
4454bfaeb0797bc03b14819bde48dc8f5accc4d3
3,752
def se_beta_formatter(value: str) -> str:
    """
    SE Beta formatter.

    This formats SE beta values. A valid SE beta value is a positive float.

    @param value:
    @return:
    """
    try:
        se_beta = float(value)
        if se_beta >= 0:
            result = str(se_beta)
        else:
            raise ValueError(f'SE beta expected positive float "{value}"')
    except ValueError as value_error:
        raise ValueError(
            f'SE beta could not be parsed as float "{value}" details: {value_error}',
        ) from value_error
    return result
30dde489e1a8a70c0f1093caa1ce289c759b26d6
3,754
def extract_urlparam(name, urlparam):
    """
    Attempts to extract a url parameter embedded in another URL parameter.
    """
    if urlparam is None:
        return None
    query = name + '='
    if query in urlparam:
        split_args = urlparam[urlparam.index(query):].replace(query, '').split('&')
        return split_args[0] if split_args else None
    else:
        return None
198771d40eeddc3b7dbf2924d9d49fe7a7f0a51d
3,755
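Example behaviour of the extract_urlparam snippet above (the query strings are invented):

assert extract_urlparam('q', 'foo=1&q=2&r=3') == '2'
assert extract_urlparam('q', 'foo=1') is None
assert extract_urlparam('q', None) is None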
def parse_gt_from_anno(img_anno, classes):
    """parse_gt_from_anno"""
    print('parse ground truth files...')
    ground_truth = {}
    for img_name, annos in img_anno.items():
        objs = []
        for anno in annos:
            if anno[1] == 0. and anno[2] == 0. and anno[3] == 0. and anno[4] == 0.:
                continue
            if int(anno[0]) == -1:
                continue
            xmin = anno[1]
            ymin = anno[2]
            xmax = xmin + anno[3] - 1
            ymax = ymin + anno[4] - 1
            xmin = int(xmin)
            ymin = int(ymin)
            xmax = int(xmax)
            ymax = int(ymax)
            cls = classes[int(anno[0])]
            gt_box = {'class': cls, 'box': [xmin, ymin, xmax, ymax]}
            objs.append(gt_box)
        ground_truth[img_name] = objs
    return ground_truth
63ba02bb0511cdc02245528041257639e764605f
3,759
def pt_to_tup(pt):
    """
    Convenience method to generate a pair of two ints from a tuple or list.

    Parameters
    ----------
    pt : list OR tuple
        Can be a list or a tuple of >=2 elements as floats or ints.

    Returns
    -------
    pt : tuple of int
        A pair of two ints.
    """
    return (int(pt[0]), int(pt[1]))
7013b2477959f528b98d364e4cc44ac8700fb366
3,760
import argparse


def parsing(lst=None):
    """
    Function for parsing command line
    >>> parsing(["2020", "80", "90", "dataset"])
    (2020, 80.0, 90.0, 'dataset')
    """
    parser = argparse.ArgumentParser(
        description="""Module, which reads data from a file with a films list,
determines films made in the given year, and the geolocation of their
production places. Then finds 10 or fewer such places nearest to the given
point, makes markers for them, and creates a map with a layer of those
markers. Also, there is another layer, which contains markers of film
shooting places in Ukraine.
You should enter the year of the films' production, the coordinates of the
needed point, in comparison to which nearest films will be displayed
(lat, lon), and the path to the dataset with your films.""")
    parser.add_argument("year", metavar="Year", type=int,
                        help="Year of films, which will be displayed.")
    parser.add_argument("latitude", metavar="Latitude", type=float,
                        help="Latitude of your point.")
    parser.add_argument("longitude", metavar="Longitude", type=float,
                        help="Longitude of your point.")
    parser.add_argument("path", metavar="Path", help="Path to your dataset.")
    if lst:
        results = parser.parse_args(lst)
    else:
        results = parser.parse_args()
    universal_message = ", please check your coordinates"
    if not -90 <= results.latitude <= 90:
        message = "%r not in range [-90, 90]" % (results.latitude,)
        raise argparse.ArgumentTypeError(message + universal_message)
    if not -180 <= results.longitude <= 180:
        message = "%r not in range [-180, 180]" % (results.longitude,)
        raise argparse.ArgumentTypeError(message + universal_message)
    return results.year, results.latitude, results.longitude, results.path
f625f09b31b60b80d91474560ca01b5df92d567c
3,762
def flatten(lst):
    """Shallow flatten *lst*"""
    return [a for b in lst for a in b]
203e971e43aea4d94bfa0ffa7057b416ef0bf545
3,763
def sep_num(number, space=True):
    """
    Creates a string representation of a number with separators each thousand.
    If space is True, then it uses spaces for the separator otherwise it will
    use commas

    Note
    ----
    Source: https://stackoverflow.com/questions/16670125/python-format-string-thousand-separator-with-spaces

    :param number: A number
    :type number: int | float
    :param space: Separates numbers with spaces if True, else with commas
    :type space: bool
    :return: string representation with space separation
    :rtype: str
    """
    if space:
        return '{:,}'.format(number).replace(',', ' ')
    else:
        return '{:,}'.format(number)
ee7dfbb60fb01bb7b6bb84cbe56ec50dfab4b339
3,764
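Sample outputs for the sep_num snippet above; the numbers are arbitrary:

assert sep_num(1234567) == '1 234 567'
assert sep_num(1234567, space=False) == '1,234,567'
assert sep_num(1234.5) == '1 234.5'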
def all_results_failed(subsystems):
    """Check if all results have failed status"""
    for subsystem in subsystems.values():
        if subsystem['subsystemStatus'] == 'OK':
            # Found non-failed subsystem
            return False
    # All results failed
    return True
6612397c5b1605ad3e623e3c47264869c93cd47d
3,765
def LowercaseMutator(current, value):
    """Lower the value."""
    return current.lower()
7fed0dc4533948c54b64f649e2c85dca27ee9bc5
3,767
from typing import Dict
from typing import Any
from typing import MutableMapping
import logging


def merge_optional(default_dict: Dict[str, Any], update_dict: Dict[str, Any], tpe: str):
    """
    Function to merge dictionaries to add set parameters from update
    dictionary into default dictionary.

    @param default_dict: Default configuration dictionary.
    @type default_dict: dict
    @param update_dict: Update configuration to be merged into default configurations.
    @type update_dict: dict
    @param tpe: String representation of type of learner.
    @type tpe: str
    @return: Result of merged dictionaries.
    @rtype: dict
    """
    default_copy = default_dict.copy()
    for k, v in default_copy.items():  # pylint: disable=invalid-name
        if k in update_dict:
            if all(isinstance(e, MutableMapping) for e in (v, update_dict[k])):
                update_dict[k] = merge_optional(v, update_dict[k], tpe)
            else:
                logging.warning(f"Gotten unknown alternative mapping {k}:{v} for {tpe}")

    # Base case
    update = list(filter(lambda item: item[1] is not None, update_dict.items()))
    for k, v in update:  # pylint: disable=invalid-name
        if not isinstance(v, dict):
            logging.info(f'Updating {k} from {default_copy[k]} to {v} for {tpe}')
            default_copy[k] = v
    return default_copy
32e52e58604b01061b6c5a3122287c0e5d8a9a84
3,769
import pathlib


def testdata(request):
    """
    If expected data is required for a test this fixture returns the path
    to a folder with name '.testdata' located in the same directory as the
    calling test module
    """
    testdata_dir = '.testdata'
    module_dir = pathlib.Path(request.fspath).parent
    return module_dir / testdata_dir
5d9a440b178aca00635f567420aaa9c406a1d7d2
3,770
import math


def inverse_document_frequency(word_occurrence, num_texts):
    """Takes in the number of texts a word occurs in and the total number
    of texts, and returns the log of the number of texts over the number
    of texts where the word occurs"""
    try:
        IDF = float(num_texts) / float(word_occurrence)
        return math.log(IDF)
    except ZeroDivisionError:
        return 0
a64e84b3c7d378e61765a84d3f4405e77b2ffd40
3,771
import argparse


def parse_command_arguments():
    """Returns parsed command arguments"""
    parser = argparse.ArgumentParser(description="svg-to-swift converter")
    parser.add_argument("--input_file", required=True,
                        help="SVG file to convert.")
    parser.add_argument("--output_file", default="svg.swift",
                        help="File to save in swift code.")
    return parser.parse_args()
c01ed5e6c1f5ca116e87da4e686d866a67876daa
3,772
def drop_useless_columns(data):
    """Drop columns that carry duplicate or useless information."""
    data = data.drop(
        labels=[
            # we stay in a given city
            "agency_id",
            "agency_name",
            "agency_short_name",
            # we stay on a given transportation network
            "transportation_type",
            "transportation_subtype",
            # we already have stop id
            "stop_name_unofficial",
            # we already have line name
            "line_id",
            # we don't need this
            "circuit_transfer",
        ],
        axis=1,
    )
    return data
7a47625a5df7e9fa66cefe2f326af3f0b9f59b79
3,773
import math


def expfloats(floats):
    """Manipulates floats so that their tiles are logarithmic sizes large to small"""
    return [math.exp(i) for i in floats]
3943b8f8eedd4195693e0bace2223819f3728bb2
3,774
import re


def normalize(string: str) -> str:
    """
    Normalize a text string.

    :param string: input string
    :return: normalized string
    """
    string = string.replace("\xef\xbb\xbf", "")  # remove UTF-8 BOM
    string = string.replace("\ufeff", "")  # remove UTF-16 BOM
    # string = unicodedata.normalize("NFKD", string)  # convert to NFKD normal form
    string = re.compile(r"[0-9]").sub("0", string)  # map all numbers to "0"
    string = re.compile(r"(?:''|``|[\"„“”‘’«»])").sub("'", string)  # normalize quotes
    string = re.compile(r"(?:[‒–—―]+|-{2,})").sub("--", string)  # normalize dashes
    string = re.compile(r"\s+").sub(" ", string)  # collapse whitespace characters
    return string.strip()
2adaeffb60af598dad40bd6f5cd7e61e6b238123
3,775
def extract_data(mask, dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cor=None):
    """Get a clean sample based on mask

    Parameters
    ----------
    mask : array of boolean
        mask for extract data
    dra/ddc : array of float
        R.A.(*cos(Dec.))/Dec. differences
    dra_err/ddc_err : array of float
        formal uncertainty of dra(*cos(dc_rad))/ddc
    ra_rad/dc_rad : array of float
        Right ascension/Declination in radian

    Returns
    ----------
    dra_new/ddc_new : array of float
        R.A.(*cos(Dec.))/Dec. differences for the clean sample
    dra_err_new/ddc_err_new : array of float
        formal uncertainty of dra(*cos(dc_rad))/ddc for the clean sample
    ra_rad_new/dc_rad_new : array of float
        Right ascension/Declination in radian for the clean sample
    ra_dc_cor_new : array of float
        covariance/correlation coefficient between dra and ddc for the clean sample
    """
    # Extract the clean sample
    dra_new, ddc_new = dra[mask], ddc[mask]
    dra_err_new, ddc_err_new = dra_err[mask], ddc_err[mask]
    ra_rad_new, dc_rad_new = ra_rad[mask], dc_rad[mask]
    if ra_dc_cor is None:
        ra_dc_cor_new = ra_dc_cor
    else:
        ra_dc_cor_new = ra_dc_cor[mask]
    return dra_new, ddc_new, dra_err_new, ddc_err_new, ra_rad_new, dc_rad_new, ra_dc_cor_new
70286c6134fb19833f6033c827bb2ab2cd26afb1
3,776
from typing import Iterable


def token_converter(tokens: Iterable[str]) -> Iterable[str]:
    """Convert tokens."""
    def convert(token: str) -> str:
        return token.lower().replace("-", "_")

    return map(convert, tokens)
93bf2436c81091ec55b6a7a4d6d3fc728a68e093
3,778
def update_instance(instance, validated_data):
    """Update all the instance's fields specified in the validated_data"""
    for key, value in validated_data.items():
        setattr(instance, key, value)
    return instance.save()
2f4d5c4ec9e524cbe348a5efad9ecae27739b339
3,779
import pipes


def _ShellQuote(command_part):
    """Escape a part of a command to enable copy/pasting it into a shell."""
    return pipes.quote(command_part)
31ccd5bd64de657cd3ac5c36c643e9f2f09f2318
3,780
import math


def circlePoints(x, r, cx, cy):
    """The function returns the y coordinate of a circumference's point

    :x: x's coordinate value.
    :r: length of the radius.
    :cx: x coordinate of the center.
    :cy: y coordinate of the center."""
    return math.sqrt(math.pow(r, 2) - math.pow(x - cx, 2)) + cy
c2cc14a845dccbcf62a38be3af69808024289adc
3,781
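A worked example for the circlePoints snippet above: on the circle of radius 5 centred at the origin, x = 3 gives y = sqrt(25 - 9) = 4.

assert circlePoints(3, 5, 0, 0) == 4.0   # upper semicircle only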
import torch


def get_gram_matrix(tensor):
    """
    Returns a Gram matrix of dimension (distinct_filter_count, distinct_filter_count)
    where G[i,j] is the inner product between the vectorised feature map i and j
    in layer l
    """
    G = torch.mm(tensor, tensor.t())
    return G
ad86f06768c07d6fe1ff509d996991f786ea1ffa
3,782
def deduplicate(s, ch):
    """
    From http://stackoverflow.com/q/42216559/610569

        s = 'this is  an   irritating string with  random spacing  .'
        deduplicate(s, ' ')
        'this is an irritating string with random spacing .'
    """
    return ch.join([substring for substring in s.strip().split(ch) if substring])
5b2bb10376143a1597ddfab1711716c802cdf113
3,783
def sum_series(n):
    """Calculate sum of n+(n-2)+(n-4)..."""
    return n if n < 2 else n + sum_series(n - 2)
317317fc6a7f14a9cbd564266b73ac087b2bdbd2
3,784
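Sample values for the sum_series snippet above:

assert sum_series(6) == 12   # 6 + 4 + 2
assert sum_series(5) == 9    # 5 + 3 + 1
assert sum_series(1) == 1    # base case: n < 2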
def get_all_contained_items(item, stoptest=None):
    """
    Recursively retrieve all items contained in another item

    :param text_game_maker.game_objects.items.Item item: item to retrieve
        items from
    :param stoptest: callback to call on each sub-item to test whether
        recursion should continue. If stoptest(item) == True, recursion will
        stop for that item
    :return: list of retrieved items
    :rtype: [text_game_maker.game_objects.items.Item]
    """
    ret = []
    if not item.is_container:
        return ret
    stack = [item]
    while stack:
        subitem = stack.pop(0)
        for i in subitem.items:
            ret.append(i)
            if i.is_container:
                if stoptest and stoptest(i):
                    continue
                stack.append(i)
    return ret
d04e5c297dddb70db83637e748281f04b08b6a25
3,785
def conv_seq_to_sent_symbols(seq, excl_symbols=None, end_symbol='.',
                             remove_end_symbol=True):
    """
    Converts sequences of tokens/ids into a list of sentences (tokens/ids).

    :param seq: list of tokens/ids.
    :param excl_symbols: tokens/ids which should be excluded from the final
        result.
    :param end_symbol: self-explanatory.
    :param remove_end_symbol: whether to remove from each sentence the end
        symbol.
    :return: list of lists, where each sub-list contains tokens/ids.
    """
    excl_symbols = excl_symbols if excl_symbols else {}
    assert end_symbol not in excl_symbols
    coll = []
    curr_sent = []
    for symbol in seq:
        if symbol in excl_symbols:
            continue
        if symbol == end_symbol:
            if not remove_end_symbol:
                curr_sent.append(symbol)
            coll.append(curr_sent)
            curr_sent = []
        else:
            curr_sent.append(symbol)
    if curr_sent:
        coll.append(curr_sent)
    return coll
a87da4bb5c34882d882832380f3831929ad41415
3,786
def param_value(memory, position, mode):
    """Get the value of a param according to its mode"""
    if mode == 0:
        # position mode
        return memory[memory[position]]
    elif mode == 1:
        # immediate mode
        return memory[position]
    else:
        raise ValueError("Unknown mode : ", mode)
e02ed7e1baea57af4b08c408b6decadee9c72162
3,787
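An Intcode-style example for the param_value snippet above (the memory contents are illustrative):

memory = [1002, 4, 3, 4, 33]
assert param_value(memory, 1, 0) == 33   # position mode: memory[memory[1]]
assert param_value(memory, 1, 1) == 4    # immediate mode: memory[1]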
def prob17(limit=1000):
    """
    If the numbers 1 to 5 are written out in words: one, two, three, four,
    five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.

    If all the numbers from 1 to 1000 (one thousand) inclusive were written
    out in words, how many letters would be used?

    NOTE: Do not count spaces or hyphens. For example, 342 (three hundred
    and forty-two) contains 23 letters and 115 (one hundred and fifteen)
    contains 20 letters. The use of "and" when writing out numbers is in
    compliance with British usage.
    """
    digits = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
              6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
    exceptions = {10: 'ten', 11: 'eleven', 12: 'twelve', 14: 'fourteen'}
    bases = {2: 'twen', 3: 'thir', 4: 'for', 5: 'fif', 6: 'six',
             7: 'seven', 8: 'eigh', 9: 'nine'}
    powers = {1: 'teen', 10: 'ty', 100: 'hundred', 1000: 'thousand'}
    count = 0
    for num in range(1, limit + 1):
        right = str(num)[-2:]
        if int(right) == 0:
            pass
        elif int(right) in exceptions:
            count += len(exceptions[int(right)])
        elif 10 < int(right) < 20:
            count += len(bases[int(right[1])]) + len(powers[1])
        else:
            if right[-1] != '0':
                count += len(digits[int(right[-1])])
            if len(right) == 2 and right[0] != '0':
                count += len(bases[int(right[0])]) + len(powers[10])
        if len(str(num)) > 2:
            left = str(num)[:-2]
            if right != '00':
                count += 3
            if left[-1] != '0':
                count += len(digits[int(left[-1])]) + len(powers[100])
            if len(left) == 2 and left[0] != '0':
                count += len(digits[int(left[0])]) + len(powers[1000])
    return count
586f60fa4411a5818102a903286aa97095faeffb
3,789
import re


def get_replaceid(fragment):
    """get replace id for shared content"""
    replaceid = re.findall(r":[A-Za-z]+:\s(.+)", fragment)[0]
    return replaceid
25e1a940904d86c5e57d2d36dbd91247c6e08bb3
3,790