Columns: content — string (lengths 35 to 416k); sha1 — string (length 40); id — int64 (0 to 710k)
def _non_blank_line_count(string):
    """
    Parameters
    ----------
    string : str or unicode
        String (potentially multi-line) to search in.

    Returns
    -------
    int
        Number of non-blank lines in string.
    """
    non_blank_counter = 0
    for line in string.splitlines():
        if line.strip():
            non_blank_counter += 1
    return non_blank_counter
dfa6f43af95c898b1f4763573e8bf32ddf659520
708,450
def encode_direct(list_a: list):
    """Problem 13: Run-length encoding of a list (direct solution).

    Parameters
    ----------
    list_a : list
        The input list

    Returns
    -------
    list of list
        A run-length-encoded list

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    if len(list_a) <= 1:
        # In case of an empty or one-element list, return it as-is.
        return list_a
    encoded, current, count = [], list_a[0], 1
    for element in list_a[1:]:
        if current != element:
            # If the element does not match the recorded current value,
            # append the (count, value) pair to the result.
            encoded.append(current if count == 1 else [count, current])
            current, count = element, 1
        else:
            # Another identical element found; increase the counter.
            count += 1
    encoded.append(current if count == 1 else [count, current])
    return encoded
9a20ffd2051003d5350f7e059d98c35310bc9bbe
708,451
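An illustrative call to the run-length encoder above (the input list is made up for the example and is not part of the dataset):

print(encode_direct(['a', 'a', 'a', 'b', 'c', 'c']))
# [[3, 'a'], 'b', [2, 'c']]  -- single elements stay bare, runs become [count, value]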
import re


def _find_word(input):
    """
    _find_word - function to find words in the input sentence

    Inputs:
        - input : string
            Input sentence
    Outputs:
        - outputs : list
            List of words
    """
    # lower case
    input = input.lower()
    # split by whitespace
    input = re.split(pattern=r'[\s]+', string=input)
    # keep tokens matching the WORD_POS pattern
    valid_word = lambda x: bool(re.findall(pattern=r'[a-z]*_[a-z]*', string=x))
    outputs = []
    for token in input:
        if valid_word(token):
            outputs.append(token.split('_')[0])
    return outputs
c2e4aa6b5c127bf03593a9aa2c1ae035e83f5a64
708,452
import math


def truncate(f, n):
    """Floors float to n-digits after comma."""
    return math.floor(f * 10 ** n) / 10 ** n
ae7e935a7424a15c02f7cebfb7de6ca9b4c715c0
708,454
def get_core_blockdata(core_index, splitcore_index, core_bases):
    """
    Get Core Offset and Length

    :param core_index: Index of the Core
    :param splitcore_index: Index of last core before split
    :param core_bases: Array with base offset and offset after split
    :return: Array with core offset and core length
    """
    core_base = int(core_bases[0])
    core_len = int(core_bases[1])
    core_split = 0
    if len(core_bases) > 4:
        core_split = int(core_bases[4])
    core_offset = core_base + core_index * core_len
    if core_split and core_index + 2 > splitcore_index:
        core_offset = core_split + (core_index - splitcore_index + 1) * core_len
    return [core_offset, core_len]
85efb96fa45ecfa3f526374c677e57c70e3dc617
708,455
import argparse


def parse_args():
    """
    Parse input arguments

    Returns
    -------
    args : object
        Parsed args
    """
    h = {
        "program": "Simple Baselines training",
        "train_folder": "Path to training data folder.",
        "batch_size": "Number of images to load per batch. Set according to your PC GPU memory available. "
                      "If you get out-of-memory errors, lower the value. Defaults to 64",
        "epochs": "How many epochs to train for. Once every training image has been shown to the CNN once, "
                  "an epoch has passed. Defaults to 15",
        "test_folder": "Path to test data folder",
        "num_workers": "Number of workers to load in batches of data. Change according to GPU usage",
        "test_only": "Set to true if you want to test a loaded model. Make sure to pass in model path",
        "model_path": "Path to your model",
        "learning_rate": "The learning rate of your model. Tune it if it's overfitting or not learning enough"}

    parser = argparse.ArgumentParser(description=h['program'],
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--train_folder', help=h["train_folder"], type=str)
    parser.add_argument('--batch_size', help=h['batch_size'], type=int, default=64)
    parser.add_argument('--epochs', help=h["epochs"], type=int, default=15)
    parser.add_argument('--test_folder', help=h["test_folder"], type=str)
    parser.add_argument('--num_workers', help=h["num_workers"], type=int, default=5)
    parser.add_argument('--test_only', help=h["test_only"], type=bool, default=False)
    parser.add_argument('--model_path', help=h["model_path"], type=str)
    parser.add_argument('--learning_rate', help=h["learning_rate"], type=float, default=0.003)

    args = parser.parse_args()
    return args
5edfea499b64d35295ffd81403e3253027503d41
708,456
from datetime import datetime


def timestamp(date):
    """Get the timestamp of the `date`, python2/3 compatible

    :param datetime.datetime date: the utc date.
    :return: the timestamp of the date.
    :rtype: float
    """
    return (date - datetime(1970, 1, 1)).total_seconds()
a708448fb8cb504c2d25afa5bff6208abe1159a4
708,457
def pratt_arrow_risk_aversion(t, c, theta, **params):
    """Assume constant relative risk aversion"""
    return theta / c
ccbe6e74a150a4cbd3837ca3ab24bf1074d694c9
708,458
def getdate(targetconnection, ymdstr, default=None): """Convert a string of the form 'yyyy-MM-dd' to a Date object. The returned Date is in the given targetconnection's format. Arguments: - targetconnection: a ConnectionWrapper whose underlying module's Date format is used - ymdstr: the string to convert - default: The value to return if the conversion fails """ try: (year, month, day) = ymdstr.split('-') modref = targetconnection.getunderlyingmodule() return modref.Date(int(year), int(month), int(day)) except Exception: return default
21d27c3ef4e99b28b16681072494ce573e592255
708,459
def operation_dict(ts_epoch, request_dict): """An operation as a dictionary.""" return { "model": request_dict, "model_type": "Request", "args": [request_dict["id"]], "kwargs": {"extra": "kwargs"}, "target_garden_name": "child", "source_garden_name": "parent", "operation_type": "REQUEST_CREATE", }
e7b63d79c6de73616b39e2713a0ba2da6f9e2a25
708,461
def memory_index(indices, t): """Location of an item in the underlying memory.""" memlen, itemsize, ndim, shape, strides, offset = t p = offset for i in range(ndim): p += strides[i] * indices[i] return p
ed97592aa5444cfd6d6894b042b5b103d2de6afc
708,462
def _infer_color_variable_kind(color_variable, data): """Determine whether color_variable is array, pandas dataframe, callable, or scikit-learn (fit-)transformer.""" if hasattr(color_variable, "dtype") or hasattr(color_variable, "dtypes"): if len(color_variable) != len(data): raise ValueError( "color_variable and data must have the same length.") color_variable_kind = "scalars" elif hasattr(color_variable, "transform"): color_variable_kind = "transformer" elif hasattr(color_variable, "fit_transform"): color_variable_kind = "fit_transformer" elif callable(color_variable): color_variable_kind = "callable" elif color_variable is None: color_variable_kind = "none" else: # Assume color_variable is a selection of columns color_variable_kind = "else" return color_variable_kind
a1a21c6df4328331754f9fb960e64cf8bfe09be7
708,463
import os def ParseChromeosImage(chromeos_image): """Parse the chromeos_image string for the image and version. The chromeos_image string will probably be in one of two formats: 1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \ chromiumos_test_image.bin 2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \ chromiumos_test_image.bin We parse these strings to find the 'chromeos_version' to store in the json archive (without the .datatime bit in the first case); and also the 'chromeos_image', which would be all of the first case, but only the part after '/chroot/tmp' in the second case. Args: chromeos_image: string containing the path to the chromeos_image that crosperf used for the test. Returns: version, image: The results of parsing the input string, as explained above. """ # Find the Chromeos Version, e.g. R45-2345.0.0..... # chromeos_image should have been something like: # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin" if chromeos_image.endswith('/chromiumos_test_image.bin'): full_version = chromeos_image.split('/')[-2] # Strip the date and time off of local builds (which have the format # "R43-2345.0.0.date-and-time"). version, _ = os.path.splitext(full_version) else: version = '' # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then # it's an official image that got downloaded, so chop off the download path # to make the official image name more clear. official_image_path = '/chroot/tmp' if official_image_path in chromeos_image: image = chromeos_image.split(official_image_path, 1)[1] else: image = chromeos_image return version, image
49652dad39bcc1df8b3decae4ec374adaf353185
708,464
from datetime import datetime def datetime_to_epoch(date_time: datetime) -> int: """Convert a datetime object to an epoch integer (seconds).""" return int(date_time.timestamp())
73767c663d66464420594e90a438687c9363b884
708,465
from functools import reduce


def inet_aton(s):
    """Convert a dotted-quad to an int."""
    try:
        addr = list(map(int, s.split('.')))
        addr = reduce(lambda a, b: a + b, [addr[i] << (3 - i) * 8 for i in range(4)])
    except (ValueError, IndexError):
        raise ValueError('illegal IP: {0}'.format(s))
    return addr
abc16c14e416f55c9ae469b4b9c1958df265433c
708,466
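An illustrative call to the dotted-quad converter above (addresses chosen arbitrarily, not taken from the dataset):

print(inet_aton('192.168.1.1'))    # 3232235777, i.e. 0xC0A80101
print(hex(inet_aton('10.0.0.1')))  # 0xa000001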
def helper(): """I'm useful helper""" data = { "31 Dec 2019": "Wuhan Municipal Health Commission, China, reported a cluster of cases of pneumonia in Wuhan, Hubei Province. A novel coronavirus was eventually identified.", "1 January 2020": "WHO had set up the IMST (Incident Management Support Team) across the three levels of the organization: headquarters, regional headquarters and country level, putting the organization on an emergency footing for dealing with the outbreak.", "4 January 2020": "WHO reported on social media that there was a cluster of pneumonia cases – with no deaths – in Wuhan, Hubei province." } return data
1f0f58505ce4179d56b2bf6e4cb29e42cdd7cfc9
708,467
def otherEnd(contours, top, limit):
    """top and end are too close; find another top point, the one farthest from top."""
    tt = (0, 9999)
    for li in contours:
        for pp in li:
            p = pp[0]
            if limit(p[0]) and top[1] - p[1] < 15 and abs(top[0] - p[0]) > 50 and p[1] < tt[1]:
                tt = p
    return tt
4f938d33ba28c1999603cd60381ed6d9aec23815
708,468
def preprocessing(string): """helper function to remove punctuation froms string""" string = string.replace(',', ' ').replace('.', ' ') string = string.replace('(', '').replace(')', '') words = string.split(' ') return words
17f41a566c3661ab6ffb842ac6d610425fc779d1
708,469
def _get_rating_accuracy_stats(population, ratings): """ Calculate how accurate our ratings were. :param population: :param ratings: :return: """ num_overestimates = 0 num_underestimates = 0 num_correct = 0 for employee, rating in zip(population, ratings): if rating < employee: num_underestimates += 1 elif rating > employee: num_overestimates += 1 else: num_correct += 1 return num_underestimates, num_correct, num_overestimates
6fefd6faf465a304acc692b465f575cc4c3a62e3
708,470
from typing import Optional from typing import Any def get_or_create_mpc_section( mp_controls: "MpConfigControls", section: str, subkey: Optional[str] = None # type: ignore ) -> Any: """ Return (and create if it doesn't exist) a settings section. Parameters ---------- mp_controls : MpConfigControls The MP Config database. section : str The section name (top level settings item) subkey : Optional[str], optional Optional subkey to create, by default None Returns ------- Any The settings at that section[subkey] location. """ curr_section = mp_controls.get_value(section) if curr_section is None: mp_controls.set_value(section, {}) curr_section = mp_controls.get_value(section) if subkey and subkey not in curr_section: mp_controls.set_value(f"{section}.{subkey}", {}) return mp_controls.get_value(f"{section}.{subkey}") return mp_controls.get_value(section)
60b741f35e0a1c9fe924b472217e0e3b62a1d31e
708,471
def encrypt(plaintext, a, b):
    """
    Encryption function: E(x) = (ax + b) (mod m),
    where m is the number of letters in the alphabet, normally 26.

    :param plaintext:
    :param a:
    :param b:
    :return:
    """
    cipher = ""
    for i in plaintext:
        if not i.isalpha():
            cipher += i
        else:
            n = "A" if i.isupper() else "a"
            cipher += chr((a * (ord(i) - ord(n)) + b) % 26 + ord(n))
    return cipher
0cbb57250d8d7a18740e19875f79127b8057ab06
708,472
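A small illustrative run of the affine-style encryption above (the key pair a=5, b=8 is chosen arbitrarily for the example):

print(encrypt("Hello, World!", a=5, b=8))  # 'Rclla, Oaplx!' -- non-letters pass through unchanged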
import pathlib import shlex import os def construct_gn_command(output_path, gn_flags, python2_command=None, shell=False): """ Constructs and returns the GN command If shell is True, then a single string with shell-escaped arguments is returned If shell is False, then a list containing the command and arguments is returned """ gn_args_string = " ".join( [flag + "=" + value for flag, value in gn_flags.items()]) command_list = [str(pathlib.Path("tools", "gn", "bootstrap", "bootstrap.py")), "-v", "-s", "-o", str(output_path), "--gn-gen-args=" + gn_args_string] if python2_command: command_list.insert(0, python2_command) if shell: command_string = " ".join([shlex.quote(x) for x in command_list]) if python2_command: return command_string else: return os.path.join(".", command_string) else: return command_list
2177ea4436305733268a427e0c4b006785e41b2d
708,473
def _url_as_filename(url: str) -> str: """Return a version of the url optimized for local development. If the url is a `file://` url, it will return the remaining part of the url so it can be used as a local file path. For example, 'file:///logs/example.txt' will be converted to '/logs/example.txt'. Parameters ---------- url: str The url to check and optaimize. Returns ------- str: The url converted to a filename. """ return url.replace('file://', '')
d1aef7a08221c7788f8a7f77351ccb6e6af9416b
708,474
def CheckStructuralModelsValid(rootGroup, xyzGridSize=None, verbose=False): """ **CheckStricturalModelsValid** - Checks for valid structural model group data given a netCDF root node Parameters ---------- rootGroup: netCDF4.Group The root group node of a Loop Project File xyzGridSize: [int,int,int] or None The 3D grid shape to test data in this node to adhere to verbose: bool A flag to indicate a higher level of console logging (more if True) Returns ------- bool True if valid structural model data in project file, False otherwise. """ valid = True if "StructuralModels" in rootGroup.groups: if verbose: print(" Structural Models Group Present") smGroup = rootGroup.groups.get("StructuralModels") # if verbose: print(smGroup) if "easting" in smGroup.ncattrs() and "northing" in smGroup.ncattrs() and "depth" in smGroup.ncattrs(): if xyzGridSize != None: # Check gridSize from extents matches models sizes smGridSize = [smGroup.dimensions["easting"].size,smGroup.dimensions["northing"].size,smGroup.dimensions["depth"].size] if smGridSize != xyzGridSize: print("(INVALID) Extents grid size and Structural Models Grid Size do NOT match") print("(INVALID) Extents Grid Size : ", xyzGridSize) print("(INVALID) Structural Models Grid Size : ", smGridSize) valid = False else: if verbose: print(" Structural Models grid size adheres to extents") else: if verbose: print("No structural models extents in project file") else: if verbose: print("No Structural Models Group Present") return valid
d11ce42b041b8be7516f827883a37b40f6f98477
708,475
def link_name_to_index(model): """ Generate a dictionary for link names and their indicies in the model. """ return { link.name : index for index, link in enumerate(model.links) }
ba0e768b1160218908b6ecf3b186a73c75a69894
708,476
def get_border(border, size):
    """Get border"""
    i = 1
    while size - border // i <= border // i:  # size > 2 * (border // i)
        i *= 2
    return border // i
45233f53cdf6f0edb5b4a9262b61f2a70ac42661
708,477
import re def _get_values(attribute, text): """Match attribute in text and return all matches. :returns: List of matches. """ regex = '{}\s+=\s+"(.*)";'.format(attribute) regex = re.compile(regex) values = regex.findall(text) return values
59a0fdb7a39221e5f728f512ba0aa814506bbc37
708,478
def registry(): """ Return a dictionary of problems of the form: ```{ "problem name": { "params": ... }, ... }``` where `flexs.landscapes.AdditiveAAVPackaging(**problem["params"])` instantiates the additive AAV packaging landscape for the given set of parameters. Returns: dict: Problems in the registry. """ problems = { "heart": {"params": {"phenotype": "heart", "start": 450, "end": 540}}, "lung": {"params": {"phenotype": "lung", "start": 450, "end": 540}}, "kidney": {"params": {"phenotype": "kidney", "start": 450, "end": 540}}, "liver": {"params": {"phenotype": "liver", "start": 450, "end": 540}}, "blood": {"params": {"phenotype": "blood", "start": 450, "end": 540}}, "spleen": {"params": {"phenotype": "spleen", "start": 450, "end": 540}}, } return problems
5dd2e4e17640e0831daf02d0a2a9b9f90305a1c4
708,480
def is_in_period(datetime_, start, end):
    """Determine whether the given datetime falls within the period from start to end."""
    return start <= datetime_ < end
3b830cb8d9e74934a09430c9cd6c0940cf36cf2e
708,481
import requests def get_session(token, custom_session=None): """Get requests session with authorization headers Args: token (str): Top secret GitHub access token custom_session: e.g. betamax's session Returns: :class:`requests.sessions.Session`: Session """ session = custom_session or requests.Session() session.headers = { "Authorization": "token " + token, "User-Agent": "testapp" } return session
88bf566144a55cf36daa46d3f9a9886d3257d767
708,482
import unicodedata def strip_accents(text): """ Strip accents from input String. :param text: The input string. :type text: String. :returns: The processed String. :rtype: String. """ text = unicodedata.normalize('NFD', text) text = text.encode('ascii', 'ignore') text = text.decode("utf-8") return str(text)
4a6e11e0a72438a7e604e90e44a7220b1426df69
708,483
import json def json_formatter(result, _verbose): """Format result as json.""" if isinstance(result, list) and "data" in result[0]: res = [json.dumps(record) for record in result[0]["data"]] output = "\n".join(res) else: output = json.dumps(result, indent=4, sort_keys=True) return output
68aae87577370d3acf584014651af21c7cbfa309
708,484
def pipe_hoop_stress(P, D, t):
    """Calculate the hoop (circumferential) stress in a pipe
    using Barlow's formula.

    Refs: https://en.wikipedia.org/wiki/Barlow%27s_formula
          https://en.wikipedia.org/wiki/Cylinder_stress

    :param P: the internal pressure in the pipe.
    :type P: float
    :param D: the outer diameter of the pipe.
    :type D: float
    :param t: the pipe wall thickness.
    :type t: float

    :returns: the hoop stress in the pipe.
    :rtype: float
    """
    return P * D / 2 / t
9985d35c2c55e697ce21a880bb2234c160178f33
708,485
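A quick worked example for Barlow's formula above (numbers invented for illustration; consistent SI units assumed):

# 10 MPa internal pressure, 0.5 m outer diameter, 0.01 m wall thickness
stress = pipe_hoop_stress(P=10e6, D=0.5, t=0.01)
print(stress)  # 250000000.0 Pa, i.e. 250 MPa = P*D/(2*t)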
import os def parse_file_name(filename): """ Parse the file name of a DUD mol2 file to get the target name and the y label :param filename: the filename string :return: protein target name, y_label string (ligand or decoy) """ bname = os.path.basename(filename) splitted_bname = bname.split('_') if len(splitted_bname) == 3: target_name = splitted_bname[0] y_label_str = splitted_bname[1] elif len(splitted_bname) == 4: target_name = '_'.join([splitted_bname[0], splitted_bname[1]]) y_label_str = splitted_bname[2] else: raise ValueError('File name has not expected format. Can not parse file name.') if y_label_str == 'decoys': y_label = 0 elif y_label_str == 'ligands': y_label = 1 else: raise ValueError('File name has not expected format. Can not parse file name.') return target_name, y_label
8f9de132e622feffc513453be36b80f386b36c9c
708,486
def _IsSingleElementTuple(token): """Check if it's a single-element tuple.""" close = token.matching_bracket token = token.next_token num_commas = 0 while token != close: if token.value == ',': num_commas += 1 if token.OpensScope(): token = token.matching_bracket else: token = token.next_token return num_commas == 1
8d675bcee737ddb106817db79e2b989509d2efaa
708,487
import numpy def ReadCan(filename): """Reads the candump in filename and returns the 4 fields.""" trigger = [] trigger_velocity = [] trigger_torque = [] trigger_current = [] wheel = [] wheel_velocity = [] wheel_torque = [] wheel_current = [] trigger_request_time = [0.0] trigger_request_current = [0.0] wheel_request_time = [0.0] wheel_request_current = [0.0] with open(filename, 'r') as fd: for line in fd: data = line.split() can_id = int(data[1], 16) if can_id == 0: data = [int(d, 16) for d in data[3:]] trigger.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0) trigger_velocity.append( ((data[2] + (data[3] << 8)) - 32768) / 32768.0) trigger_torque.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) trigger_current.append( ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0) elif can_id == 1: data = [int(d, 16) for d in data[3:]] wheel.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0) wheel_velocity.append( ((data[2] + (data[3] << 8)) - 32768) / 32768.0) wheel_torque.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) wheel_current.append( ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0) elif can_id == 2: data = [int(d, 16) for d in data[3:]] trigger_request_current.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) trigger_request_time.append(len(trigger) * 0.001) elif can_id == 3: data = [int(d, 16) for d in data[3:]] wheel_request_current.append( ((data[4] + (data[5] << 8)) - 32768) / 32768.0) wheel_request_time.append(len(wheel) * 0.001) trigger_data_time = numpy.arange(0, len(trigger)) * 0.001 wheel_data_time = numpy.arange(0, len(wheel)) * 0.001 # Extend out the data in the interpolation table. trigger_request_time.append(trigger_data_time[-1]) trigger_request_current.append(trigger_request_current[-1]) wheel_request_time.append(wheel_data_time[-1]) wheel_request_current.append(wheel_request_current[-1]) return (trigger_data_time, wheel_data_time, trigger, wheel, trigger_velocity, wheel_velocity, trigger_torque, wheel_torque, trigger_current, wheel_current, trigger_request_time, trigger_request_current, wheel_request_time, wheel_request_current)
773657474462aa3a129ea7459c72ea0b0dc0cefa
708,488
def delta_t(soil_type):
    """Displacement at Tu"""
    delta_ts = {
        "dense sand": 0.003,
        "loose sand": 0.005,
        "stiff clay": 0.008,
        "soft clay": 0.01,
    }
    try:
        return delta_ts[soil_type]
    except KeyError:
        raise ValueError("Unknown soil type.")
c542adb7c302bc1f50eb4c49bf9da70932758814
708,489
def user(user_type): """ :return: instance of a User """ return user_type()
a8c8cd4ef57915c555864f6fc09dce63c2a1c6fb
708,490
def true_or_false(item):
    """This function is used to assist in getting appropriate
    values set with the PythonOption directive
    """
    try:
        item = item.lower()
    except AttributeError:
        pass
    if item in ['yes', 'true', '1', 1, True]:
        return True
    elif item in ['no', 'false', '0', 0, None, False]:
        return False
    else:
        raise Exception('Cannot interpret %r as a boolean' % (item,))
3e7c0cee07f6796c6134b182572a7d5ff95cf42d
708,491
import time def time_ms(): """currently pypy only has Python 3.5.3, so we are missing Python 3.7's time.time_ns() with better precision see https://www.python.org/dev/peps/pep-0564/ the function here is a convenience; you shall use `time.time_ns() // 1e6` if using >=Python 3.7 """ return int(time.time() * 1e3)
1bff241db79007314d7a876ddd007af137ba7306
708,492
import pipes def login_flags(db, host, port, user, db_prefix=True): """ returns a list of connection argument strings each prefixed with a space and quoted where necessary to later be combined in a single shell string with `"".join(rv)` db_prefix determines if "--dbname" is prefixed to the db argument, since the argument was introduced in 9.3. """ flags = [] if db: if db_prefix: flags.append(' --dbname={0}'.format(pipes.quote(db))) else: flags.append(' {0}'.format(pipes.quote(db))) if host: flags.append(' --host={0}'.format(host)) if port: flags.append(' --port={0}'.format(port)) if user: flags.append(' --username={0}'.format(user)) return flags
2c844def8e6f1154a9962d43c858b39b9a7adf2a
708,493
def roi_intersect(a, b):
    """
    Compute intersection of two ROIs.

    .. rubric:: Examples
    .. code-block::

       s_[1:30], s_[20:40] => s_[20:30]
       s_[1:10], s_[20:40] => s_[10:10]

       # works for N dimensions
       s_[1:10, 11:21], s_[8:12, 10:30] => s_[8:10, 11:21]
    """

    def slice_intersect(a, b):
        if a.stop < b.start:
            return slice(a.stop, a.stop)
        if a.start > b.stop:
            return slice(a.start, a.start)
        _in = max(a.start, b.start)
        _out = min(a.stop, b.stop)
        return slice(_in, _out)

    if isinstance(a, slice):
        if not isinstance(b, slice):
            b = b[0]
        return slice_intersect(a, b)
    b = (b,) if isinstance(b, slice) else b
    return tuple(slice_intersect(sa, sb) for sa, sb in zip(a, b))
d1070c8ec0c493296dfee6bdc54b7430e703bda8
708,494
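An illustrative call to the ROI intersection above, mirroring its docstring examples (numpy's s_ is used only for readable slice literals):

import numpy as np

print(roi_intersect(np.s_[1:30], np.s_[20:40]))  # slice(20, 30, None)
print(roi_intersect(np.s_[1:10, 11:21], np.s_[8:12, 10:30]))
# (slice(8, 10, None), slice(11, 21, None))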
def _flows_finished(pgen_grammar, stack): """ if, while, for and try might not be finished, because another part might still be parsed. """ for stack_node in stack: if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'): return False return True
dd0fe435d1328b3ae83ba2507006b6825ca23087
708,495
def PositionToPercentile(position, field_size): """Converts from position in the field to percentile. position: int field_size: int """ beat = field_size - position + 1 percentile = 100.0 * beat / field_size return percentile
c75869f3d7f8437f28d3463fcf12b2b446fe930a
708,496
from typing import Tuple import os from typing import cast def _terminal_size(fallback: Tuple[int, int]) -> Tuple[int, int]: """ Try to get the size of the terminal window. If it fails, the passed fallback will be returned. """ for i in (0, 1): try: window_width = os.get_terminal_size(i) return cast(Tuple[int, int], tuple(window_width)) except OSError: continue return fallback
3254068444167bad0b87479001b0c22887b32a60
708,497
import ast def _compile(s: str): """compiles string into AST. :param s: string to be compiled into AST. :type s: str """ return compile( source = s, filename = '<unknown>', mode = 'eval', flags = ast.PyCF_ONLY_AST, )
4709cfa84ab6e5d7210924cf3aa206a1d297b7bd
708,498
def temp_get_users_with_permission_form(self): """Used to test that swapping the Form method works""" # Search string: ABC return ()
72390791304d62fc5d78720aac4e2807e918587c
708,499
def hi_joseangel(): """ Hi Jose Angel Function """ return "hi joseangel!"
5889a51977d3ec2269040a9a7e7968801209ff25
708,500
def pytest_report_header(config, startdir): """return a string to be displayed as header info for terminal reporting.""" capabilities = config.getoption('capabilities') if capabilities: return 'capabilities: {0}'.format(capabilities)
4e6ada67f5f08c1db8f5b6206089db4e3ee84f46
708,501
def chessboard_distance(x_a, y_a, x_b, y_b): """ Compute the rectilinear distance between point (x_a,y_a) and (x_b, y_b) """ return max(abs(x_b-x_a),abs(y_b-y_a))
9b11bf328faf3b231df23585914f20c2efd02bf9
708,502
def str_with_tab(indent: int, text: str, uppercase: bool = True) -> str: """Create a string with ``indent`` spaces followed by ``text``.""" if uppercase: text = text.upper() return " " * indent + text
3306ba86781d272a19b0e02ff8d06da0976d7282
708,503
from typing import List def finding_the_percentage(n: int, arr: List[str], query_name: str) -> str: """ >>> finding_the_percentage(3, ['Krishna 67 68 69', 'Arjun 70 98 63', ... 'Malika 52 56 60'], 'Malika') '56.00' >>> finding_the_percentage(2, ['Harsh 25 26.5 28', 'Anurag 26 28 30'], ... 'Harsh') '26.50' """ student_marks = {} for i in range(n): name, *line = arr[i].split() scores = list(map(float, line)) student_marks[name] = sum(scores)/len(scores) return '{:.2f}'.format(student_marks[query_name])
86c2ad777c667f9ba424bc2b707f46465a10accc
708,505
def mvg_logpdf_fixedcov(x, mean, inv_cov): """ Log-pdf of the multivariate Gaussian distribution where the determinant and inverse of the covariance matrix are precomputed and fixed. Note that this neglects the additive constant: -0.5 * (len(x) * log(2 * pi) + log_det_cov), because it is irrelevant when comparing pdf values with a fixed covariance, but it means that this is not the normalised pdf. Args: x (1D numpy array): Vector value at which to evaluate the pdf. mean (1D numpy array): Mean vector of the multivariate Gaussian distribution. inv_cov (2D numpy array): Inverted covariance matrix. Returns: float: Log-pdf value. """ dev = x - mean return -0.5 * (dev @ inv_cov @ dev)
648d1925ed4b4793e8e1ce1cec8c7ccd0efb9f6b
708,506
def extrode_multiple_urls(urls): """ Return the last (right) url value """ if urls: return urls.split(',')[-1] return urls
34ec560183e73100a62bf40b34108bb39f2b04b4
708,508
def take_last_while(predicate, list):
    """Returns a new list containing the last n elements of a given list, passing
    each value to the supplied predicate function, and terminating when the
    predicate function returns false. Excludes the element that caused the
    predicate function to fail. The predicate function is passed one argument:
    (value)"""
    for i, e in enumerate(reversed(list)):
        if not predicate(e):
            # slice from the end; handles i == 0 correctly (empty result)
            return list[len(list) - i:]
    return list
19468c9130e9ab563eebd97c30c0e2c74211e44b
708,509
def abs_p_diff(predict_table, categA='sandwich', categB='sushi'): """Calculates the absolute distance between two category predictions :param predict_table: as returned by `predict_table` :param categA: the first of two categories to compare :param categB: the second of two categoreis to compare :returns: series with the absolute difference between the predictions :rtype: pandas Series """ return abs(predict_table['p_%s' % categA] - predict_table['p_%s' % categB])
235bfc7df29ac4a2b67baff9dfa3ee62204a9aed
708,512
def _is_target_feature(column_names, column_mapping): """Assert that a feature only contains target columns if it contains any.""" column_names_set = set(column_names) column_types = set(column['type'] for column_name, column in column_mapping.iteritems() if column_name in column_names_set) if 'target' in column_types: assert len(column_types) == 1, ( 'Features with target columns can only contain target columns.' 'Found column_types: %s for columns %s' % (column_types, column_names)) return True else: return False
098af45938c616dd0ff2483a27131f15ba50797b
708,513
def validate_engine_mode(engine_mode): """ Validate database EngineMode for DBCluster Property: DBCluster.EngineMode """ VALID_DB_ENGINE_MODES = ( "provisioned", "serverless", "parallelquery", "global", "multimaster", ) if engine_mode not in VALID_DB_ENGINE_MODES: raise ValueError( "DBCluster EngineMode must be one of: %s" % ", ".join(VALID_DB_ENGINE_MODES) ) return engine_mode
69f7952a998b6ca593106c92710909104e21f55f
708,514
def num_false_positives(df): """Total number of false positives (false-alarms).""" return df.noraw.Type.isin(['FP']).sum()
6aa339b86d15072c6a6910a43e70281575da5d36
708,515
def gcd_recursive_by_divrem(m, n): """ Computes the greatest common divisor of two numbers by recursively getting remainder from division. :param int m: First number. :param int n: Second number. :returns: GCD as a number. """ if n == 0: return m return gcd_recursive_by_divrem(n, m % n)
bd25d9cea4813e523ea6bb9bd85c24bf43dd2744
708,516
def get_mzi_delta_length(m, neff=2.4, wavelength=1.55): """ m*wavelength = neff * delta_length """ return m * wavelength / neff
5bcd4b9b217c79a06b48856f7801060787f12e52
708,517
def create_include(workflow_stat): """ Generates the html script include content. @param workflow_stat the WorkflowInfo object reference """ include_str = """ <script type='text/javascript' src='bc_action.js'> </script> <script type='text/javascript' src='bc_""" + workflow_stat.wf_uuid +"""_data.js'> </script> """ return include_str
24151952c9dd5bc4034916dae90a3760fc06ca44
708,519
import subprocess def run_command_unchecked(command, cwd, env=None): """Runs a command in the given dir, returning its exit code and stdio.""" p = subprocess.Popen( command, cwd=cwd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, ) stdout, _ = p.communicate() exit_code = p.wait() return exit_code, stdout.decode('utf-8', 'replace')
89c6fd0acf7e8bb81c837f78252bdaa30fe39ad1
708,520
import sys def postfix(itemString): """transform infixExpre into postfixExpre Algorithm: step1: if operator, stack in; step2: if "(", stack in. step3: if variable, pop out the all continued unary operator until other operator or "(" step4: if ")", pop out all operators until "(", then pop all continued unary operator. step5: goto step1. Arg: itemString: bitwise expression string persented in infix. Return: itemStr: expression string persented in postfix. """ itemStr = "" boperatorList = ["&", "|", "^"] uoperator = "~" opeList = [] for (idx, char) in enumerate(itemString): #open parenthesis, stack it if char == "(": opeList.append(char) #binary operatork, stack it elif char in boperatorList: opeList.append(char) #unary operator elif uoperator in char: opeList.append(char) #closed parenthesis, pop out the operator to string elif char == ")": while(opeList and opeList[-1] != "("): itemStr += opeList[-1] opeList.pop() if opeList and opeList[-1] != "(": print("error!") sys.exit(0) #open parenthesis found opeList.pop() #unary operator found before open parenthesis while(opeList and opeList[-1] == "~"): itemStr += opeList[-1] opeList.pop() #variable name found else: itemStr += char #top of stack is unary operator while(opeList and opeList[-1] in uoperator): itemStr += opeList[-1] opeList.pop() if len(opeList) > 1: print("error in function postfix!") sys.exit(0) #have one operator without parenthesis elif len(opeList): itemStr += opeList[0] return itemStr
1c3bee30f450c1dfab6ca7d0dd057465d8b6e8e5
708,521
def getSuffixes(algorithm, seqType) : """ Get the suffixes for the right algorithm with the right sequence type """ suffixes = {} suffixes['LAST'] = {} suffixes['BLAST'] = {} suffixes['BLAST']['nucl'] = ['nhr', 'nsq', 'nin'] suffixes['BLAST']['prot'] = ['phr', 'psq', 'pin'] suffixes['LAST']['nucl'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ] suffixes['LAST']['prot'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ] if not algorithm in suffixes: return None if not seqType in suffixes[algorithm]: return None return suffixes[algorithm][seqType]
9ab699a71be73381c4dff555f0ef19201589e82f
708,522
def compare_names(namepartsA, namepartsB): """Takes two name-parts lists (as lists of words) and returns a score.""" complement = set(namepartsA) ^ set(namepartsB) intersection = set(namepartsA) & set(namepartsB) score = float(len(intersection))/(len(intersection)+len(complement)) return score
87cbceaaa0acce0b83b5faf66cbe909ad52382eb
708,523
def commonprefix(a, b):
    """Return the length of the longest common prefix of `a` and `b`, together with `b`."""
    pos = 0
    length = min(len(a), len(b))
    while pos < length and a[pos] == b[pos]:
        pos += 1
    return pos, b
75e2f9ac6c3d0c38986cba5f8409ddc87fe8edbe
708,524
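A quick illustrative call to the prefix helper above (strings invented for the example):

print(commonprefix("interspecies", "interstellar"))
# (6, 'interstellar') -- the shared prefix "inters" has length 6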
import math def get_polyend_circle_angles(a, b, isLeft): """ theta0 = pi/2 + betta, theta1 = 2 * pi + betta; betta = pi/2 - alpha; alpha = atan(a) """ if a is None and b is None: return None, None alpha = math.pi / 2.0 if a is None else math.atan(a) betta = math.pi / 2.0 - alpha shift = 0.0 if isLeft else math.pi theta0 = betta + shift theta1 = theta0 + math.pi return theta0, theta1
9547ba4ea9f74cba3d52d90bb24dc8c4b246fbff
708,525
import re def get_search_cache_key(prefix, *args): """ Generate suitable key to cache twitter tag context """ key = '%s_%s' % (prefix, '_'.join([str(arg) for arg in args if arg])) not_allowed = re.compile('[^%s]' % ''.join([chr(i) for i in range(33, 128)])) key = not_allowed.sub('', key) return key
f3ff5baa13e4e84deb5c13cd8d5b618ba75c8699
708,526
def set_prior_6(para): """ set prior before the first data came in doc details to be added """ n_shape = para['n_shape'] log_prob = [ [] for i_shape in range(n_shape) ] delta_mean = [ [] for i_shape in range(n_shape) ] delta_var = [ [] for i_shape in range(n_shape) ] time_since_last_cp = [ [] for i_shape in range(n_shape) ] return log_prob, delta_mean, delta_var, time_since_last_cp
e97944e1c48ca6def16308584dfe04eaebae6259
708,527
def overviewUsage(err=''): """ default overview information highlighting active scripts""" m = '%s\n' %err m += ' The following scripts allow you to manage Team Branches (TmB) on SalesForce.\n' m += ' Use one of the scripts below to meet your needs.\n' m += ' \n' m += ' 1. First link Task Branches to Team Branches \n' m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> \n' m += ' \n' m += ' 2. List Task Branches linked to a Team Branches \n' m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -d \n' m += ' \n' m += ' 3. First link Task Branches to Team Branches \n' m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -p <low|medium|high|urgent|critical> \n' m += ' \n' return m
ba62773dd8be21d17c44e8e295c8228d568512a0
708,528
def lorem(): """Returns some sample latin text to use for prototyping.""" return """ Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. """
6ddacdb23b7c62cf930e622a7fd801b514a419ae
708,529
def make_list_table(headers, data, title='', columns=None): """Build a list-table directive. :param headers: List of header values. :param data: Iterable of row data, yielding lists or tuples with rows. :param title: Optional text to show as the table title. :param columns: Optional widths for the columns. """ results = [] add = results.append add('.. list-table:: %s' % title) add(' :header-rows: 1') if columns: add(' :widths: %s' % (','.join(str(c) for c in columns))) add('') add(' - * %s' % headers[0]) for h in headers[1:]: add(' * %s' % h) for row in data: add(' - * %s' % row[0]) for r in row[1:]: add(' * %s' % r) add('') return '\n'.join(results)
569370b8359ad25bf255f940b5a89d93d896804d
708,530
def split_val_condition(input_string): """ Split and return a {'value': v, 'condition': c} dict for the value and the condition. Condition is empty if no condition was found. @param input A string of the form XXX @ YYYY """ try: (value, condition) = [x.strip() for x in input_string.split('@')] return {'value': value, 'condition': condition} except ValueError: # no condition was found return {'value': input_string.strip(), 'condition': None}
97c5733a80b3348928b95e2430bf3630867b2050
708,531
def pre_process(dd, df, dataset_len, batch_size): """Partition one dataframe to multiple small dataframes based on a given batch size.""" df = dd.str2ascii(df, dataset_len) prev_chunk_offset = 0 partitioned_dfs = [] while prev_chunk_offset < dataset_len: curr_chunk_offset = prev_chunk_offset + batch_size chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1] partitioned_dfs.append(chunk) prev_chunk_offset = curr_chunk_offset return partitioned_dfs
a0a19916d60476430bdaf27f85f31620f2b5ae2a
708,532
def _make_unique(key, val): """ Make a tuple of key, value that is guaranteed hashable and should be unique per value :param key: Key of tuple :param val: Value of tuple :return: Unique key tuple """ if type(val).__hash__ is None: val = str(val) return key, val
65d746276f635c129aa0a5aeb9b9f467453c0b2a
708,533
def headline( in_string, surround = False, width = 72, nr_spaces = 2, spacesym = ' ', char = '=', border = None, uppercase = True, ): """return in_string capitalized, spaced and sandwiched: ============================== T E S T =============================== Parameters are the following: * char (one-letter string, default='='): changes the character the title is put between. * surround (boolean, default=False): adds additional lines above and under in_string: ==================================================== ==================== T E S T ===================== ==================================================== * width (int, default=72): defines the width of each line. * nr_spaces (int, default=2): defines number of nr_spaces between in_string and the char as indicated in ..====__T I T L E__====.. . * spacesym (one-letter string, default=' '): instead of using a whitespace to seperate the 'title' letters, one can use every other character, e.g. '_'. * border (either string or list/tuple of two strings; defaults to char): If this is a single character string, it will be used at the left and right end of the headline. If this is multiple character string, it will be used at the left and mirrored at the right. This way you can easily introduce additional space if you prefer and use, for example c style like inline comments with border="/*". If this is not enough for you, the left and right borders can be given seperately, like in border=("<!--", "-->") * uppercase (boolean, default=True): if True, headline will capitalize the letters given by in_string. if False, in_string will be used as it is given. """ if isinstance(border, tuple) or isinstance(border, list): left_border = border[0] right_border = border[1] else: if border is None: border = char left_border = border right_border = border[::-1] nr_sym_spaces = len(left_border + right_border) headline_text = spacesym.join( l.upper() if uppercase else l for l in in_string ) headline_text_sandwiched = '{:{}^{}}'.format( headline_text, spacesym, 2 * (len(in_string) + nr_spaces) - 1 ) headline_without_sym = '{:{}^{}}'.format( headline_text_sandwiched, char, width - nr_sym_spaces ) headline_full = '{1}{0}{2}'.format( headline_without_sym, left_border, right_border ) if surround: line = '{1}{0}{2}'.format( (width - nr_sym_spaces) * char, left_border, right_border ) output = line + '\n' + headline_full + '\n' + line else: output = headline_full return output
1848d91bbf6c9d2216338f35433a26bcd3854664
708,534
def try_(func, *args, **kwargs): """Try to call a function and return `_default` if it fails Note: be careful that in order to have a fallback, you can supply the keyword argument `_default`. If you supply anything other than a keyword arg, it will result in it being passed to the wrapped function and could cause unexpected behavior including always failing with default value of None. """ _default_val = kwargs.pop("_default", None) try: return func(*args, **kwargs) except Exception: # pylint: disable=broad-except return _default_val
206b25bd2e345d9cd6423e2cbc2706c274f36c89
708,535
def get_label_names(l_json): """ Get names of all the labels in given json :param l_json: list of labels jsons :type l_json: list :returns: list of labels names :rtype: list """ llist = [] for j in l_json: llist.append(j['name']) return llist
bab12bedc8b5001b94d6c5f02264b1ebf4ab0e99
708,536
def _n_pow_i(a, b, n):
    """Return (a + b*i)**n as an (x, y) pair, i.e. x + y*i."""
    x = a
    y = b
    for i in range(1, n):
        x1 = (x * a) - (y * b)
        y1 = (y * a) + (x * b)
        x = x1
        y = y1
    return x, y
35b00c7bc76aaf19a5acdf012e63c9c0c50e5d1d
708,537
def cg_file_h(tmpdir): """Get render config.""" return { 'cg_file': str(tmpdir.join('muti_layer_test.hip')) }
caedb2324953e4ca90ebffdf80be60fed1b8026d
708,538
def interpolate_peak(spectrum: list, peak: int) -> float:
    """
    Uses quadratic interpolation of spectral peaks to get a better estimate of the peak.

    Args:
    - spectrum: the frequency bin to analyze.
    - peak: the location of the estimated peak in the spectrum list.

    Based off: https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
    """
    prev_neighbour = spectrum[peak - 1]
    next_neighbour = spectrum[peak + 1]
    peak_value = spectrum[peak]
    # Parabolic interpolation offset: 0.5 * (gamma - alpha) / (2*beta - alpha - gamma)
    estimated_peak = 0.5 * (next_neighbour - prev_neighbour) / (
        2 * peak_value - prev_neighbour - next_neighbour) + peak
    return abs(estimated_peak)
0e74057908e7839438325da9adafdf385012ce17
708,539
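An illustrative run of the parabolic peak interpolation above on a made-up neighbourhood (values invented for the example):

spectrum = [0.0, 0.2, 0.9, 1.0, 0.4, 0.1]
print(interpolate_peak(spectrum, 3))
# falls between bins 2 and 3, pulled toward bin 2 because 0.9 > 0.4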
def find_title(item): """Title of the video""" title = item['snippet']['title'] return title
9c6f64e02d959d46cfd1e4536f5faf7ec0c281bd
708,540
import hashlib def calc_fingerprint(text): """Return a hex string that fingerprints `text`.""" return hashlib.sha1(text).hexdigest()
8be154e4e32ae9412a73e73397f0e0198ae9c862
708,541
import six def pad_for_tpu(shapes_dict, hparams, max_length): """Pads unknown features' dimensions for TPU.""" padded_shapes = {} def get_filler(specified_max_length): if not specified_max_length: return max_length return min(specified_max_length, max_length) inputs_none_filler = get_filler(hparams.max_input_seq_length) targets_none_filler = get_filler(hparams.max_target_seq_length) def pad_one_shape(shape, none_filler): return [ (dim if dim is not None else none_filler) for dim in shape.as_list() ] for key, shape in six.iteritems(shapes_dict): if key == "inputs": padded_shapes[key] = pad_one_shape(shape, inputs_none_filler) elif key == "targets": padded_shapes[key] = pad_one_shape(shape, targets_none_filler) else: padded_shapes[key] = pad_one_shape(shape, max_length) return padded_shapes
b72e1463fad9740c8a265b795c4b3c5a45e42a9a
708,542
def has_balanced_parens(exp: str) -> bool: """ Checks if the parentheses in the given expression `exp` are balanced, that is, if each opening parenthesis is matched by a corresponding closing parenthesis. **Example:** :: >>> has_balanced_parens("(((a * b) + c)") False :param exp: The expression to check. :return: `True` if the parentheses are balanced, `False` otherwise. """ # Use a stack to determine if the expression is balanced. # Ref: https://youtu.be/HJOnJU77EUs?t=75 [1:15 - 2:47] paren_stack = [] for e in exp: if e == '(': paren_stack.append(e) elif e == ')': try: paren_stack.pop() except IndexError: return False return len(paren_stack) == 0
f76c7cafcf6aadd0c2cb947f0c49d23835a9f6e4
708,543
def _is_binary(c): """Ensures character is a binary digit.""" return c in '01'
b763a5a8ba591b100fea64a589dcb0aea9fbcf53
708,544
def read_frame_positions(lmp_trj): """ Read stream positions in trajectory file corresponding to time-step and atom-data. """ ts_pos, data_pos = [], [] with open(lmp_trj, 'r') as fid: while True: line = fid.readline() if not line: break if line.startswith('ITEM: TIMESTEP'): ts_pos.append(fid.tell()) elif line.startswith('ITEM: ATOMS id'): data_pos.append(fid.tell()) return ts_pos, data_pos
c168f08577e38758bf3d9d42bae8379125d7fc33
708,545
def home(): """ Display Hello World in a local-host website """ return 'Hello World'
f65a035d679878cfd897c9ea9c79fc41cf76db95
708,546
def sum_by_letter(list_of_dicts, letter): """ :param list_of_dicts: A list of dictionaries. :param letter: A value of the letter keyed by 'letter'. """ total = 0 for d in list_of_dicts: if d['letter'] == letter: total += d['number'] return total
bffc5990eaa9e352d60d86d40b8a8b7070fd00c0
708,547
def gate_settle(gate): """ Return gate settle times """ return 0
f452a343550c4f7be2133119c89dc386665921c4
708,548
def strip_trailing_characters(unstripped_string, tail):
    """
    Strip the tail from a string.

    :param unstripped_string: The string to strip. Ex: "leading"
    :param tail: The trail to remove. Ex: "ing"
    :return: The stripped string. Ex: "lead"
    """
    if unstripped_string.endswith(str(tail)):
        return unstripped_string[:-len(tail)]
    else:
        return unstripped_string
dbd09fe9a58b0fb3072a680a9c7ac701257ebfcd
708,549
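The docstring's own example, run through the function above for illustration:

print(strip_trailing_characters("leading", "ing"))  # 'lead'
print(strip_trailing_characters("leading", "xyz"))  # 'leading' (tail not present, unchanged)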
def is_prime(x): """ Prove if number is prime """ if x == 0 or x == 1: return 0 for i in range(2, x//2 +1): if x % i == 0: return 0 return 1
63980c49b9ea05458ecafe874073805df50ce1d0
708,550
import re def isphone(value, locale='en-US'): """ Return whether or not given value is valid mobile number according to given locale. Default locale is 'en-US'. If the value is valid mobile number, this function returns ``True``, otherwise ``False``. Supported locales are: ``ar-DZ``, ``ar-SY``, ``ar-SA``, ``en-US``, ``en-CA``, ``cs-CZ``, ``de-DE``, ``da-DK`` ``el-GR``, ``en-AU``, ``en-GB``, ``en-HK``, ``zh-HK``, ``en-IN``, ``en-NG``, ``en-NZ``, ``en-ZA``, ``en-ZM`` ``es-ES``, ``fi-FI``, ``fr-FR``, ``he-IL``, ``hu-HU``, ``id-ID``, ``it-IT``, ``ja-JP``, ``ms-MY``, ``nb-NO`` ``nl-BE``, ``fr-BE``, ``nn-NO``, ``pl-PL``, ``pt-BR``, ``pt-PT``, ``ro-RO``, ``en-PK``, ``ru-RU``, ``sr-RS`` ``tr-TR``, ``vi-VN``, ``zh-CN``, ``zh-TW``, ``bn-BD`` Examples:: >>> isphone('+15673628910', 'en-US') True >>> isphone('+10345672645', 'en-US') False :param value: string to validate mobile number :param locale: locale of mobile number to validate """ phones = { 'ar-DZ': r'^(\+?213|0)(5|6|7)\d{8}$', 'ar-SY': r'^(!?(\+?963)|0)?9\d{8}$', 'ar-SA': r'^(!?(\+?966)|0)?5\d{8}$', 'bn-BD': r'^(\+?88)?(01[56789]\d{2}(\s|\-)?\d{6})$', 'en-US': r'^(\+?1)?[2-9]\d{2}[2-9](?!11)\d{6}$', 'cs-CZ': r'^(\+?420)? ?[1-9][0-9]{2} ?[0-9]{3} ?[0-9]{3}$', 'de-DE': r'^(\+?49[ \.\-])?([\(]{1}[0-9]{1,6}[\)])?([0-9 \.\-\']{3,20})((x|ext|extension)[ ]?[0-9]{1,4})?$', 'da-DK': r'^(\+?45)?(\d{8})$', 'el-GR': r'^(\+?30)?(69\d{8})$', 'en-AU': r'^(\+?61|0)4\d{8}$', 'en-GB': r'^(\+?44|0)7\d{9}$', 'en-HK': r'^(\+?852\-?)?[569]\d{3}\-?\d{4}$', 'en-IN': r'^(\+?91|0)?[789]\d{9}$', 'en-NG': r'^(\+?234|0)?[789]\d{9}$', 'en-NZ': r'^(\+?64|0)2\d{7,9}$', 'en-ZA': r'^(\+?27|0)\d{9}$', 'en-ZM': r'^(\+?26)?09[567]\d{7}$', 'es-ES': r'^(\+?34)?(6\d{1}|7[1234])\d{7}$', 'fi-FI': r'^(\+?358|0)\s?(4(0|1|2|4|5)?|50)\s?(\d\s?){4,8}\d$', 'fr-FR': r'^(\+?33|0)[67]\d{8}$', 'he-IL': r'^(\+972|0)([23489]|5[0248]|77)[1-9]\d{6}', 'hu-HU': r'^(\+?36)(20|30|70)\d{7}$', 'id-ID': r'^(\+?62|0[1-9])[\s|\d]+$', 'it-IT': r'^(\+?39)?\s?3\d{2} ?\d{6,7}$', 'ja-JP': r'^(\+?81|0)\d{1,4}[ \-]?\d{1,4}[ \-]?\d{4}$', 'ms-MY': r'^(\+?6?01){1}(([145]{1}(\-|\s)?\d{7,8})|([236789]{1}(\s|\-)?\d{7}))$', 'nb-NO': r'^(\+?47)?[49]\d{7}$', 'nl-BE': r'^(\+?32|0)4?\d{8}$', 'nn-NO': r'^(\+?47)?[49]\d{7}$', 'pl-PL': r'^(\+?48)? ?[5-8]\d ?\d{3} ?\d{2} ?\d{2}$', 'pt-BR': r'^(\+?55|0)\-?[1-9]{2}\-?[2-9]{1}\d{3,4}\-?\d{4}$', 'pt-PT': r'^(\+?351)?9[1236]\d{7}$', 'ro-RO': r'^(\+?4?0)\s?7\d{2}(\'|\s|\.|\-)?\d{3}(\s|\.|\-)?\d{3}$', 'en-PK': r'^((\+92)|(0092))-{0,1}\d{3}-{0,1}\d{7}$|^\d{11}$|^\d{4}-\d{7}$', 'ru-RU': r'^(\+?7|8)?9\d{9}$', 'sr-RS': r'^(\+3816|06)[- \d]{5,9}$', 'tr-TR': r'^(\+?90|0)?5\d{9}$', 'vi-VN': r'^(\+?84|0)?((1(2([0-9])|6([2-9])|88|99))|(9((?!5)[0-9])))([0-9]{7})$', 'zh-CN': r'^(\+?0?86\-?)?1[345789]\d{9}$', 'zh-TW': r'^(\+?886\-?|0)?9\d{8}$' } phones['en-CA'] = phones['en-US'] phones['fr-BE'] = phones['nl-BE'] phones['zh-HK'] = phones['en-HK'] loc = phones.get(locale) if loc is None: raise ValueError('Please provide a supported locale.') else: loc_pattern = re.compile(loc) return bool(loc_pattern.match(value))
2e3de8fb6aad000c21ea560521f81c4e9bf2e090
708,551
def _darken(color): """ Takes a hexidecimal color and makes it a shade darker :param color: The hexidecimal color to darken :return: A darkened version of the hexidecimal color """ # Get the edge color darker = "#" hex1 = color[1:3] hex2 = color[3:5] hex3 = color[5:7] for val in [hex1, hex2, hex3]: if val == "00": darker += "00" else: x = int(val, base=16) x -= int("11", base=16) x = str(hex(x))[2:].upper() darker += x return darker
5b43785572f9685906e73f4bf856cf4d693f6411
708,552
def flatten_acfg_list(acfg_list): """ Returns a new config where subconfig params are prefixed by subconfig keys """ flat_acfg_list = [] for acfg in acfg_list: flat_dict = { prefix + '_' + key: val for prefix, subdict in acfg.items() for key, val in subdict.items() } flat_acfg_list.append(flat_dict) return flat_acfg_list
ae586bc49ee31db022f388492acbbf5e8d02b09d
708,553
from typing import Optional def q_to_res(Q: float) -> Optional[float]: """ :param Q: Q factor :return: res, or None if Q < 0.25 """ res = 1 - 1.25 / (Q + 1) if res < 0.0: return None return res
98380be0c8fbd3bfd694d7851f35488d74cdd862
708,554
def id_str_to_bytes(id_str: str) -> bytes:
    """Convert a 40 characters hash into a byte array.

    The conversion results in 160 bits of information (20-bytes array). Notice
    that this operation is reversible (using `id_bytes_to_str`).

    Args:
        id_str: Hash string containing 40 characters.

    Returns:
        bytes: The ID converted to bytes.
    """
    return int(id_str, 16).to_bytes(20, byteorder='big')
cd6a702343f1267e17710305f9aed70613feacb3
708,555
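A round-trip sketch for the converter above (the reverse helper `id_bytes_to_str` mentioned in the docstring is not shown in this row, so bytes.hex() stands in for it here):

id_str = 'cd6a702343f1267e17710305f9aed70613feacb3'
as_bytes = id_str_to_bytes(id_str)  # 20-byte big-endian representation
back = as_bytes.hex()               # stand-in for the reverse conversion
assert back == id_str and len(as_bytes) == 20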
def create_process_chain_entry(input_object, python_file_url, udf_runtime, udf_version, output_object): """Create a Actinia command of the process chain that uses t.rast.udf :param strds_name: The name of the strds :param python_file_url: The URL to the python file that defines the UDF :param output_name: The name of the output raster layer :return: A Actinia process chain description """ # rn = randint(0, 1000000) pc = {"id": "t_rast_udf", "module": "t.rast.udf", "inputs": [{"import_descr": {"source": python_file_url, "type": "file"}, "param": "pyfile", "value": "$file::my_py_func"}, {"param": "input", "value": input_object.grass_name()}, {"param": "output", "value": output_object.grass_name()}]} return pc
78a76275a2f1dba30627f1a52acd88d2ce851ccc
708,556
import random


def define_answer(defined_answer):
    """
    Randomly generate the "correct answer".

    Each of the 5 digits is decided by drawing a random number from 0 to 15.
    When the random number for digit `count` (digit_kari) is drawn, check whether it
    duplicates any of the digits up to count-1.
    If there is no duplicate, store the drawn number (digit_kari) in ans_list[count];
    if it is a duplicate, redraw that digit.
    """
    global ans_str  # ,ans_list
    if type(defined_answer) == str and len(defined_answer) == 5:
        ans_str = defined_answer
        return defined_answer
    else:
        ans_list = [0, 0, 0, 0, 0]
        ans_str = ""
        digit_kari = 0
        count = 0
        check = 0
        while count < 5:
            if count == 0:
                ans_list[count] = random.randint(0, 15)
                count += 1
            else:
                digit_kari = random.randint(0, 15)
                for j in range(count):
                    if ans_list[j] == digit_kari:
                        check = -1
                if check == 0:
                    ans_list[count] = digit_kari
                    count += 1
                else:
                    check = 0
        for i in range(5):
            ans_str += str(hex(ans_list[i]))[2]
        print("answer:" + ans_str)  # enable only when you want to know the answer in advance
        return ans_str
fa19b2e28864d4c09458582d6dea80b81b3426f6
708,557