content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import contextlib
import socket


def get_available_port() -> int:
    """Finds and returns an available port on the system."""
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(('', 0))
        _, port = sock.getsockname()
        return int(port)
c86de127fb237662052b8ce010e99d271836e1ef
708,905
def prettyprint_float(val, digits):
    """Print a floating-point value in a nice way."""
    format_string = "%." + f"{digits:d}" + "f"
    return (format_string % val).rstrip("0").rstrip(".")
ba62671d9cb8061744fbf1e070e76c31d0ba185d
708,906
def dataframe_is_one_query_target_pair(dataframe):
    """
    Make sure there is only one query sequence and reference sequence in the
    given dataframe.  Used to check that we aren't aggregating % identity
    numbers across bin alignment pairs.

    :param dataframe: dataframe with 'query bin' and 'ref bin' columns
    :return: True if the dataframe holds exactly one query/reference bin pair
    """
    num_query_bins = len(dataframe['query bin'].unique())
    num_ref_bins = len(dataframe['ref bin'].unique())
    if not num_query_bins == 1:
        raise ValueError("Dataframe has a mix of {} query bins: {}".format(
            num_query_bins, dataframe['query bin'].unique()))
    if not num_ref_bins == 1:
        raise ValueError("Dataframe has a mix of {} reference bins: {}".format(
            num_ref_bins, dataframe['ref bin'].unique()))
    return (num_query_bins == 1) and (num_ref_bins == 1)
8a8aba9f4b2eaaca6971bf5c158d043a033d0ec8
708,907
def cleanGender(x):
    """
    This is a helper function that will help clean up the gender variable.
    """
    if x in ['female', 'mostly_female']:
        return 'female'
    if x in ['male', 'mostly_male']:
        return 'male'
    if x in ['couple']:
        return 'couple'
    else:
        return 'unknownGender'
23d71f2307aa829312f4a1d2a002ae2b55556050
708,908
import re def is_doi(identifier: str) -> bool: """Validates if identifier is a valid DOI Args: identifier (str): potential doi string Returns: bool: true if identifier is a valid DOI """ doi_patterns = [ r"(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?![\"&\'])\S)+)", r"(10.\d{4,9}/[-._;()/:A-Z0-9]+)", r"(10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d)", r"(10.1021/\w\w\d+)", r"(10.1207/[\w\d]+\&\d+_\d+)", ] for pattern in doi_patterns: match = bool(re.match(pattern, identifier)) if match: return True return False
5c0bfe0527adbf53e89d302ee05feb80d285db64
708,910
import importlib


def create_agent(opt):
    """Create an agent from the options model, model_params and model_file.

    The input is either of the form
    "parlai.agents.ir_baseline.agents:IrBaselineAgent" (i.e. the path followed
    by the class name) or else just 'IrBaseline', which assumes the path above
    and a class name suffixed with 'Agent'.
    """
    dir_name = opt['model']
    if ':' in dir_name:
        s = dir_name.split(':')
        module_name = s[0]
        class_name = s[1]
    else:
        module_name = "parlai.agents.%s.agents" % (dir_name)
        words = opt['model'].split('_')
        class_name = ''
        for w in words:
            class_name += (w[0].upper() + w[1:])
        class_name += 'Agent'
    print(class_name)
    my_module = importlib.import_module(module_name)
    model_class = getattr(my_module, class_name)
    return model_class(opt)
6f5793ee0af7ed677f47c27ba5b94ad6f80ea957
708,911
def dfa_intersection(dfa_1: dict, dfa_2: dict) -> dict: """ Returns a DFA accepting the intersection of the DFAs in input. Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two DFAs. Then there is a DFA :math:`A_∧` that runs simultaneously both :math:`A_1` and :math:`A_2` on the input word and accepts when both accept. It is defined as: :math:`A_∧ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, F_1 × F_2 )` where :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2}= ρ_2 (s_2 , a)` Implementation proposed guarantees the resulting DFA has only **reachable** states. :param dict dfa_1: first input DFA; :param dict dfa_2: second input DFA. :return: *(dict)* representing the intersected DFA. """ intersection = { 'alphabet': dfa_1['alphabet'].intersection(dfa_2['alphabet']), 'states': {(dfa_1['initial_state'], dfa_2['initial_state'])}, 'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']), 'accepting_states': set(), 'transitions': dict() } boundary = set() boundary.add(intersection['initial_state']) while boundary: (state_dfa_1, state_dfa_2) = boundary.pop() if state_dfa_1 in dfa_1['accepting_states'] \ and state_dfa_2 in dfa_2['accepting_states']: intersection['accepting_states'].add((state_dfa_1, state_dfa_2)) for a in intersection['alphabet']: if (state_dfa_1, a) in dfa_1['transitions'] \ and (state_dfa_2, a) in dfa_2['transitions']: next_state_1 = dfa_1['transitions'][state_dfa_1, a] next_state_2 = dfa_2['transitions'][state_dfa_2, a] if (next_state_1, next_state_2) not in intersection['states']: intersection['states'].add((next_state_1, next_state_2)) boundary.add((next_state_1, next_state_2)) intersection['transitions'][(state_dfa_1, state_dfa_2), a] = \ (next_state_1, next_state_2) return intersection
ea69f3cda2bd28f5b70d1724ffdd628daf1beffa
708,912
import re


def find_links(text, image_only=False):
    """
    Find Markdown links in text and return a match object.

    Markdown links are expected to have the form [some txt](A-url.ext)
    or ![Alt text](cool-image.png).

    Parameters
    ----------
    text : str
        Text in which to search for links.
    image_only : bool
        If ``True``, find only markdown image links, i.e. those that begin
        with an exclamation mark.

    Returns
    -------
    list
        List of ``re.Match`` objects, one for each link found. Each object
        has two named groups: 'link_text', which contains the part between
        the square brackets, and 'link_url', which is the URL (or file name
        for an image).
    """
    if image_only:
        markdown_link = \
            re.compile(r"!\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
                       flags=re.MULTILINE)
    else:
        markdown_link = \
            re.compile(r"!?\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
                       flags=re.MULTILINE)
    groups = [m for m in markdown_link.finditer(text)]
    return groups
5f96672b48d3d911faf2e398c86f622676263d73
708,913
def least_one_row(data_frame):
    """
    Check that a dataframe contains at least one row.

    Input: pandas dataframe
    Output: True or False
    """
    if data_frame is not None and not data_frame.empty:
        return True
    return False
a72cbd3d504140547233481ec8340a8510e35f52
708,914
def manhattan_distance(origin, destination):
    """Return the Manhattan distance between the origin and the destination.

    @type origin: Location
    @type destination: Location
    @rtype: int

    >>> pt1 = Location(1, 2)
    >>> pt2 = Location(3, 4)
    >>> print(manhattan_distance(pt1, pt2))
    4
    """
    return (abs(origin.row - destination.row) +
            abs(origin.column - destination.column))
0bcfd7767e44b0dcc47890dc4bcb2c054abb4bde
708,915
def parse_model_value(value, context):
    """
    Do interpolation from context first: "x is {size}" with size = 5 will be
    interpolated to "x is 5"; then return the interpolated string.

    :param value: template string to interpolate
    :param context: dict of values available for interpolation
    :return: the interpolated string
    """
    return value.format(**context)
58cee6092bc03debe636ae8fa47878727457d334
708,916
def L008_eval(segment, raw_stack, **kwargs): """ This is a slightly odd one, because we'll almost always evaluate from a point a few places after the problem site """ # We need at least two segments behind us for this to work if len(raw_stack) < 2: return True else: cm1 = raw_stack[-1] cm2 = raw_stack[-2] if cm2.name == 'comma': if cm1.name not in ['whitespace', 'newline']: # comma followed by something that isn't whitespace! return cm2 elif cm1.raw not in ['\n', ' '] and not segment.is_comment: return cm1 return True
71c42999ffc76bd28a61b640cf85086b0b9e8d69
708,917
def overwrite_ruffus_args(args, config): """ :param args: :param config: :return: """ if config.has_section('Ruffus'): cmdargs = dict() cmdargs['draw_horizontally'] = bool cmdargs['flowchart'] = str cmdargs['flowchart_format'] = str cmdargs['forced_tasks'] = lambda x: x.split() cmdargs['history_file'] = str cmdargs['jobs'] = int cmdargs['just_print'] = bool cmdargs['key_legend_in_graph'] = bool cmdargs['log_file'] = str cmdargs['recreate_database'] = bool cmdargs['target_tasks'] = lambda x: x.split() cmdargs['touch_files_only'] = bool cmdargs['use_threads'] = bool cmdargs['verbose'] = lambda x: x.split() for k, v in config.items('Ruffus'): try: args.__setattr__(k, cmdargs[k](v)) except KeyError: pass return args
6f947c362a37bfdc6df53c861783604999621a88
708,918
def pot_rho_linear(SP, t, rho0=1025, a=2e-4, b=7e-4, SP0=35, t0=15): """ Potential density calculated using a linear equation of state: Parameters ---------- SP : array-like Salinity [g/kg] t : array-like Temperature [°C] rho0 : float, optional Constant density [kg/m^3] a : float, optional Thermal expansion coefficient [1/°C] b : float, optional saline expansion coefficient [kg/g] SP0 : float, optional Constant salinity [g/kg] t0 : float, optional Constant temperature [°C] Returns ------- pot_rho : ndarray Potential density [kg/m^3] """ return rho0 * (1 - a * (t - t0) + b * (SP - SP0))
47dd8248239d2147ff50d1b179d3fc4392c173cb
708,919
def num_zeros_end(num):
    """
    Counts the number of zeros at the end of the number 'num'.
    """
    iszero = True
    num_zeros = 0
    i = len(num) - 1
    while iszero and i >= 0:
        if num[i] == "0":
            num_zeros += 1
        else:
            iszero = False
        i -= 1
    return num_zeros
f227cce65e26a0684a10755031a4aeff2156015a
708,920
import csv
import numpy


def load_csv(file, shape=None, normalize=False):
    """
    Load CSV file.

    :param file: CSV file.
    :type file: file like object
    :param shape: data array is reshaped to this shape.
    :type shape: tuple of int
    :return: numpy array
    """
    value_list = []
    for row in csv.reader(file):
        value_list.append(list(map(float, row)))
    if shape is None:
        return numpy.array(value_list)
    else:
        return numpy.array(value_list).reshape(shape)
07f3b61bbdb6c9937f3cc4b0ae98fdfb7d8de48a
708,921
import sympy


def preprocess(function):
    """
    Converts a given function from type str to a Sympy object.

    Keyword arguments:
    function -- a string type representation of the user's math function
    """
    expr = function
    while '^' in expr:
        expr = expr[:expr.index('^')] + '**' + expr[expr.index('^') + 1:]
    expr = sympy.sympify(expr)
    return expr
001bd04d27db2afa4debbe776e5fe3cf1af1476d
708,922
import re


def tot_changes(changes: str) -> int:
    """Add deletions and insertions."""
    insertions_pat = re.compile(r"(\d+) insertion")
    deletions_pat = re.compile(r"(\d+) deletion")
    insertions = insertions_pat.search(changes)
    insertions = int(insertions.group(1)) if insertions else 0
    deletions = deletions_pat.search(changes)
    deletions = int(deletions.group(1)) if deletions else 0
    return insertions + deletions
74742baf63db51b5c59b332f0104008500f330b9
708,923
import ipaddress


def is_ip_network(network, strict=False):
    """Returns True/False if a string is a valid network."""
    network = str(network)
    try:
        ipaddress.ip_network(network, strict)
        return True
    except ValueError:
        return False
84206586412b76816fa845a75fc6c121bfdf0989
708,924
def datatable(module, tag):
    """Mapping for DataTable."""
    if tag == "DataTable":
        return module, tag
1eaa06771ecdd99dfa102ec249b23db3999b6fd7
708,925
def remove_prepending(seq):
    """
    Method to remove prepending ASs from AS path.
    """
    last_add = None
    new_seq = []
    for x in seq:
        if last_add != x:
            last_add = x
            new_seq.append(x)

    is_loopy = False
    if len(set(seq)) != len(new_seq):
        is_loopy = True
        # raise Exception('Routing Loop: {}'.format(seq))
    return new_seq, is_loopy
78bb1554678af0998e15ecf9ed8f4e379ac2e2ad
708,926
import numpy def _sample_perc_from_list(lst, perc=100, algorithm="cum_rand", random_state=None): """ Sample randomly a certain percentage of items from the given list. The original order of the items is kept. :param lst: list, shape = (n,), input items :param perc: scalar, percentage to sample :param algorithm: string, which algorithm should be used "random": Decide for each item to be chosen or not. This algorithm runs in linear time O(n), but the percentages might not match exactly. "cum_rand": O(n log(n) + perc) :return: list """ if perc >= 100: return lst if perc <= 0: return [] # Store old random state and set random state rs_old = numpy.random.get_state() numpy.random.seed(random_state) if algorithm == "random": lst_sub = [it for it in lst if numpy.random.uniform(high=100) <= perc] elif algorithm == "cum_rand": n = len(lst) n_perc = numpy.round(n * perc / 100.0) rank_its = numpy.argsort(numpy.random.uniform(size=n)) lst_sub = [] for idx, it in enumerate(lst): if rank_its[idx] < n_perc: lst_sub.append(it) if len(lst_sub) > n_perc: break else: raise ValueError("Invalid sampling algorithm: %s." % algorithm) # Restore old random stat numpy.random.set_state(rs_old) return lst_sub
4ec000e9bd8f5e10550040e49018e2a045659397
708,927
import os def get_img_full_path(path): """ Checks if file can be found by path specified in the input. Returns the same as input if can find, otherwise joins current directory full path with path from input and returns it. :param path: Relative of full path to the image. :return: Relative of full path to the image (joined with path to current directory if needed). """ if os.path.isfile(path): return path else: directory = os.path.dirname(__file__) new_path = os.path.join(directory, path) if os.path.isfile(new_path): return new_path else: raise IOError("File not found: " + path)
d549cd09035ebd6213f1b31c1c2eee4e64dcdce8
708,928
import logging import sys import os def get_logger(filename, logger_name=None): """set logging file and format Args: filename: str, full path of the logger file to write logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' Return: logger: python logger """ log_format = "%(asctime)s %(message)s" logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt="%m%d %I:%M:%S %p") # different name is needed when creating multiple logger in one process logger = logging.getLogger(logger_name) fh = logging.FileHandler(os.path.join(filename)) fh.setFormatter(logging.Formatter(log_format)) logger.addHandler(fh) return logger
e549cdd7198961662f28390e09480d05f5ad14b4
708,929
def sao_isomorficas(texto1: str, texto2: str) -> bool:
    """
    >>> sao_isomorficas('egg', 'add')
    True
    >>> sao_isomorficas('foo', 'bar')
    False
    >>> sao_isomorficas('eggs', 'add')
    False
    """
    # O(n) algorithm in both time and memory
    letras_encontradas = {}
    if len(texto1) != len(texto2):
        return False
    for caractere_1, caractere_2 in zip(texto1, texto2):
        try:
            letra = letras_encontradas[caractere_1]
        except KeyError:
            letras_encontradas[caractere_1] = caractere_2
        else:
            if letra != caractere_2:
                return False
    return True
a1f2c00a50b69cb18c32a299d50cbd3a35dcbe5e
708,930
from typing import Dict
import random


def pick_char_from_dict(char: str, dictionary: Dict[str, str]) -> str:
    """
    Picks a random format for the given letter in the dictionary.
    """
    return random.choice(dictionary[char])
c593166ef7cb8c960b8c4be8fa0f8a20ec616f00
708,931
from typing import List
import ctypes


def swig_py_object_2_list_int(object, size: int) -> List[int]:
    """
    Converts SwigPyObject to List[int].
    """
    y = (ctypes.c_float * size).from_address(int(object))
    new_object = []
    for i in range(size):
        new_object += [int(y[i])]
    return new_object
064a9a1e43884a9f989bec0b31d6d19705764b64
708,932
import os import fnmatch def _get_all_files_in_directory(dir_path, excluded_glob_patterns): """Recursively collects all files in directory and subdirectories of specified path. Args: dir_path: str. Path to the folder to be linted. excluded_glob_patterns: set(str). Set of all glob patterns to be excluded. Returns: a list of files in directory and subdirectories without excluded files. """ files_in_directory = [] for _dir, _, files in os.walk(dir_path): for file_name in files: filepath = os.path.relpath( os.path.join(_dir, file_name), os.getcwd()) if not any([fnmatch.fnmatch(filepath, gp) for gp in excluded_glob_patterns]): files_in_directory.append(filepath) return files_in_directory
42a7f1220fd54b08b83dc9d89beef0c63c9d5cd0
708,933
def lonlat2px_gt(img, lon, lat, lon_min, lat_min, lon_max, lat_max): """ Converts a pair of lon and lat to its corresponding pixel value in an geotiff image file. Parameters ---------- img : Image File, e.g. PNG, TIFF Input image file lon : float Longitude lat : float Latitude lon_min, lat_min : float lower left coordinate of geotiff lon_max, lat_max : float upper right coordinate of geotiff Returns ------- Row : float corresponding pixel value Col : float corresponding pixel value """ w, h = img.size londiff = lon_max - lon_min latdiff = lat_max - lat_min mw = w / londiff mh = h / latdiff row = (-lat + lat_max) * mh col = (lon - lon_min) * mw return row, col
39c1aeb63d38fdac383c510913f50f177d274a04
708,934
async def async_setup_entry(hass, entry):
    """Set up Jenkins from a config entry."""
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )
    return True
c46912d11630c36effc07eed3273e42325c9b2b8
708,936
def de_dupe_list(input):
    """De-dupe a list, preserving order."""
    sam_fh = []
    for x in input:
        if x not in sam_fh:
            sam_fh.append(x)
    return sam_fh
bbf1936f21c19195369e41b635bf0f99704b3210
708,937
def adapter_rest(request, api_module_rest, api_client_rest):
    """Pass."""
    return {
        "adapter": request.param,
        "api_module": api_module_rest,
        "api_client": api_client_rest,
    }
8b96313cb190f6f8a97a853e24a5fcfade291d76
708,938
import torch def count_regularization_baos_for_both(z, count_tokens, count_pieces, mask=None): """ Compute regularization loss, based on a given rationale sequence Use Yujia's formulation Inputs: z -- torch variable, "binary" rationale, (batch_size, sequence_length) percentage -- the percentage of words to keep Outputs: a loss value that contains two parts: continuity_loss -- \sum_{i} | z_{i-1} - z_{i} | sparsity_loss -- |mean(z_{i}) - percent| """ # (batch_size,) if mask is not None: mask_z = z * mask seq_lengths = torch.sum(mask, dim=1) else: mask_z = z seq_lengths = torch.sum(z - z + 1.0, dim=1) mask_z_ = torch.cat([mask_z[:, 1:], mask_z[:, -1:]], dim=-1) continuity_ratio = torch.sum(torch.abs(mask_z - mask_z_), dim=-1) / seq_lengths #(batch_size,) percentage = count_pieces * 2 / seq_lengths # continuity_loss = F.threshold(continuity_ratio - percentage, 0, 0, False) continuity_loss = torch.abs(continuity_ratio - percentage) sparsity_ratio = torch.sum(mask_z, dim=-1) / seq_lengths #(batch_size,) percentage = count_tokens / seq_lengths #(batch_size,) # sparsity_loss = F.threshold(sparsity_ratio - percentage, 0, 0, False) sparsity_loss = torch.abs(sparsity_ratio - percentage) return continuity_loss, sparsity_loss
7925c8621866a20f0c6130cd925afffe144e1c7c
708,939
def unsqueeze_samples(x, n):
    """Reshape a (b * n, d) array into (b, n, d)."""
    bn, d = x.shape
    x = x.reshape(bn // n, n, d)
    return x
0c7b95e97df07aea72e9c87996782081763664cf
708,940
def get_q_HPU_ave(Q_HPU):
    """Hourly average heating output of the heat pump unit (7).

    Args:
        Q_HPU (ndarray): heating output of the heat pump unit per hour (MJ/h)

    Returns:
        ndarray: hourly average heating output of the heat pump unit (7)
    """
    return Q_HPU * 10 ** 6 / 3600
fdf339d7f8524f69409711d4daefd1e2aaccbc76
708,941
def prime_list(num): """ This function returns a list of prime numbers less than natural number entered. :param num: natural number :return result: List of primes less than natural number entered """ prime_table = [True for _ in range(num+1)] i = 2 while i ** 2 <= num: if prime_table[i]: j = i + i while j <= num: prime_table[j] = False j += i i += 1 result = [i for i in range(num) if prime_table[i] and i >= 2] return result
c8e05aae2a59c229cfafb997469dd8ccacdda0fc
708,943
import ipaddress def _item_to_python_repr(item, definitions): """Converts the given Capirca item into a typed Python object.""" # Capirca comments are just appended to item strings s = item.split("#")[0].strip() # A reference to another network if s in definitions.networks: return s # IPv4 address / network try: return ipaddress.IPv4Address(s) except ValueError: pass try: return ipaddress.IPv4Network(s, strict=False) except ValueError: pass # IPv6 address / network try: return ipaddress.IPv6Address(s) except ValueError: pass try: return ipaddress.IPv6Network(s, strict=False) except ValueError: pass raise ValueError("Unknown how to convert {s}".format(s=s))
9881e304e923eb2cea8223224273f4c9ef81696b
708,944
from typing import Callable import click def node_args_argument(command: Callable[..., None]) -> Callable[..., None]: """ Decorate a function to allow choosing arguments to run on a node. """ function = click.argument( 'node_args', type=str, nargs=-1, required=True, )(command) # type: Callable[..., None] return function
89365a41b7665cf291f5c15852db81e89aeef9a7
708,945
import functools import unittest def _tag_error(func): """Decorates a unittest test function to add failure information to the TestCase.""" @functools.wraps(func) def decorator(self, *args, **kwargs): """Add failure information to `self` when `func` raises an exception.""" self.test_failed = False try: func(self, *args, **kwargs) except unittest.SkipTest: raise except Exception: # pylint: disable=broad-except self.test_failed = True raise # re-raise the error with the original traceback. return decorator
a2818c63647410abea3fde0b7f4fdae667b558bf
708,946
import base64 import binascii def decode(password, encoded, notice): """ :type password: str :type encoded: str """ dec = [] try: encoded_bytes = base64.urlsafe_b64decode(encoded.encode()).decode() except binascii.Error: notice("Invalid input '{}'".format(encoded)) return for i in range(len(encoded_bytes)): key_c = password[i % len(password)] dec_c = chr((256 + ord(encoded_bytes[i]) - ord(key_c)) % 256) dec.append(dec_c) return "".join(dec)
5cf82bfbbe7eee458914113f648dadbe7b15dee8
708,947
from functools import reduce


def replace(data, replacements):
    """
    Perform several string substitutions.

    This function performs several string substitutions on the initial
    ``data`` string using a list of 2-tuples (old, new) defining
    substitutions and returns the resulting string.
    """
    return reduce(lambda a, kv: a.replace(*kv), replacements, data)
37b2ad5b9b6d50d81a8c1bcded9890de3c840722
708,949
def binomial(n, k):
    """Binomial coefficient."""
    if k < 0 or k > n:
        return 0
    if k == 0 or k == n:
        return 1
    num = 1
    den = 1
    for i in range(1, min(k, n - k) + 1):  # take advantage of symmetry
        num *= (n + 1 - i)
        den *= i
    c = num // den
    return c
78910202202f749f8e154b074a55f6a5ddf91f64
708,950
def bubbleSort(arr):
    """
    >>> bubbleSort([54, 25, 12, 34, 11, 90, 23])
    [11, 12, 23, 25, 34, 54, 90]
    """
    n = len(arr)
    for i in range(n - 1):
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
28bc9d505ef44a4b403c0f91a971cccf74644c5a
708,951
import functools def CreateMnemonicsC(mnemonicsIds): """ Create the opcodes arrays for C header files. """ opsEnum = "typedef enum {\n\tI_UNDEFINED = 0, " pos = 0 l2 = sorted(mnemonicsIds.keys()) for i in l2: s = "I_%s = %d" % (i.replace(" ", "_").replace(",", ""), mnemonicsIds[i]) if i != l2[-1]: s += "," pos += len(s) if pos >= 70: s += "\n\t" pos = 0 elif i != l2[-1]: s += " " opsEnum += s opsEnum += "\n} _InstructionType;" # Mnemonics are sorted by insertion order. (Psuedo mnemonics depend on this!) # NOTE: EXTRA BACKSLASHES FORE RE.SUB !!! s = "const unsigned char _MNEMONICS[] =\n\"\\\\x09\" \"UNDEFINED\\\\0\" " l = list(zip(mnemonicsIds.keys(), mnemonicsIds.values())) l = sorted(l, key=functools.cmp_to_key(lambda x, y: x[1] - y[1])) for i in l: s += "\"\\\\x%02x\" \"%s\\\\0\" " % (len(i[0]), i[0]) if len(s) - s.rfind("\n") >= 76: s += "\\\\\n" s = s[:-1] + ";" # Ignore last space. # Return enum & mnemonics. return (opsEnum, s)
a20a01fbefc1175c24144753264edc938258cdca
708,952
def get_filename(file_fullpath):
    """
    Returns the filename without the full path.

    :param file_fullpath: full path to the file
    :return: Returns the filename
    """
    filename = file_fullpath.split("/")[-1].split(".")[0]
    return filename
903cb26c89d1d18c9ebafe1a468c7fa66c51f119
708,953
import click def get_help_recursive(group, ctx, commands): """ Returns help for arbitrarily nested subcommands of the given click.Group. """ try: command_name = commands.pop(0) group = group.get_command(ctx, command_name) if not group: raise click.ClickException('Invalid command: {}'.format(command_name)) except IndexError: # end of subcommand chain return group.get_help(ctx) except AttributeError: # group is actually a command with no children return group.get_help(ctx) return get_help_recursive(group, ctx, commands)
412f0cb9e9aa1f19caf4a4a5db95c8040a0d2f36
708,955
def get_blender_frame_time(skeleton, frame_id, rate, time_scale, actor_id): """Goes from multi-actor integer frame_id to modded blender float time.""" # stays within video frame limits frame_id2 = skeleton.mod_frame_id(frame_id=frame_id) # type: int time_ = skeleton.get_time(frame_id) if actor_id > 0: time_ = frame_id2 / rate print('time is {} for {} ({}), orig time: {}, rate: {}, ' 'time_scale: {}' .format(time_, frame_id, frame_id2, skeleton.get_time(frame_id), rate, time_scale)) frame_time = time_ * time_scale return frame_time
ca8ab45dbbb1b28b05894b9dd92529245441c60b
708,956
def calculate_accuracy(y_true, y_pred): """Calculates the accuracy of the model. Arguments: y_true {numpy.array} -- the true labels corresponding to each input y_pred {numpy.array} -- the model's predictions Returns: accuracy {str} -- the accuracy of the model (%) """ correctpred, total = 0, 0 for index in range(len(y_pred)): if(y_pred[index] == y_true[index]): correctpred = correctpred + 1 total = total+1 return 'accuracy='+str((correctpred*100)/total)
1ea14f8e4f50d13e2ae557aeec466c5372b99171
708,957
def generate_interblock_leader():
    """Generates the leader between normal blocks."""
    return b'\x55' * 0x2
99878b67a31a4169bc73ad9b9b249a981a22177f
708,958
def get_url_name(url_):
    """Get the name from url_."""
    raw_res = url_.split('/', -1)[-1]
    raw_res = raw_res.split('.', 1)[0]
    res = raw_res[-15:]
    return res
a8f3b8dbc4a53e839b3047604e71ffaf36c00767
708,959
def remove_prefix(string, prefix):
    """
    This function removes the given prefix from a string, if the string does
    indeed begin with the prefix; otherwise, it returns the string unmodified.
    """
    if string.startswith(prefix):
        return string[len(prefix):]
    else:
        return string
73cffca0e9938ea48f3781c7821fcbcf56e0cf25
708,960
import torch


def action_probs_to_action(probs):
    """
    Takes output of controller and converts to action in format [0, 0, 0, 0].
    """
    forward = probs[:, 0:2]
    camera = probs[:, 2:5]
    jump = probs[:, 5:7]
    action = [torch.distributions.Categorical(p).sample().detach().item()
              for p in [forward, camera, jump]]
    action.append(0)  # not allowing any motion along side dimension
    return action
00395569cd3fb7696bd0aa050f6fbcd6641d3741
708,961
def minute_info(x):
    """
    Separates the minutes from a time stamp and returns the minute of the
    time as a fraction of an hour.
    """
    n2 = x.minute
    return n2 / 60
c166bb8f759a5eed1b45b2dd8f228206357deb28
708,962
from bs4 import BeautifulSoup


def remove_html_tags(text):
    """Removes HTML tags from texts and replaces special spaces with regular spaces."""
    text = BeautifulSoup(text, 'html.parser').get_text()
    text = text.replace(u'\xa0', ' ')
    return text
7f31a18d81ebc80b202ac697eb7b19fe206aed95
708,963
def split_str_to_list(input_str, split_char=","): """Split a string into a list of elements. Args: input_str (str): The string to split split_char (str, optional): The character to split the string by. Defaults to ",". Returns: (list): The string split into a list """ # Split a string into a list using `,` char split_str = input_str.split(split_char) # For each element in split_str, strip leading/trailing whitespace for i, element in enumerate(split_str): split_str[i] = element.strip() return split_str
2b13868aed1869310a1398886f6777ddceb6c777
708,964
import re def formatRFC822Headers(headers): """ Convert the key-value pairs in 'headers' to valid RFC822-style headers, including adding leading whitespace to elements which contain newlines in order to preserve continuation-line semantics. """ munged = [] linesplit = re.compile(r'[\n\r]+?') for key, value in headers: vallines = linesplit.split(value) while vallines: if vallines[-1].rstrip() == '': vallines = vallines[:-1] else: break munged.append('%s: %s' % (key, '\r\n '.join(vallines))) return '\r\n'.join(munged)
4c7dd97c9079daf144acf83241ebe9f025020611
708,965
def clean_meta(unclean_list): """ cleans raw_vcf_header_list for downstream processing :return: """ clean_list = [] for i in unclean_list: if "=<" in i: i = i.rstrip(">") i = i.replace("##", "") ii = i.split("=<", 1) else: i = i.replace("##", "") ii = i.split("=", 1) clean_list.append(ii) return clean_list
03dcbcad57b129fd6ff379f3fb3181c91f8f4106
708,966
import re


def snake_case(string: str) -> str:
    """Convert upper camelcase to snake case."""
    return re.sub(r"(?<!^)(?=[A-Z])", "_", string).lower()
fe8592bcfa1f2233a07308741de5f912fd7055b3
708,967
import argparse


def positive_int(s: str) -> int:
    """Positive integer validator for `argparse.ArgumentParser`."""
    i = int(s)
    if i < 0:
        raise argparse.ArgumentTypeError("A positive number is required")
    return i
480f68e296e3fedfef4dae26f1a8563691056a17
708,968
def remove_cmds_from_title(title):
    """
    Removes the commands placed in the title, purely to keep the title
    to the point.
    """
    arr = title.split()
    output = " ".join(list(filter(lambda x: x[0] != "!", arr)))
    return output
bfaa96aa578455f977549b737a8492afa80e1e7c
708,969
def get_error_directory_does_not_exists(dir_kind):
    """dir_kind = [dir, file, url]"""
    return f"Error: Directory with {dir_kind} does not exist:"
171fb09ab341daf2810612f2cc7c077b5326f347
708,970
def var_text(vname, iotype, variable): """ Extract info from variable for vname of iotype and return info as HTML string. """ if iotype == 'read': txt = '<p><i>Input Variable Name:</i> <b>{}</b>'.format(vname) if 'required' in variable: txt += '<br><b><i>Required Input Variable</i></b>' else: txt = '<p><i>Output Variable Name:</i> <b>{}</b>'.format(vname) txt += '<br><i>Description:</i> {}'.format(variable['desc']) txt += '<br><i>Datatype:</i> {}'.format(variable['type']) if iotype == 'read': txt += '<br><i>Availability:</i> {}'.format(variable['availability']) txt += '<br><i>IRS Form Location:</i>' formdict = variable['form'] for yrange in sorted(formdict.keys()): txt += '<br>{}: {}'.format(yrange, formdict[yrange]) txt += '</p>' return txt
04fdb1727c8eb783f7fb2c0324852e80673e8b77
708,971
def get_Theta_ref_cnd_H(Theta_sur_f_hex_H):
    """(23)

    Args:
        Theta_sur_f_hex_H: surface temperature of the indoor unit heat exchanger during heating (℃)

    Returns:
        Condensing temperature of the refrigerant during heating (℃)
    """
    Theta_ref_cnd_H = Theta_sur_f_hex_H
    if Theta_ref_cnd_H > 65:
        Theta_ref_cnd_H = 65
    return Theta_ref_cnd_H
deccaa524aebda2a7457da53b44c517287a190a4
708,972
def basic_streamalert_config(): """Generate basic StreamAlert configuration dictionary.""" return { 'global': { 'account': { 'aws_account_id': '123456789123', 'kms_key_alias': 'stream_alert_secrets', 'prefix': 'unit-testing', 'region': 'us-west-2' }, 'terraform': { 'tfstate_bucket': 'unit-testing.streamalert.terraform.state', 'tfstate_s3_key': 'stream_alert_state/terraform.tfstate', 'tfvars': 'terraform.tfvars' }, 'infrastructure': { 'monitoring': { 'create_sns_topic': True, 'metric_alarms': { 'rule_processor': { 'Aggregate Unit Testing Failed Parses Alarm': { 'alarm_description': '', 'comparison_operator': 'GreaterThanOrEqualToThreshold', 'evaluation_periods': 1, 'metric_name': 'RuleProcessor-FailedParses', 'period': 300, 'statistic': 'Sum', 'threshold': 1.0 } } } } } }, 'lambda': { 'alert_processor_config': { 'handler': 'stream_alert.alert_processor.main.handler', 'source_bucket': 'unit-testing.streamalert.source', 'source_current_hash': '<auto_generated>', 'source_object_key': '<auto_generated>', 'third_party_libraries': [] }, 'rule_processor_config': { 'handler': 'stream_alert.rule_processor.main.handler', 'source_bucket': 'unit-testing.streamalert.source', 'source_current_hash': '<auto_generated>', 'source_object_key': '<auto_generated>', 'third_party_libraries': [ 'jsonpath_rw', 'netaddr' ] }, 'athena_partition_refresh_config': { 'current_version': '$LATEST', 'enable_metrics': False, 'enabled': True, 'handler': 'main.handler', 'memory': 128, 'partitioning': { 'firehose': {}, 'normal': { 'unit-testing.streamalerts': 'alerts' } }, 'source_bucket': 'unit-testing.streamalert.source', 'source_current_hash': '<auto_generated>', 'source_object_key': '<auto_generated>', 'third_party_libraries': [ 'backoff' ], 'timeout': 60 }, }, 'clusters': { 'prod': { 'id': 'prod', 'modules': { 'cloudwatch_monitoring': { 'enabled': True }, 'kinesis': { 'firehose': { 'enabled': True, 's3_bucket_suffix': 'streamalert.results' }, 'streams': { 'retention': 24, 'shards': 1 } }, 'kinesis_events': { 'enabled': True }, 'stream_alert': { 'alert_processor': { 'current_version': '$LATEST', 'memory': 128, 'timeout': 10 }, 'rule_processor': { 'current_version': '$LATEST', "enable_metrics": True, 'memory': 128, 'metric_alarms': { 'Prod Unit Testing Failed Parses Alarm': { 'alarm_description': '', 'comparison_operator': 'GreaterThanOrEqualToThreshold', 'evaluation_periods': 1, 'metric_name': 'RuleProcessor-FailedParses-PROD', 'period': 300, 'statistic': 'Sum', 'threshold': 1.0 } }, 'timeout': 10 } } }, 'outputs': { 'kinesis': [ 'username', 'access_key_id', 'secret_key' ] }, 'region': 'us-east-1' } } }
8e766fa73c9043888c6531659bccc57fcb1a88ea
708,973
def makemarkers(nb): """ Give a list of cycling markers. See http://matplotlib.org/api/markers_api.html .. note:: This what I consider the *optimal* sequence of markers, they are clearly differentiable one from another and all are pretty. Examples: >>> makemarkers(7) ['o', 'D', 'v', 'p', '<', 's', '^'] >>> makemarkers(12) ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>', 'o', 'D'] """ allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>'] longlist = allmarkers * (1 + int(nb / float(len(allmarkers)))) # Cycle the good number of time return longlist[:nb]
a1dc00cdb831b3b622670a5f36ba956273379b16
708,974
def _get_only_relevant_data(video_data):
    """
    Method to build ES document with only the relevant information
    """
    return {
        "kind": video_data["kind"],
        "id": video_data["id"],
        "published_at": video_data["snippet"]["publishedAt"],
        "title": video_data["snippet"]["title"],
        "description": video_data["snippet"]["description"],
        "thumbnail_url": video_data["snippet"]["thumbnails"]["default"]["url"],
        "channel_title": video_data["snippet"]["channelTitle"],
    }
b5d2a0cf2c5b7121c92e95adb524379d7cf3eb9c
708,975
import json def set_parameters(_configs, new=False): """ Sets configuration parameters Parameters ---------- _configs : Dictionary containing configuration options from the config file (config.json) new : bool Do you want to start from a new file? Returns ------- _configs : Updated dictionary containing configuration options from the config file (config.json) """ if new: _configs = {x: "NA" for x in _configs} print('*Do not include single or double quotes*\n') if _configs['eye_mask_path'] == 'NA': _eye_mask_path = input('Add the full eye mask filepath: ') _configs['eye_mask_path'] = _eye_mask_path if _configs['train_file'] == 'NA': _train_file = input('Add the name of the file used for training [peer1.nii.gz]: ') if not _train_file: _configs['train_file'] = 'peer1.nii.gz' else: _configs['train_file'] = _train_file if _configs['test_file'] == 'NA': _test_file = input('Which file would you like to predict eye movements from? [movie.nii.gz]: ') if not _test_file: _configs['test_file'] = 'movie.nii.gz' else: _configs['test_file'] = _test_file if _configs['use_gsr'] == 'NA': _use_gsr = input('Use global signal regression? (y/n) [n]: ') if (not _use_gsr) or (_use_gsr == 'n'): _configs['use_gsr'] = "0" else: _configs['use_gsr'] = "1" if _configs['motion_scrub'] == 'NA': _use_ms = input('Use motion scrubbing? (y/n) [n]: ') if (not _use_ms) or (_use_ms == 'n'): _configs['use_ms'] = "0" _configs['motion_threshold'] = "0" _configs['motion_scrub'] = "Not implemented" elif _use_ms == 'y': _configs['use_ms'] = "1" _motion_scrub_filename = input('Add the filename of the CSV that contains the framewise displacement \ time series [motion_ts.csv]: ') if not _motion_scrub_filename: _configs['motion_scrub'] = 'motion_ts.csv' else: _configs['motion_scrub'] = _motion_scrub_filename _motion_threshold = input('Add a motion threshold for motion scrubbing [.2]: ') if not _motion_threshold: _configs['motion_threshold'] = ".2" else: _configs['motion_threshold'] = _motion_threshold with open('peer/config.json', 'w') as f: json.dump(_configs, f) return _configs
7c0d52f5a2ee5df9b54278162570606d684a6a64
708,976
import os def read_requirements(filename='requirements.txt'): """Reads the list of requirements from given file. :param filename: Filename to read the requirements from. Uses ``'requirements.txt'`` by default. :return: Requirements as list of strings """ # allow for some leeway with the argument if not filename.startswith('requirements'): filename = 'requirements-' + filename if not os.path.splitext(filename)[1]: filename += '.txt' # no extension, add default def valid_line(line): line = line.strip() return line and not any(line.startswith(p) for p in ('#', '-')) def extract_requirement(line): egg_eq = '#egg=' if egg_eq in line: _, requirement = line.split(egg_eq, 1) return requirement return line with open(filename) as f: lines = f.readlines() return list(map(extract_requirement, filter(valid_line, lines)))
fcade21b1aaff9320b32f5572fc28f8a6d31b5ab
708,977
def sort(X): """ Return sorted elements of :param:`X` and array of corresponding sorted indices. :param X: Target vector. :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix` """ assert 1 in X.shape, "X should be vector." X = X.flatten().tolist()[0] return sorted(X), sorted(list(range(len(X))), key=X.__getitem__)
a176e2538fd1c0042eefc6962d1b354b7b4ca736
708,978
def get_query(sf, query_text, verbose=True): """ Returns a list of lists based on a SOQL query with the fields as the header column in the first list/row """ # execute query for up to 2,000 records gc = sf.query(query_text) records = gc['records'] if verbose: print('Reading from %s object' % records[0]['attributes']['type'], flush=True) headers = list(records[0].keys())[1:] # get the headers return_table = [ [record[heading] for heading in headers] for record in records] return_table.insert(0, headers) # the above is complete unless there are >2,000 records total_read_so_far = len(records) while not gc['done']: if verbose: print('Progress: {} records out of {}'.format( total_read_so_far, gc['totalSize']), flush=True) gc = sf.query_more(gc['nextRecordsUrl'], True) records = gc['records'] total_read_so_far += len(records) next_table = [ [record[heading] for heading in headers] for record in records] return_table.extend(next_table) return return_table
ea93b6652a2d455b368a831d8c6d6b4554023313
708,979
import io
import csv


def strip_blank(contents):
    """
    Strip the redundant blanks in file contents.
    """
    with io.StringIO(contents) as csvfile:
        csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
        rows = []
        for row in csvreader:
            rows.append(",".join(['"{}"'.format(x.strip()) for x in row]))
        return "\n".join(rows)
d446f2123aa3cfe3b1966151f323fa1c4e41cb08
708,980
def anova_old( expression, gene_id, photoperiod_set, strain_set, time_point_set, num_replicates ): """One-way analysis of variance (ANOVA) using F-test.""" num_groups = len(photoperiod_set) * len(strain_set) * len(time_point_set) group_size = num_replicates total_expression = 0 # First scan: calculate overall average. for pp in photoperiod_set: for ss in strain_set: for tt in time_point_set: total_expression += sum(expression[(gene_id, pp, ss, tt)]) overall_avg = total_expression / num_groups / group_size # Second scan: calculate variances. in_group_var = 0 bt_group_var = 0 for pp in photoperiod_set: for ss in strain_set: for tt in time_point_set: group = expression[(gene_id, pp, ss, tt)] group_avg = sum(group) / group_size in_group_var += group_size * (group_avg - overall_avg) ** 2 for element in group: bt_group_var += (element - group_avg) ** 2 dof = (num_groups - 1, group_size * num_groups - num_groups) f_stat = bt_group_var / dof[0] / in_group_var * dof[1] return f_stat, dof
f809e0e2be877e1a0f21ca1e05a7079db80254a1
708,981
def all_same(lst: list) -> bool:
    """Test if all list entries are the same."""
    return lst[1:] == lst[:-1]
4ef42fc65d64bc76ab1f56d6e03def4cb61cf6f0
708,982
def binary_find(N, x, array): """ Binary search :param N: size of the array :param x: value :param array: array :return: position where it is found. -1 if it is not found """ lower = 0 upper = N while (lower + 1) < upper: mid = int((lower + upper) / 2) if x < array[mid]: upper = mid else: lower = mid if array[lower] <= x: return lower return -1
ed6e7cc15de238381dbf65eb6c981676fd0525f5
708,983
def scalarmat(*q): """multiplies every object in q with each object in q. Should return a unity matrix for an orthonormal system""" ret=[] for a in q: toa=[] for b in q: toa.append(a*b) ret.append(toa) return ret
a61c813b548f1934e16517efc4d203c6390097fe
708,984
import networkx as nx


def modularity(partition, graph, weight='weight'):
    """Compute the modularity of a partition of a graph.

    Parameters
    ----------
    partition : dict
        the partition of the nodes, i.e a dictionary where keys are their
        nodes and values the communities
    graph : networkx.Graph
        the networkx graph which is decomposed
    weight : str, optional
        the key in graph to use as weight. Default to 'weight'

    Returns
    -------
    modularity : float
        The modularity

    Raises
    ------
    KeyError
        If the partition is not a partition of all graph nodes
    ValueError
        If the graph has no link
    TypeError
        If graph is not a networkx.Graph

    References
    ----------
    .. 1. Newman, M.E.J. & Girvan, M. Finding and evaluating community
       structure in networks. Physical Review E 69, 26113 (2004).

    Examples
    --------
    >>> G = nx.erdos_renyi_graph(100, 0.01)
    >>> part = best_partition(G)
    >>> modularity(part, G)
    """
    if type(graph) != nx.Graph:
        raise TypeError("Bad graph type, use only non directed graph")

    inc = dict([])
    deg = dict([])
    links = graph.size(weight=weight)
    if links == 0:
        raise ValueError("A graph without link has an undefined modularity")

    for node in graph:
        com = partition[node]
        deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)
        for neighbor, datas in graph[node].items():
            edge_weight = datas.get(weight, 1)
            if partition[neighbor] == com:
                if neighbor == node:
                    inc[com] = inc.get(com, 0.) + float(edge_weight)
                else:
                    inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.

    res = 0.
    for com in set(partition.values()):
        res += inc.get(com, 0.) - ((deg.get(com, 0.) ** 2) / (4. * links))
    return (1.0 / links) * res
371c3f5e362114896bf0559efe452d79af6e79f8
708,985
import os def listFiles(dir): """ Walks the path and subdirectories to return a list of files. Parameters ---------- dir : str the top directory to search subdirectories are also searched Returns ------- listname: list a list of files in dir and subdirectories Notes ----- This can be replaced by functions in `os.path`, as if 3.4, pathlib is probably better. It is not clear that this function is used anywhere in ChiantiPy """ alist = os.walk(dir) listname = [] for (dirpath,dirnames,filenames) in alist: if len(dirnames) == 0: for f in filenames: file = os.path.join(dirpath,f) if os.path.isfile(file): listname.append(file) else: for f in filenames: file = os.path.join(dirpath,f) if os.path.isfile(file): listname.append(file) return listname
e2cc32ffd29971fc11df0378f02f801932234569
708,986
def deque_to_yaml(representer, node):
    """Convert collections.deque to YAML"""
    return representer.represent_sequence("!collections.deque",
                                          (list(node), node.maxlen))
5ff503b4f21af58cf96d26171e078ddd5d754141
708,987
from typing import Callable def some_func(string: str, function: Callable) -> bool: """Check if some elements in a string match the function (functional). Args: string: <str> string to verify. function: <callable> function to call. Returns: True if some of elements are in the sequence are True. Examples: >>> assert some_func('abcdefg&%$', str.isalpha) >>> assert not some_func('&%$=', str.isalpha) """ return any(map(function, string)) and not all(map(function, string))
e67af6613975a6757905087397ff8b68e83ddbf6
708,988
import math


def fuel_requirement(mass: int) -> int:
    """Fuel is mass divided by three, rounded down, minus 2."""
    return math.floor(mass / 3) - 2
5899d9260fe7e353c3a1d882f624257d5009248d
708,990
def instruction2_task(scr): """ Description of task 1 """ scr.draw_text(text = "Great Work!! "+ "\n\nNow comes your TASK 3: **Consider an image**."+ "\n\nIf you press the spacebar now, an image will "+ "appear at the bottom of the screen. You can use the information from the"+ " image to make any modifications to the translation of the sentence."+ "\n\n***However in certain cases, the image is not related to the sentence "+ "or not present at all.***"+ "\n\nAfter looking at the image, say loudly if you'd like to modify your translation"+ " by saying "+ "\"I'd like to modify my translation.\" or \"I'd keep the same translation\""+ "\nif you would like to stick with your translation."+ "\n\nThe final TASK 4 is to **Say the translation again (modified or not)**."+ "\nPlease press the spacebar to indicate the start of your new translation.\nYou can stop your"+ " recording by pressing the spacebar and moving to the next sentence.", fontsize = 25) return scr
554191b520e1229ffc076bbed1c57f265e0c0964
708,991
import os def tail(f, lines=10, _buffer=4098): """Tail a file and get X lines from the end""" # place holder for the lines found lines_found = [] # block counter will be multiplied by buffer # to get the block size from the end block_counter = -1 # loop until we find X lines while len(lines_found) < lines: try: f.seek(block_counter * _buffer, os.SEEK_END) except IOError: # either file is too small, or too many lines requested f.seek(0) lines_found = f.readlines() break lines_found = f.readlines() # we found enough lines, get out # Removed this line because it was redundant the while will catch # it, I left it for history # if len(lines_found) > lines: # break # decrement the block counter to get the # next X bytes block_counter -= 1 return lines_found[-lines:]
20ccac940eff04a6ec57d98d32330ebfbb97037d
708,992
def create_lag_i(df,time_col,colnames,lag): """ the table should be index by i,year """ # prepare names if lag>0: s = "_l" + str(lag) else: s = "_f" + str(-lag) values = [n + s for n in colnames] rename = dict(zip(colnames, values)) # create lags dlag = df.reset_index() \ .assign(t=lambda d: d[time_col] + lag) \ .rename(columns=rename)[['i',time_col] + values] \ .set_index(['i',time_col]) # join and return return(df.join(dlag))
be6d4b390ae66cd83320b2c341ba3c76cfad2bdb
708,993
def crop_image(image_array, point, size): """ Cropping the image into the assigned size image_array: numpy array of image size: desirable cropped size return -> cropped image array """ img_height, img_width = point # assigned location in crop # for color image if len(image_array.shape) == 3: image_array = image_array[:, img_height:img_height + size[0], img_width:img_width + size[1]] # for gray image elif len(image_array.shape) == 2: image_array = image_array[img_height:img_height + size[0], img_width:img_width + size[1]] return image_array
8ee684719e3e4fea755466e810c645c1ccf7d7f5
708,994
import re def lines_in_pull(pull): """Return a line count for the pull request. To consider both added and deleted, we add them together, but discount the deleted count, on the theory that adding a line is harder than deleting a line (*waves hands very broadly*). """ ignore = r"(/vendor/)|(conf/locale)|(static/fonts)|(test/data/uploads)" lines = 0 files = pull.get_files() for f in files: if re.search(ignore, f.filename): #print("Ignoring file {}".format(f.filename)) continue lines += f.additions + f.deletions//5 if pull.combinedstate == "merged" and lines > 2000: print("*** Large pull: {lines:-6d} lines, {pr.created_at} {pr.number:-4d}: {pr.title}".format(lines=lines, pr=pull)) return lines
24aabd83c24c3f337f07b50c894f5503eadfc252
708,996
import sys def ensure_tty(file=sys.stdout): """ Ensure a file object is a tty. It must have an `isatty` method that returns True. TypeError is raised if the method doesn't exist, or returns False. """ isatty = getattr(file, 'isatty', None) if isatty is None: raise TypeError( 'Cannot detect tty, file has no `isatty` method: {}'.format( getattr(file, 'name', type(file).__name__) ) ) if not isatty(): raise TypeError( 'This will not work, file object is not a tty: {}'.format( getattr(file, 'name', type(file).__name__) ) ) return True
52981903549b5241c22073df94b39db3eb4e3271
708,997
import requests def save_to_disk(url, save_path): """ Saves to disk non-destructively (xb option will not overwrite) """ print('Downloading: %s' % url) r = requests.get(url) if r.status_code == 404: print('URL broken, unable to download: %s' % url) return False else: with open(save_path, 'xb') as f: f.write(r.content) return True
c9917a637026d999765364d3c276150681554129
708,998
import json
import hashlib


def hashify(params, max_length=8):
    """
    Create a short hashed string of the given parameters.

    :param params: A dictionary of key, value pairs for parameters.
    :param max_length: [optional] The maximum length of the hashed string.
    """
    param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
    param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
    return param_hash[:max_length]
e4a97a28fc2d0564da3e6b22f32735b4a2534c3e
709,000
import os
import io
import re


def version(package, encoding='utf-8'):
    """Obtain the package version from a python file e.g. pkg/__init__.py

    See <https://packaging.python.org/en/latest/single_source_version.html>.
    """
    path = os.path.join(os.path.dirname(__file__), package, '__init__.py')
    with io.open(path, encoding=encoding) as fp:
        version_info = fp.read()
    version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
                              version_info, re.M)
    if not version_match:
        raise RuntimeError("Unable to find version string.")
    return version_match.group(1)
6066b042a698d0ee2b816573a144c4dc5ac47a45
709,001
def unique_entries(results):
    """Prune non-unique search results."""
    seen = set()
    clean_results = []
    for i in results:
        if i['code'] not in seen:
            clean_results.append(i)
            seen.add(i['code'])
    return clean_results
c0c55ebd5aa76f3a7f44134a972019c3d26c1c48
709,002
def generate_offices_table(offices, by_office, by_polling_center, election_day, day_after_election_day): """ Pre-compute key data needed for generating election day office reports. """ offices_by_key = {str(office['code']): office for office in offices} rows = [] for key in sorted([key for key in by_office.keys()]): row = by_office[key] key = str(key) # copy name from the offices hash array row['english_name'] = offices_by_key[key]['english_name'] row['arabic_name'] = offices_by_key[key]['arabic_name'] on_election_day = row.get(election_day, {}) # get election day numbers row['opened'] = on_election_day.get('opened', 0) row['votes_reported_1'] = on_election_day.get('1', 0) row['votes_reported_2'] = on_election_day.get('2', 0) row['votes_reported_3'] = on_election_day.get('3', 0) # and aggregate counts row['reported_1'] = on_election_day.get('1_count', 0) row['reported_2'] = on_election_day.get('2_count', 0) row['reported_3'] = on_election_day.get('3_count', 0) # check for late results # We only want late reports for period 4. The JSON data has aggregate # numbers for office by day, but you can't tell which of those values are new reports on # EDAY+1 and which ones are replacements for values given on EDAY, so we have to iterate # through each center to get that info row['votes_reported_4'] = 0 reported_4 = 0 # Which polling centers are in this office? centers = {k: v for k, v in by_polling_center.items() if str(v['office_id']) == key} for center_id, center in centers.items(): if day_after_election_day in center and '4' in center[day_after_election_day]: # found a period 4 report on EDAY+1. Sum the votes and increment the report count row['votes_reported_4'] += center[day_after_election_day]['4'] reported_4 += 1 elif election_day in center and '4' in center[election_day]: # didn't find an EDAY+1 report, so use EDAY, if present row['votes_reported_4'] += center[election_day]['4'] reported_4 += 1 row['reported_4'] = reported_4 # save derived values row['not_opened'] = row['polling_center_count'] - row['opened'] row['not_reported_1'] = row['polling_center_count'] - row['reported_1'] row['not_reported_2'] = row['polling_center_count'] - row['reported_2'] row['not_reported_3'] = row['polling_center_count'] - row['reported_3'] row['not_reported_4'] = row['polling_center_count'] - reported_4 row['closed'] = reported_4 # reporting final tally means center closed rows.append(row) return rows
85111ed67e8f6b8dce71af2844ee865699f3fe01
709,003
def pfunc_role_coverage(args):
    """Another intermediate function for parallelization; as for pfunc_doctor_banding."""
    rota = args[0]
    role = args[1]
    return rota.get_role_coverage(role)
043ce250b428d443de90c7aa5fa8e8dcc2869303
709,004
def globalPrediction(vid, category_names, vid_probs, predicted_labels): """ Get a matrix of probabilities over the classes for the c3d features of a video. Generate the top 3 predictions from the prob matrix """ anno_list = [] # Idea 1 : To form the hist over the categories, each bin has sum of probs vprobs_sum = vid_probs.sum(axis=0) top_n = vprobs_sum.sort_values(ascending = False)[:3] #counter = collections.Counter(predicted_labels) #top_n = counter.most_common(3) # list of tuples #assert len(top_n)==3 labels = top_n.index.tolist() scores = top_n.values.tolist() for idx,score in enumerate(scores): anno_list.append({'score': score, 'label':labels[idx]}) #for (idx,score) in top_n: # anno_list.append({'score': score, 'label':category_names[idx]}) # Idea 2 : Detect temporal continuity of category predicted. Longer the better # Idea 3 : Count the number of highest votes for top category. (Worse than 1) # If equal votes for >1 category then use Idea 1 # finds the max val index among the columns for each row and the freq of the # occurrence of the column names (in decreasing order) # labels = vid_probs.idxmax(axis=1).value_counts()[:3].index.tolist() # scores = probs_sum[labels].tolist() # for idx,score in enumerate(scores): # anno_list.append({'score': score, 'label':labels[idx]}) return anno_list, vprobs_sum
51676499cbf719874c49b89557d960ed8a136243
709,005
from typing import Optional
from typing import Any


def geq(column: str, value: Optional[Any]) -> str:
    """
    >>> geq("col", None)
    '1'
    >>> geq("col", 1)
    'col >= 1'
    >>> geq("col", "1")
    "col >= '1'"
    """
    if not value:
        return "1"
    if isinstance(value, str):
        return f"{column} >= '{value}'"
    return f"{column} >= {value}"
9216b8e2480232840ad37d8fe0e5c0f07b88873f
709,006
def longest_match(list1, list2): """ Find the length of the longest substring match between list1 and list2. >>> longest_match([], []) 0 >>> longest_match('test', 'test') 4 >>> longest_match('test', 'toast') 2 >>> longest_match('supercalifragilisticexpialidocious', 'mystical californication') 5 """ m = len(list1) n = len(list2) data = [[0 for col in range(n+1)] for row in range(m+1)] for a in range(1, m+1): for b in range(1, n+1): if list1[a-1] == list2[b-1]: data[a][b] = 1 + data[a-1][b-1] else: data[a][b] = 0 maxes = [max(row) for row in data] return max(maxes)
4a84dacbb0d59fc7f9c4b59e87e55c72416b8c80
709,007
import multiprocessing


def get_cpu_count():
    """
    Try to estimate the number of CPUs on the host.

    First use the native multiprocessing function, then fall back to parsing
    the content of /proc/cpuinfo.  If neither method works, 4 is returned.
    """
    try:
        cpucount = multiprocessing.cpu_count()
    except Exception:
        try:
            s = open("/proc/cpuinfo").read()
            cpucount = int(s.split('processor')[-1].split(":")[1].split("\n")[0])
            cpucount += 1
        except Exception:
            cpucount = 4
    return cpucount
db58112537c4a111ec1ef24eeab70227678d6d1e
709,008
def pylm_component(name):
    """Decorator for registering a class to lightmetrica"""
    def pylm_component_(object):
        # Get base class
        base = object.__bases__[0]
        base.reg(object, name)
        return object
    return pylm_component_
531c7e3f224b824b438011d4be348a76154b3444
709,009
import torch


def dice_score(input_mask, target_mask, eps=1e-5):
    """
    input_mask: (B * K, HW)   # probabilities [0, 1]
    target_mask: (B * K, HW)  # binary
    """
    dims = tuple(range(1, input_mask.ndimension()))
    intersections = torch.sum(input_mask * target_mask, dims)  # (B, N)
    cardinalities = torch.sum(input_mask + target_mask, dims)
    dice = (2. * intersections + eps) / (cardinalities + eps)
    return dice
8fbe4b7aaec4a45d7dec4705e4c3feb348250b64
709,010
import torch


def psnr(img1, img2):
    """Compute PSNR between two images."""
    MSE = torch.mean((img1 - img2) ** 2)
    return 10 * torch.log10(1 ** 2 / MSE)
f216733631d224aa27f5c5a395c143c3768f8f28
709,012
import os


def get_para_input(arg):
    """Get input directory parameter."""
    input_dir = os.path.abspath(arg)
    if str(input_dir).endswith('/'):
        input_dir = input_dir[:-1]
    input_dir = input_dir.replace('\\', '/')
    return input_dir
13ad4d14cac7c4b77e40d0e264e5197b2fbb459b
709,013