Dataset columns:
    content — string, lengths 35 to 416k
    sha1 — string, length 40
    id — int64, 0 to 710k
def scheming_field_by_name(fields, name):
    """
    Simple helper to grab a field from a schema field list
    based on the field name passed. Returns None when not found.
    """
    for f in fields:
        if f.get('field_name') == name:
            return f
ba4d04585b12ab941db8bc0787b076c32e76cadb
707,894
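A quick usage sketch for scheming_field_by_name above, with illustrative values:

fields = [{'field_name': 'title', 'label': 'Title'}, {'field_name': 'notes'}]
print(scheming_field_by_name(fields, 'title'))    # {'field_name': 'title', 'label': 'Title'}
print(scheming_field_by_name(fields, 'missing'))  # None (falls through the loop)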
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
    """Returns the number of days between year1/month1/day1 and
    year2/month2/day2. Assumes inputs are valid dates in Gregorian calendar,
    and the first date is not after the second."""
    month = month2
    year = year2
    day = day2 - day1
    if (day < 0):
        day += 30
        month -= 1
    month = month - month1
    if (month < 0):
        month += 12
        year -= 1
    year = year - year1
    return (year * 360) + month * 30 + day
687a7ff0b29ec2a931d872c18057741d93571ac1
707,895
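Worth noting: despite its docstring, the function above implements a 30/360 day-count approximation rather than exact Gregorian day differences. An illustrative check:

# One full year under the 30/360 convention comes out to exactly 360 days.
print(daysBetweenDates(2019, 1, 1, 2020, 1, 1))  # 360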
def collapse(board_u):
    """ takes a row/column of the board and collapses it to the left """
    i = 1
    limit = 0
    while i < 4:
        if board_u[i] == 0:
            i += 1
            continue
        up_index = i - 1
        curr_index = i
        while up_index >= 0 and board_u[up_index] == 0:
            board_u[up_index] = board_u[curr_index]
            board_u[curr_index] = 0
            up_index -= 1
            curr_index -= 1
        if up_index >= limit and board_u[up_index] == board_u[curr_index]:
            board_u[up_index] *= 2
            board_u[curr_index] = 0
            limit = curr_index
        i += 1
    return board_u
a79a3c7b83355f95face09e0ab21ab0b15a81053
707,896
import re


def find_match_in_file(search_term, file_location):
    """
    This function is used to query a file
    search_term = Term to find
    file_location = Location of file to query.
    """
    try:
        with open(file_location) as line:
            for search in line:
                result = re.match(search_term, search)
                if result:
                    return result
            return
    except Exception as err:
        print(err)
d78776069c8f2b4da5f09bf0ce3e675e215ee584
707,897
def create_C1(data_set):
    """
    Create frequent candidate 1-itemset C1 by scanning data set.

    Args:
        data_set: A list of transactions. Each transaction contains several items.

    Returns:
        C1: A set which contains all frequent candidate 1-itemsets
    """
    C1 = set()
    for t in data_set:
        for item in t:
            item_set = frozenset([item])
            C1.add(item_set)
    return C1
9f3deb61c6c3b982976c61c4247102431794daa8
707,898
def generalise_sent_pos(s):
    """
    generalise sentence pattern by POS tags only
    :param s:
    :return:
    """
    rets = []
    for token in s['sent']:
        e = token.idx + len(token.text)
        is_matched = False
        for ann in s['anns']:
            if token.idx >= ann['s'] and e <= ann['e']:
                rets.append((token.text, token.pos_, True,
                             ann['signed_label'], ann['gt_label']))
                is_matched = True
                break
        # print '%s-%s, %s: [%s]' % (token.idx, e, token.idx, token.text)
        if not is_matched:
            rets.append((token.text, token.pos_))
    return {"sent": s['sent'].text, 'pattern': rets}
03092bd253f13739d918438839ff4234f5ef80af
707,899
def line_break(text, line_len=79, indent=1):
    """Split some text into an array of lines.
    Enter: text: the text to split.
           line_len: the maximum length of a line.
           indent: how much to indent all but the first line.
    Exit:  lines: an array of lines.
    """
    lines = [text.rstrip()]
    while len(lines[-1]) > line_len:
        pos = lines[-1].rfind(' ', 0, line_len)
        if pos < 0:
            pos = line_len
        lines[-1:] = [lines[-1][:pos].rstrip(),
                      ' ' * indent + lines[-1][pos:].strip()]
    return lines
34b866109689796a4d428e7d3a68a34f7152250f
707,900
def altsumma(f, k, p):
    """Return the sum of f(i) from i=k, k+1, ... till p(i) holds true or 0.

    This is an implementation of the Summation formula from Kahan,
    see Theorem 8 in Goldberg, David 'What Every Computer Scientist
    Should Know About Floating-Point Arithmetic', ACM Computer Survey,
    Vol. 23, No. 1, March 1991."""
    if not p(k):
        return 0
    else:
        S = f(k)
        C = 0
        j = k + 1
        while p(j):
            Y = f(j) - C
            T = S + Y
            C = (T - S) - Y
            S = T
            j += 1
    return S
952e77fcedfbe01658342126d95b79175c082976
707,902
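A usage sketch for altsumma above (illustrative values): a compensated sum of 1/i^2, where Kahan's correction term C limits the accumulation of rounding error.

# Sum 1/i^2 for i = 1..1_000_000; the result approaches pi^2/6 ≈ 1.644934.
total = altsumma(lambda i: 1.0 / (i * i), 1, lambda i: i <= 1_000_000)
print(total)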
def word_sorter(x):
    """
    Function to sort the word frequency pairs after frequency
    Lowest frequency collocates first - highest frequency collocates last
    """
    # getting length of list of word/frequency pairs
    lst = len(x)
    # sort by frequency
    for i in range(0, lst):
        for j in range(0, lst - i - 1):
            if (x[j][1] > x[j + 1][1]):
                temp = x[j]
                x[j] = x[j + 1]
                x[j + 1] = temp
    return(x)
571570bb03d6473b9c6839aa6fdc0b1ba8efbe3c
707,903
def le_assinatura():
    """[Reads the values of the model's linguistic traits and returns a
    signature to be compared against the supplied texts.]

    Returns:
        [list] -- [description]
    """
    print("Bem-vindo ao detector automático de COH-PIAH.")
    print("Informe a assinatura típica de um aluno infectado:")
    wal = float(input("Entre o tamanho médio de palavra:"))
    ttr = float(input("Entre a relação Type-Token:"))
    hlr = float(input("Entre a Razão Hapax Legomana:"))
    sal = float(input("Entre o tamanho médio de sentença:"))
    sac = float(input("Entre a complexidade média da sentença:"))
    pal = float(input("Entre o tamanho medio de frase:"))
    return [wal, ttr, hlr, sal, sac, pal]
b6a0bacb02f3f878a88a6681d87d11408c292fe2
707,904
import argparse


def parse_args() -> argparse.Namespace:
    """
    Creates the parser for train arguments
    Returns:
        The parser
    """
    parse = argparse.ArgumentParser()
    parse.add_argument('--local_rank', dest='local_rank', type=int, default=0)
    parse.add_argument('--port', dest='port', type=int, default=44554)
    parse.add_argument('--model', dest='model', type=str, default='bisenetv2')
    parse.add_argument('--finetune-from', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/models/5/best_model.pth')
    parse.add_argument('--im_root', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data')
    parse.add_argument('--train_im_anns', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/train.txt')
    parse.add_argument('--val_im_anns', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/val.txt')
    parse.add_argument('--log_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/logs/regular_logs')
    parse.add_argument('--false_analysis_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/false_analysis')
    parse.add_argument('--tensorboard_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/logs/tensorboard_logs')
    parse.add_argument('--models_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/models')
    parse.add_argument('--config_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/configs/main_cfg.yaml')
    parse.add_argument('--amp', type=bool, default=True)
    return parse.parse_args()
70370de42d6e80f108399776c119724c3417a125
707,905
def GenerateConfig(_):
    """Returns empty string."""
    return ''
ed42eb1c320ca1df25603a53d4abf4a1b14215f3
707,906
def if_statement(env, node):
    """
    'If' statement def for AST.
    interpret - runtime function for Evaluator (true or false statement
    depending on condition).
    """
    condition_value = node.condition.interpret(env)
    if condition_value:
        node.true_stmt.interpret(env)
    else:
        if node.alternatives_stmt:
            for alternative_stmt in node.alternatives_stmt:
                alternative_condition_value = alternative_stmt.interpret(env)
                if alternative_condition_value:
                    return True
        if node.false_stmt:
            node.false_stmt.interpret(env)
    return condition_value
96522698c42d7649d3951f5fd2ffe3bbd992c985
707,908
import re
import json


def str_to_list_1(string):
    """
    Parameters
    ----------
    string : str
        The str of first line in each sample of sample.txt

    Returns
    ---------
    final_list: lst
    """
    final_list = []
    li = re.findall(r'\[.*?\]', string)
    for ele in li:
        final_list.append(json.loads(ele))
    return final_list
92b4b11a339d2101a0af5408caee58cc9b9668a1
707,909
def one_mini_batch(data, batch_indices):
    """
    Produce one mini-batch of the data.
    :param data:
    :param batch_indices:
    :return:
    """
    batch_data = {
        "raw_data": [data[i] for i in batch_indices],
        "word_id_list": [],
        "label_vector": []
    }
    for data in batch_data["raw_data"]:
        batch_data["word_id_list"].append(data["word_id_list"])
        batch_data["label_vector"].append(data["label_vector"])
    return batch_data
2bbbd62a00422431bb3322ebfce26d7fe95edc09
707,910
def get_maya_property_name(prop, ignore_channel=False):
    """
    Given a property, return a reasonable Maya name to use for it.
    If ignore_channel is True, return the property for the whole vector,
    eg. return '.translate' instead of '.translateX'.

    This doesn't create or query anything. It just generates a name to use
    elsewhere.
    """
    prop_parts = prop.path.split('/')

    # Get the property key, without any channel suffixes attached.
    prop_key = prop_parts[0]

    mapping = {
        'translation': 'translate',
        'rotation': 'rotate',
        'scale': 'scale',
    }
    maya_key = None
    if prop_key in mapping:
        prop_key = mapping[prop_key]

    if prop.path.count('/') == 1 and not ignore_channel:
        # If we've been given a single channel, eg. rotation/x, return it.
        assert len(prop_parts) == 2, prop_parts
        assert prop_parts[1] in ('x', 'y', 'z'), prop_parts
        return '%s%s' % (prop_key, prop_parts[1].upper())
    else:
        # Otherwise, return the vector itself.
        return prop_key
591a49f054db3936d5a345919a2c69491b6f345e
707,911
def prefix_sums(A):
    """
    This function calculates the sums of elements in a given slice
    (contiguous segments of an array). Its main idea uses prefix sums which
    are defined as the consecutive totals of the first 0, 1, 2, ..., n
    elements of an array.

    Args:
        A: an array represents number of mushrooms growing on the
            consecutive spots along a road.

    Returns:
        an array contains the consecutive sums of the first n elements
        of an array A

    To use:
        >> A = [2, 3, 7, 5, 1, 3, 9]
        >> print(prefix_sums(A))
        [0, 2, 5, 12, 17, 18, 21, 30]

    Time Complexity: O(n)
    """
    n = len(A)
    P = [0] * (n + 1)
    for k in range(1, n + 1):
        P[k] = P[k - 1] + A[k - 1]
    return P
d61e49eb4a973f7718ccef864d8e09adf0e09ce2
707,913
import re


def checkParams(opts):
    """
    Check whether the module name follows the naming rules
    and whether the directory exists.
    """
    res = {}
    for opt, arg in opts:
        if opt in ('--name'):
            if re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', arg):
                res['name'] = arg
            else:
                return res
        elif opt in ('--dir'):
            res['dir'] = arg
        elif opt in ('--type'):
            res['type'] = arg
        else:
            print("Unknown option " + arg)
    res['dir'] = res['dir'] + res['name'] + '/'
    return res
5b8306a1c9805786e4a98509dcea3af59ffd04d1
707,915
def sql2dict(queryset):
    """Return a SQL alchemy style query result into a list of dicts.

    Args:
        queryset (object): The SQL alchemy result.

    Returns:
        result (list): The converted query set.
    """
    if queryset is None:
        return []
    return [record.__dict__ for record in queryset]
c55fa18773142cca591aac8ed6bdc37657569961
707,916
def float_to_bin(x, m_digits: int):
    """
    Convert a number x in range [0,1] to a binary string truncated to length
    m_digits

    arguments:
        x: float
        m_digits: integer

    return:
        x_bin: string
            The decimal representation of digits AFTER '0.'

    Ex: Input 0.75 has binary representation 0.11
        Then this function would return '11'
    """
    if x < 0 or x >= 1:
        raise ValueError("x must be in interval [0,1)")
    x_round = round(x * 2**m_digits)
    # print(x_round)
    # print(2**m_digits)
    if x_round == 2**m_digits:
        x_round = 0
    x_raw = bin(x_round)
    x_bin = x_raw[2:].zfill(m_digits)
    return x_bin
f95e72d9449b66681575b230f6c858e8b3833cc2
707,917
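Illustrative calls for float_to_bin above:

print(float_to_bin(0.75, 2))  # '11' (0.75 = 0.11 in binary)
print(float_to_bin(0.1, 8))   # '00011010' (round(0.1 * 256) = 26)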
from typing import Callable
from typing import List


def apply(func: Callable, args: List):
    """Call `func` expanding `args`.

    Example:
        >>> def add(a, b):
        >>>     return a + b
        >>> apply(add, [1, 2])
        3
    """
    return func(*args)
f866087d07c7c036b405f8d97ba993f12c392d76
707,918
import torch


def l2_mat(b1, b2):
    """b1 has size B x M x D, b2 has size b2 B x N x D, res has size P x M x N

    Args:
        b1:
        b2:

    Returns:
    """
    b1_norm = b1.pow(2).sum(dim=-1, keepdim=True)
    b2_norm = b2.pow(2).sum(dim=-1, keepdim=True)
    res = torch.addmm(b2_norm.transpose(-2, -1), b1, b2.transpose(-2, -1),
                      alpha=-2).add_(b1_norm)
    # mask = 1.0 - torch.ones(res.shape[0]).diag().to(res.device)
    res = res.clamp_min_(torch.finfo(torch.float32).eps).sqrt_()
    # res = res * mask
    return res
ad254c2c11dccab5dd97c7e72ef3b00c7b6143fb
707,919
import fnmatch
import os


def find_files(base, pattern):
    """Return list of files matching pattern in base folder."""
    return [n for n in fnmatch.filter(os.listdir(base), pattern)
            if os.path.isfile(os.path.join(base, n))]
e84dd19e6746d92de1852f162eaa997734ac245c
707,920
def compute_correlations(states):
    """compute_correlations.

    Calculate the average correlation of spin 0 and every other spin.

    Parameters
    ----------
    states : list of states.
        ``len(states)`` must be >= 1!

    Returns
    -------
    correlations : list of floats.
    """
    return [
        sum(s[0] * s[i] for s in states) / len(states)
        for i in range(len(states[0]))
    ]
471949aa63a3d65b262fb9dad1c77d160a3f5ac7
707,921
def rhs_of_rule(rule):
    """ This function takes a grammatical rule, and returns its RHS """
    return rule[0]
004b99ac97c50f7b33cc798997463a28c3ae9a6f
707,922
def to_bin(val):
    """
    Receive int and return a string in binary. Padded by 32 bits considering
    2's complement for negative values
    """
    COMMON_DIGITS = 32
    val_str = "{:b}".format(val)  # Count '-' in negative case
    padded_len = len(val_str) + ((COMMON_DIGITS - (len(val_str) % COMMON_DIGITS)) % COMMON_DIGITS)
    if val < 0:
        val_2_complement = val & ((1 << padded_len) - 1)
        final_val_str = "{:b}".format(val_2_complement)
    else:
        final_val_str = "0" * (padded_len - len(val_str)) + val_str
    return(final_val_str)
819d1c0a9d387f6ad1635f0fe0e2ab98b3ca17b0
707,923
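Illustrative calls for to_bin above, covering the positive and two's-complement cases:

print(to_bin(5))   # '00000000000000000000000000000101'
print(to_bin(-5))  # '11111111111111111111111111111011'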
import os


def reddit_client_secret() -> str:
    """Client secret of the reddit app."""
    value = os.getenv("REDDIT_CLIENT_SECRET")
    if not value:
        raise ValueError("REDDIT_CLIENT_SECRET environment variable not set")
    return value
dfddbb4b7306b9638b68f3b75721471a82118a64
707,924
def rgetattr(obj, attr):
    """
    Get named attribute from an object, i.e. getattr(obj, 'a.a') is
    equivalent to ``obj.a.a''.

    - obj: object
    - attr: attribute name(s)

    >>> class A: pass
    >>> a = A()
    >>> a.a = A()
    >>> a.a.a = 1
    >>> rgetattr(a, 'a.a')
    1
    >>> rgetattr(a, 'a.c')
    Traceback (most recent call last):
    ...
    AttributeError: 'A' object has no attribute 'c'
    """
    attrs = attr.split(".")
    obj = getattr(obj, attrs[0])
    for name in attrs[1:]:
        obj = getattr(obj, name)
    return obj
5fb58634c4ba910d0a20753c04addf667614a07f
707,925
def lambda1_plus_lambda2(lambda1, lambda2):
    """Return the sum of the primary object's tidal deformability and the
    secondary object's tidal deformability
    """
    return lambda1 + lambda2
4ac3ef51bb66861b06b16cec564f0773c7692775
707,926
def unicode_is_ascii(u_string):
    """Determine if unicode string only contains ASCII characters.

    :param str u_string: unicode string to check. Must be unicode
        and not Python 2 `str`.
    :rtype: bool
    """
    assert isinstance(u_string, str)
    try:
        u_string.encode('ascii')
        return True
    except UnicodeEncodeError:
        return False
2a742c7334d68fe0bf6b546fb79bf00a338355f9
707,927
import grp
import os
import subprocess


def _is_industrial_user():
    """Checking if industrial user is trying to use relion_it.."""
    if not grp:
        # We're not on a linux/unix system, therefore not at Diamond
        return False
    not_allowed = ["m10_valid_users", "m10_staff", "m08_valid_users", "m08_staff"]
    uid = os.getegid()
    fedid = grp.getgrgid(uid)[0]
    groups = str(subprocess.check_output(["groups", str(fedid)]))
    return any(group in groups for group in not_allowed)
8b462431a96b25b7fc9a456807bfcd087a799651
707,928
def production(*args):
    """Creates a production rule or list of rules from the input.

    Supports two kinds of input:

    A parsed string of form "S->ABC" where S is a single character, and ABC
    is a string of characters. S is the input symbol, ABC is the output
    symbols. Neither S nor ABC can be any of the characters "-", ">" for
    obvious reasons.

    A tuple of type (S, Seq, ...) where S is the symbol of some hashable
    type and seq is an finite iterable representing the output symbols.

    Naturally if you don't want to use characters/strings to represent
    symbols then you'll typically need to use the second form.

    You can pass multiple inputs to generate multiple production rules,
    in that case the result is a list of rules, not a single rule. If you
    pass multiple inputs the symbol must differ since a simple L-System
    only supports one production rule per symbol.

    Example:
    >>> production("F->Ab[]")
    ('F', ['A', 'b', '[', ']'])
    >>> production("F->Ab[]", ("P", "bAz"), (1, (0,1)))
    [('F', ['A', 'b', '[', ']']), ('P', ['b', 'A', 'z']), (1, [0, 1])]
    """
    if len(args) < 1:
        raise ValueError("missing arguments")
    res = []
    for a in args:
        if issubclass(str, type(a)):
            parts = a.split(sep="->", maxsplit=1)
            if len(parts) < 2:
                raise ValueError("couldn't parse invalid string \"{}\"".format(a))
            res.append((parts[0], list(parts[1])))
        elif issubclass(tuple, type(a)):
            s, to, *vals = a
            res.append((s, list(to)))
        else:
            raise TypeError("sorry don't know what to do with " + str(type(a)))
    if len(res) == 1:
        return res[0]
    return res
bcb3e415a283f654ab65e0656a3c7e3912eeb53b
707,929
def flatten(items):
    """Convert a sequence of sequences to a single flat sequence.

    Works on dictionaries, tuples, lists.
    """
    result = []
    for item in items:
        if isinstance(item, list):
            result += flatten(item)
        else:
            result.append(item)
    return result
d44e3391f791dfd2ec9b323c37c510a415bb23bf
707,930
from typing import Tuple


def extract_entity_type_and_name_from_uri(uri: str) -> Tuple[str, str]:
    """
    Extract the type and name of an entity from its URI.
    :param uri: e.g. http://www.kg.com/kg/ontoligies/ifa#Firm/百度
    :return: ('Firm', '百度')
    """
    name_separator = uri.rfind('/')
    type_separator = uri.rfind('#')
    return uri[type_separator + 1: name_separator], uri[name_separator + 1:]
a70b1fdb5490f029cc6a88bee53eee048731a709
707,931
def load_rokdoc_well_markers(infile):
    """
    Function to load well markers exported from RokDoc in ASCII format.
    """
    with open(infile, 'r') as fd:
        buf = fd.readlines()

    marker = []
    well = []
    md = []
    tvdkb = []
    twt = []
    tvdss = []
    x = []
    y = []

    for line in buf[5:]:
        c1, c2, c3, c4, c5 = line.split("'")
        c6, c7, c8, c9, c10, c11 = c5.strip().split()
        marker.append(c2)
        well.append(c4)
        md.append(float(c6))
        tvdkb.append(float(c7))
        twt.append(float(c8))
        tvdss.append(float(c9))
        x.append(float(c10))
        y.append(float(c11))

    markers = {}
    for each in list(set(well)):
        markers[each] = {}

    for i in range(len(marker)):
        cur_well = well[i]
        cur_marker = marker[i]
        cur_md = md[i]
        cur_tvdkb = tvdkb[i]
        cur_tvdss = tvdss[i]
        cur_twt = twt[i]
        cur_x = x[i]
        cur_y = y[i]
        markers[cur_well][cur_marker] = {'md': cur_md, 'tvdkb': cur_tvdkb,
                                         'tvdss': cur_tvdss, 'twt': cur_twt,
                                         'x': cur_x, 'y': cur_y}
    return markers
f3a781accdd84ff2f5aff12e59aeff05aa428d6a
707,933
def _is_src(file):
    """
    Returns true if the file is a source file

    Bazel allows for headers in the srcs attributes, we need to filter them out.

    Args:
        file (File): The file to check.
    """
    if file.extension in ["c", "cc", "cpp", "cxx", "C", "c++", "C++"] and \
            file.is_source:
        return True
    return False
b0466073d4d1b05c5cab37946fb6ca8432dc752d
707,934
def calculate_signal_strength(rssi):
    # type: (int) -> int
    """Calculate the signal strength of access point."""
    signal_strength = 0
    if rssi >= -50:
        signal_strength = 100
    else:
        signal_strength = 2 * (rssi + 100)
    return signal_strength
d5a0955446e0fe0548639ddd1a849f7e7901c36b
707,935
def lookAtThisMethod(
    first_parameter,
    second_paramter=None,
    third_parameter=32,
    fourth_parameter="a short string as default argument",
    **kwargs
):
    """The point of this is see how it reformats parameters
    It might be fun to see what goes on
        Here I guess it should respect this spacing,
            since we are in a comment.
    We are done!
    """
    return kwargs["whatever"](
        first_parameter * third_parameter,
        second_paramter,
        fourth_parameter,
        "extra string because I want to",
    )
8dab028b40184bb7cf686c524d5abd452cee2bc3
707,936
def parse_bjobs_nodes(output):
    """Parse and return the bjobs command run with options to obtain node
    list, i.e. with `-w`.

    This function parses and returns the nodes of a job in a list with the
    duplicates removed.

    :param output: output of the `bjobs -w` command
    :type output: str
    :return: compute nodes of the allocation or job
    :rtype: list of str
    """
    nodes = []
    lines = output.split("\n")
    nodes_str = lines[1].split()[5]
    nodes = nodes_str.split(":")
    return list(dict.fromkeys(nodes))
a582307d0d869d2dbde454928571246320cb6e31
707,938
def coord_for(n, a=0, b=1):
    """Function that takes 3 parameters or arguments, listed above, and
    returns a list of the interval division coordinates."""
    a = float(a)
    b = float(b)
    coords = []
    inc = (b - a) / n
    for x in range(n + 1):
        coords.append(a + inc * x)
    return coords
57e12200dcc113786c9deeb4865d7906d74c763f
707,940
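Illustrative calls for coord_for above:

print(coord_for(4))        # [0.0, 0.25, 0.5, 0.75, 1.0]
print(coord_for(2, 1, 5))  # [1.0, 3.0, 5.0]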
import subprocess


def get_rpm_package_list():
    """ Gets all installed packages in the system """
    pkgstr = subprocess.check_output(['rpm', '-qa', '--queryformat', '%{NAME}\n'])
    return pkgstr.splitlines()
3ffddefe7e3859f4bc76ae5581b89338c376e03f
707,941
def validate_ints(*args):
    """ validates that inputs are ints only """
    for value in args:
        if not isinstance(value, int):
            return False
    return True
e56ebf78e072731188b2c8282289d307fcfaabdf
707,942
def _cals(raw):
    """Helper to deal with the .cals->._cals attribute change."""
    try:
        return raw._cals
    except AttributeError:
        return raw.cals
a08273a559b780022c04fe5d5d60a71c600fd481
707,943
def workaround_issue_20(handler):
    """
    Workaround for https://github.com/pytest-dev/pytest-services/issues/20,
    disabling installation of a broken handler.
    """
    return hasattr(handler, 'socket')
20d688aedad9e771362d97ad9cac391e7dbfac32
707,944
def item_count(sequences, sequence_column_name):
    """
    input: Dataframe sequences
    """
    item_max_id = sequences[sequence_column_name].map(max).max()
    return int(item_max_id)
9bcb64ff3389ef34ed297bca4f55b4de66ac5966
707,945
def define_network(*addr):
    """gives all network related data or host addresses if requested

    addr = tuple of arguments netaddr/mask[nb of requested hosts]
    """
    if len(addr) == 2:
        # provides list of host-addresses for this subnet
        # we do this by calling the generator host_g
        host_g = addr[0].hosts()
        return [next(host_g).exploded for i in range(addr[1])]
    else:
        netdef = [(' Network Address:', addr[0].network_address.exploded),
                  (' Broadcast Address:', addr[0].broadcast_address.exploded),
                  (' Valid Hosts:', 2 ** (32 - addr[0].prefixlen) - 2),
                  (' Wildcard Mask:', addr[0].hostmask.exploded),
                  (' Mask bits:', addr[0].prefixlen),
                  ]
        return [(' ' + addr[0].network_address.exploded + '/32', '')] \
            if addr[0].prefixlen == 32 else netdef
905cf702fda005645c608b9dadb84f3659d991c1
707,946
from typing import List


def init_anim() -> List:
    """Initialize the animation."""
    return []
121fff8b4102c2961449d970307e762bd983bdbe
707,947
def keep_digits(txt: str) -> str:
    """Discard from ``txt`` all non-numeric characters."""
    return "".join(filter(str.isdigit, txt))
34387003ea03651dd2582b3c49f1095c5589167b
707,948
import six


def validate_hatch(s):
    """
    Validate a hatch pattern.
    A hatch pattern string can have any sequence of the following
    characters: ``\\ / | - + * . x o O``.
    """
    if not isinstance(s, six.text_type):
        raise ValueError("Hatch pattern must be a string")
    unique_chars = set(s)
    unknown = (unique_chars -
               set(['\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O']))
    if unknown:
        raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
    return s
4ddf056dab2681759a462005effc4ae5488a4461
707,950
def filter_example(config, example, mode="train"):
    """
    Whether to filter a given example according to the configuration.
    :param config: config contains parameters for filtering example
    :param example: an example instance
    :param mode: "train" or "test", they differ in filter restrictions
    :return: boolean
    """
    if mode == "train":
        return (len(example["ans_sent_tokens"]) > config.sent_limit or
                len(example["ques_tokens"]) > config.ques_limit or
                (example["y2_in_sent"] - example["y1_in_sent"]) > config.ans_limit)
    elif mode == "test":
        return (len(example["ans_sent_tokens"]) > config.sent_limit or
                len(example["ques_tokens"]) > config.ques_limit)
    else:
        print("mode must be train or test")
9c49990fe36c0a82d0a99a62fe810a19cd5a8749
707,951
def _dict_flatten(data):
    """Return flattened dict of input dict <data>.

    After https://codereview.stackexchange.com/revisions/21035/3

    Parameters
    ----------
    data : dict
        Input dict to flatten

    Returns
    -------
    fdata : dict
        Flattened dict.
    """
    def expand(key, value):
        """Expand list."""
        if isinstance(value, dict):
            return [(key + '>' + k, v) for k, v in _dict_flatten(value).items()]
        else:
            return [(key, value)]

    return dict([item for k, v in data.items() for item in expand(k, v)])
a1db4a552ced44efa45fe4f86fbfe04871463356
707,952
def _get_item(i, j, block):
    """
    Returns a single item from the block. Coords must be in block space.
    """
    return block[i, j]
45a12ecb3959a75ad8f026616242ba64174441fc
707,953
def check_all_rows(A):
    """
    Check if all rows in 2-dimensional matrix don't have more than one queen
    """
    for row_inx in range(len(A)):
        # compute sum of row row_inx
        if sum(A[row_inx]) > 1:
            return False
    return True
e39f4ca3e401c02b13c5b55ed4389a7e6deceb40
707,954
import subprocess
import sys


def _run_cli_cmd(cmd_list):
    """Run a shell command and return the error code.

    :param cmd_list: A list of strings that make up the command to execute.
    """
    try:
        return subprocess.call(cmd_list)
    except Exception as e:
        print(str(e))
        sys.exit(1)
473d28ec5469ff195b716edfe32723e2379303f3
707,955
def group_set_array_data_ptr(d):
    """
    call view%set_external_data_ptr
    hide c_loc call and add target attribute
    """
    # XXX - should this check the type/shape of value against the view?
    # typename - part of function name
    # nd - number of dimensions
    # f_type - fortran type
    # shape - :,:, to match nd
    if d['rank'] == 0:
        extents_decl = 'extents(1)'
        extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
    else:
        extents_decl = 'extents(%d)' % d['rank']
        extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'

    return """
! Generated by genfsidresplicer.py
! This function does nothing if view name does not exist in group.
subroutine group_set_array_data_ptr_{typename}{nd}(grp, name, value)
    use iso_c_binding
    implicit none
    class(SidreGroup), intent(IN) :: grp
    character(len=*), intent(IN) :: name
    {f_type}, target, intent(IN) :: value{shape}
    integer(C_INT) :: lname
    type(SIDRE_SHROUD_view_capsule) view
!    integer(SIDRE_IndexType) :: {extents_decl}
!    integer(C_INT), parameter :: type = {sidre_type}
    type(C_PTR) addr, viewptr

    lname = len_trim(name)
!    {extents_asgn}
    viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
    if (c_associated(view%addr)) then
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
        addr = c_loc(value)
#else
        call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
        call c_view_set_external_data_ptr_only(view, addr)
!        call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
    endif
end subroutine group_set_array_data_ptr_{typename}{nd}""".format(
        extents_decl=extents_decl,
        extents_asgn=extents_asgn,
        **d)
36a18ca9099edf24d37386103f111bde7753ed46
707,956
def get_experiment_type(filename):
    """
    Get the experiment type from the filename.
    The filename is assumed to be in the form of:
    '<reliability>_<durability>_<history kind>_<topic>_<timestamp>'

    :param filename: The filename to get the type.
    :return: A string where the timestamp is taken out from the filename.
    """
    file_type = ''
    filename = filename.split('/')[-1]
    elements = filename.split('_')
    for i in range(0, len(elements) - 3):
        file_type += '{}_'.format(elements[i])
    file_type = file_type[:-1]
    return file_type
e1853a95d034b8f9e36ca65f6f5d200cbf4b86dc
707,957
import pytz


def getAwareTime(tt):
    """
    Generates timezone aware timestamp from timezone unaware timestamp

    PARAMETERS
    ------------
    :param tt: datetime timezone unaware timestamp

    RETURNS
    ------------
    :return: datetime timezone aware timestamp
    """
    timezone = pytz.timezone("Europe/Amsterdam")
    return (timezone.localize(tt))
1b286c92c7f5d8f0ff48d77296489fbd358c14ce
707,958
def inverse_word_map(word_map):
    """
    Create an inverse word mapping.
    :param word_map: word mapping
    """
    return {v: k for k, v in word_map.items()}
4048a21ea1c75791a92d57ee0a440a6c9d31b6b9
707,959
def get_coalition_wins_sql_string_for_state(coalition_id, state_id):
    """
    :type party_id: integer
    """
    str = """
        select lr.candidate_id, c.fullname as winning_candidate,
            lr.constituency_id, cons.name as constituency,
            lr.party_id, lr.max_votes, (lr.max_votes-sr.votes) as lead,
            sr.candidate_id, loosing_candidate.fullname as runner_up,
            loosing_party.name as runner_up_party, sr.party_id,
            winning_party.name, ltw.party_id
        from latest_results lr
        inner join latest_runners_up as sr on sr.constituency_id = lr.constituency_id
        inner join candidate c on c.id = lr.candidate_id
        inner join constituency cons on cons.id = lr.constituency_id
        inner join party winning_party on lr.party_id = winning_party.id
        inner join party loosing_party on loosing_party.id = sr.party_id
        inner join candidate loosing_candidate on loosing_candidate.id = sr.candidate_id
        inner join last_time_winners ltw on ltw.constituency_id = lr.constituency_id
        where winning_party.coalition_id = %s and cons.state_id = %s
            and lr.status = 'DECLARED'
        order by lead DESC""" % (coalition_id, state_id)
    return str
76fb0704779e20e8a53ca80dc17c969f1e455d20
707,960
import torch


def iou_score(pred_cls, true_cls, nclass, drop=(), mask=None):
    """
    compute the intersection-over-union score
    both inputs should be categorical (as opposed to one-hot)
    """
    assert pred_cls.shape == true_cls.shape, 'Shape of predictions should match GT'
    if mask is not None:
        assert mask.dim() == true_cls.dim(), \
            'Mask should have the same dimensions as inputs'
    intersect_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
    union_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
    idx = 0
    for i in range(nclass):
        if i not in drop:
            intersect = (pred_cls == i).byte() + (true_cls == i).byte()
            if mask is not None:
                intersect *= mask.byte()
            intersect = intersect.eq(2).sum()
            union = (pred_cls == i).byte() + (true_cls == i).byte()
            if mask is not None:
                union *= mask.byte()
            union = union.ge(1).sum()
            intersect_[idx] = intersect
            union_[idx] = union
            idx += 1
    return intersect_, union_
d38871f339b2126d418a7fca53fbfd874e263aa2
707,961
import queue


def task_checkqueue(storage):
    """
    Task that watches a queue for messages and acts on them when received.
    """
    # Get the queue object from the storage dictionary
    thequeue = storage.get("queue")
    try:
        # Use a timeout so it blocks for at-most 0.5 seconds while waiting
        # for a message. Smaller values can be used to increase the cycling
        # of the task and responsiveness to Threadify control signals
        # (like pause) if desired.
        msg = thequeue.get(block=True, timeout=.5)
    except queue.Empty:
        print("_", end="")
    else:
        if msg == "QUIT":
            return False
        # Print received message
        print("{:s}".format(msg), end="")
    return True
3c7e8cfda53abb0551916894719e66b3d27886e9
707,962
import os


def get_stand_exe() -> str:
    """Get the path to standexe

    Returns:
        Path to standexe

    Raises:
        ValueError: If STAND_EXE is not found in environment variables.
    """
    if os.environ['STAND_EXE']:
        return os.environ['STAND_EXE']
    else:
        raise ValueError('STAND_EXE environment variable is not found.')
da44d23239060874965617c24ab0bd678c9535b9
707,964
def b64pad(b64data):
    """Pad base64 string with '=' to achieve a length that is a multiple of 4
    """
    return b64data + '=' * (4 - (len(b64data) % 4))
bdc14821bfbdbf220ff371fbe5e486d3e682337b
707,965
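An illustrative call for b64pad above. Note the edge case: when the input length is already a multiple of 4, the expression appends four '=' characters rather than none.

print(b64pad('abc'))   # 'abc='
print(b64pad('abcd'))  # 'abcd====' (edge case: four pad characters added)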
def parse_copy_core_dump(raw_result):
    """
    Parse the 'parse_copy_core_dump' command raw output.

    :param str raw_result: copy core-dump raw result string.
    :rtype: dict
    :return: The parsed result of the copy core-dump to server:
    ::

        {
            0:{
                'status': 'success'
                'reason': 'core dump copied'
            }
        }
    """
    if "Error code " in raw_result:
        return {"status": "failed", "reason": "Error found while coping"}
    if "No coredump found for" in raw_result:
        return {"status": "failed", "reason": "no core dump found"}
    if "Failed to validate instance ID" in raw_result:
        return {"status": "failed", "reason": "instance ID not valid"}
    if "ssh: connect to host" in raw_result:
        return {"status": "failed", "reason": "ssh-connection issue for SFTP"}
    if (
        "copying ..." in raw_result and
        "Sent " in raw_result and
        "bytes" in raw_result and
        "seconds" in raw_result
    ):
        return {"status": "success", "reason": "core dump copied"}
    else:
        return {"status": "failed", "reason": "undefined error"}
4ce168c9bc8c462ecc36beba889adb36cc64135d
707,966
def compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
    """One way to compute a per-particle "4D" offset in terms of an adjustable
    lamb and constant per-particle parameters.

    Notes
    -----
    (ytz): this initializes the 4th dimension to a fixed plane adjust by an
    offset followed by a scaling by cutoff.

    lambda_plane_idxs are typically 0 or 1 and allows us to turn off an
    interaction independent of the lambda value.

    lambda_offset_idxs are typically 0 and 1, and allows us to adjust the w
    coordinate in a lambda-dependent way.
    """
    w = cutoff * (lambda_plane_idxs + lambda_offset_idxs * lamb)
    return w
a9455ed67fcb21bcf1382fe66a77e0563f467421
707,967
def endgame_score_connectfour(board, is_current_player_maximizer):
    """Given an endgame board, returns 1000 if the maximizer has won,
    -1000 if the minimizer has won, or 0 in case of a tie."""
    chains_1 = board.get_all_chains(current_player=is_current_player_maximizer)
    chains_2 = board.get_all_chains(current_player=not(is_current_player_maximizer))
    for chain in chains_1:
        if len(chain) == 4:
            return 1000
    for chain in chains_2:
        if len(chain) == 4:
            return -1000
    return 0
bcb37381a9633377cb3405fbae45123e2a391df9
707,969
import copy


def identify_all_failure_paths(network_df_in, edge_failure_set, flow_dataframe, path_criteria):
    """Identify all paths that contain an edge

    Parameters
    ---------
    network_df_in - Pandas DataFrame of network
    edge_failure_set - List of string edge ID's
    flow_dataframe - Pandas DataFrame of list of edge paths
    path_criteria - String name of column of edge paths in flow dataframe

    Outputs
    -------
    network_df - Pandas DataFrame of network with removed edges
    edge_path_index - List of integer indexes of locations of paths in flow dataframe
    """
    edge_path_index = []
    network_df = copy.deepcopy(network_df_in)
    for edge in edge_failure_set:
        network_df = network_df[network_df.edge_id != edge]
        edge_path_index += flow_dataframe.loc[flow_dataframe[path_criteria].str.contains(
            "'{}'".format(edge))].index.tolist()

    edge_path_index = list(set(edge_path_index))
    return network_df, edge_path_index
db2da6ad20a4ae547c309ac63b6e68a17c3874e7
707,970
def update_bitweights(realization, asgn, tileids, tg_ids, tg_ids2idx, bitweights):
    """
    Update bit weights for assigned science targets
    """
    for tileid in tileids:
        try:  # Find which targets were assigned
            adata = asgn.tile_location_target(tileid)
            for loc, tgid in adata.items():
                idx = tg_ids2idx[tgid]
                bitweights[realization * len(tg_ids) + idx] = True
        except:
            pass
    return bitweights
f1b7e085d43e36b025aa1c61ab1b7156ba1d3ed7
707,971
def load_from_input_flags(params, params_source, input_flags):
    """Update params dictionary with input flags.

    Args:
        params: Python dictionary of hyperparameters.
        params_source: Python dictionary to record source of hyperparameters.
        input_flags: All the flags with non-null value of overridden
            hyperparameters.

    Returns:
        Python dict of hyperparameters.
    """
    if params is None:
        raise ValueError(
            'Input dictionary is empty. It is expected to be loaded with default '
            'values')

    if not isinstance(params, dict):
        raise ValueError(
            'The base parameter set must be a Python dict, was: {}'.format(
                type(params)))

    for key in params:
        flag_value = input_flags.get_flag_value(key, None)
        if flag_value is not None:
            params[key] = flag_value
            params_source[key] = 'Command-line flags'

    return params, params_source
7ec8662f03469f1ed03f29c9f7e9663c49aa7056
707,972
import os
import glob


def generate_dada_filelist(filename):
    """Generate a list of DADA files from start filename

    Args:
        filename (str): Path to file. e.g.
            /data/dprice/2020-07-23-02:33:07.587_0000000000000000.000000.dada

    Returns:
        flist (list): A list of all associated files
    """
    bn = os.path.basename(filename)
    dn = os.path.dirname(filename)
    bn_root = '_'.join(bn.split('_')[:-1])  # Strips off _000.000.dada bit
    flist = sorted(glob.glob(os.path.join(dn, bn_root + '_*.dada')))
    return flist
55cde1a818e78886ace3aa89ff9535f099033a79
707,973
import os
import shutil
import subprocess


def buildWheels(buildDir, requirements):
    """build wheels

    :param buildDir: directory to put wheels in (under 'wheelhouse')
    :type buildDir: string
    :param requirements: name of file holding names of Python packages
    :type requirements: string
    """
    wheelhouse = os.path.join(buildDir, 'wheelhouse')
    if os.path.exists(wheelhouse):
        shutil.rmtree(wheelhouse)
    subprocess.check_call(['pip', 'wheel', '--requirement', requirements,
                           '--wheel-dir', wheelhouse])
    subprocess.check_call(['pip', 'wheel', 'setuptools==15.2',
                           '--wheel-dir', wheelhouse])
    subprocess.check_call(['pip', 'wheel', '.', '--wheel-dir', wheelhouse])
    return wheelhouse
81bb1879ee1ce0e711dc36fe55cf0b47ad48f3c7
707,974
def kml_start(params):
    """Define basic kml header string"""
    kmlstart = '''
    <Document>
        <name>%s</name>
        <open>1</open>
        <description>%s</description>
    '''
    return kmlstart % (params[0], params[1])
c2fa4c1eeff086dfc3baa41ecd067634920b25b1
707,975
import sys
import numpy


def _create_rpc_callback(label, result_counter):
    """Creates RPC callback function.

    Args:
        label: The correct label for the predicted example.
        result_counter: Counter for the prediction result.
    Returns:
        The callback function.
    """
    def _callback(result_future):
        """Callback function.

        Calculates the statistics for the prediction result.

        Args:
            result_future: Result future of the RPC.
        """
        exception = result_future.exception()
        if exception:
            result_counter.inc_error()
            print(exception)
        else:
            sys.stdout.write('.')
            sys.stdout.flush()
            response = numpy.array(
                result_future.result().outputs['scores'].float_val)
            prediction = numpy.argmax(response)
            if label != prediction:
                result_counter.inc_error()
        result_counter.inc_done()
        result_counter.dec_active()
    return _callback
6b3276e9db5d551cb5abdd3f3f9b1f5ce041b02e
707,976
def get_file_iterator(options):
    """
    returns a sequence of files

    raises IOError if problematic
    raises ValueError if problematic
    """
    # -------- BUILD FILE ITERATOR/GENERATOR --------
    if options.f is not None:
        files = options.f
    elif options.l is not None:
        try:
            lfile = open(options.l, 'r')
            # make a generator of non-blank lines
            files = (line.strip() for line in lfile if line.strip())
        except IOError:
            msg = "{0} does not exist.".format(options.l)
            raise IOError(msg)
    else:
        msg = "Must provide input files or file list."
        raise ValueError(msg)
    return files
53b16f49d14dc346e404a63415772dd2a1d10f50
707,977
def find_last_match(view, what, start, end, flags=0):
    """Find last occurrence of `what` between `start`, `end`.
    """
    match = view.find(what, start, flags)
    new_match = None
    while match:
        new_match = view.find(what, match.end(), flags)
        if new_match and new_match.end() <= end:
            match = new_match
        else:
            return match
fc863cf00d05a1fb6302a34b5b1e891e3c9eb3d7
707,978
import asyncio


async def get_series(database, series_id):
    """Get a series."""
    series_query = """
        select
            series.id,
            series.played,
            series_metadata.name,
            rounds.tournament_id,
            tournaments.id as tournament_id,
            tournaments.name as tournament_name,
            events.id as event_id,
            events.name as event_name
        from series
        join rounds on series.round_id=rounds.id
        join series_metadata on series.id=series_metadata.series_id
        join tournaments on rounds.tournament_id=tournaments.id
        join events on tournaments.event_id=events.id
        where series.id=:id
    """
    participants_query = 'select series_id, name, score, winner from participants where series_id=:id'
    matches_query = 'select id, series_id from matches where series_id=:id'
    values = {'id': series_id}
    series, participants, matches = await asyncio.gather(
        database.fetch_one(series_query, values=values),
        database.fetch_all(participants_query, values=values),
        database.fetch_all(matches_query, values=values)
    )
    return dict(
        series,
        participants=list(map(dict, participants)),
        match_ids=list(map(lambda m: m['id'], matches)),
        tournament=dict(
            id=series['tournament_id'],
            name=series['tournament_name'],
            event=dict(
                id=series['event_id'],
                name=series['event_name']
            )
        )
    )
f5e122052209c399c41afcd579f9b16e863c7a28
707,979
import math


def calc_distance(p1, p2):
    """ calculates a distance on a 2d euclidean space, between two points"""
    dist = math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
    return dist
d4005d44d5724c051860fb9aa2edeab1654157c6
707,980
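A quick check for calc_distance above, using a 3-4-5 triangle:

print(calc_distance((0, 0), (3, 4)))  # 5.0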
def is_unique_n_bit_vector(string: str) -> bool:
    """
    Similar to the dict solution, it just uses a bit vector instead of a dict
    or array.
    """
    vector = 0
    for letter in string:
        if vector & 1 << ord(letter):
            return False
        vector |= 1 << ord(letter)
    return True
d19609f1fb1e6a189a9adb11b37a96632c8d0958
707,981
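Illustrative calls for is_unique_n_bit_vector above:

print(is_unique_n_bit_vector("abc"))  # True
print(is_unique_n_bit_vector("aba"))  # False ('a' repeats)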
def crtb_cb(client, crtb):
    """Wait for the crtb to have the userId populated"""
    def cb():
        c = client.reload(crtb)
        return c.userId is not None
    return cb
eff248a877e195e59d2f6db812af2ff43955aee0
707,982
def _matching_not_matching(on, **kwargs):
    """ Change the text for matching/not matching """
    text = "matching" if not on else "not matching"
    classname = "colour-off" if not on else "colour-on"
    return text, classname
aeefa7f16e3268ffe7af93db72490abe053370b2
707,983
import json


def metadata_to_list(metadata):
    """Transform a metadata dictionary retrieved from Cassandra to a list
    of tuples. If metadata items are lists they are split into multiple
    pairs in the result list

    :param metadata: dict"""
    res = []
    for k, v in metadata.iteritems():
        try:
            val_json = json.loads(v)
            val = val_json.get('json', '')
            # If the value is a list we create several pairs in the result
            if isinstance(val, list):
                for el in val:
                    res.append((k, el))
            else:
                if val:
                    res.append((k, val))
        except ValueError:
            if v:
                res.append((k, v))
    return res
1044a93742a635e72e443d3a5c2e5805702d1602
707,984
def keep_english_for_spacy_nn(df):
    """This function takes the DataFrame for songs and keeps songs with
    English as the main language, for the English version of the spaCy
    neural network used for word processing"""
    # Keep only English for spaCy NN English word preprocessing
    # Networks for other languages like French, Spanish, Portuguese are also available
    df = df.loc[df['Main Language'] == 'en', :]
    # Drop the translation column, not used for lyrics in English
    df.drop(['English Translation Lyrics'], axis=1, inplace=True)
    return df
e24402fa91ee0444c86867c98777fbd3cb7c9894
707,985
def parse_coap_response_code(response_code):
    """
    Parse the binary code from CoAP response and return the response code
    as a float. See also https://tools.ietf.org/html/rfc7252#section-5.9
    for response code definitions.

    :rtype float
    """
    response_code_class = response_code // 32
    response_code_detail = response_code % 32

    # Compose response code
    return response_code_class + response_code_detail / 100
9a8165f205ec2f6fe8576e18a831498f82834a10
707,986
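Illustrative calls for parse_coap_response_code above, using codes from RFC 7252:

print(parse_coap_response_code(69))   # 2.05 (Content)
print(parse_coap_response_code(132))  # 4.04 (Not Found)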
def binary_search(sorted_list, item):
    """
    Implements a Binary Search, O(log n).
    If item is in list, returns amount of steps.
    If item not in list, returns None.
    """
    steps = 0
    start = 0
    end = len(sorted_list)

    while start < end:
        steps += 1
        mid = (start + end) // 2
        # print("#", mid)
        if sorted_list[mid] == item:
            return steps

        # If the item is lesser than the list
        # item == 3 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
        # the END of my list becomes the middle (4), excluding all items
        # from the middle to the end
        # end == 4
        # next time, when mid = (start + end) // 2 executes, mid == 2
        if sorted_list[mid] > item:
            end = mid

        # If the item is bigger than the list
        # item == 8 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
        # the START of my list will be the middle (4) plus 1, excluding all
        # items from the middle to the beginning
        # start == 5
        # next time, when mid = (start + end) // 2 executes, mid == 8
        if sorted_list[mid] < item:
            start = mid + 1

    return None
30b1bba330752455d932b4c6cf1ad4dab5969db3
707,987
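Illustrative calls for binary_search above (note it returns the step count, not the index):

print(binary_search([1, 2, 3, 4, 5, 6, 8], 3))  # 3 (found on the third step)
print(binary_search([1, 2, 3, 4, 5, 6, 8], 7))  # None (not present)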
import traceback


def selectgender(value):
    """Format M/F as 男/女 (male/female).

    :param value: M/F
    :return: 男/女
    """
    absent = {"M": u'男', "F": u'女'}
    try:
        if value:
            return absent[value]
        return ""
    except:
        traceback.print_exc()
7b6b0b41b5ea8d3eaab5574881b40f5c00da73cd
707,988
def unpack_batch(batch, use_cuda=False):
    """
    Unpack a batch from the data loader.
    """
    input_ids = batch[0]
    input_mask = batch[1]
    segment_ids = batch[2]
    boundary_ids = batch[3]
    pos_ids = batch[4]
    rel_ids = batch[5]
    knowledge_feature = batch[6]
    bio_ids = batch[1]
    # knowledge_adjoin_matrix = batch[7]
    # know_segment_ids = batch[6]
    # know_input_ids = batch[7]
    # know_input_mask = batch[8]
    # knowledge_feature = (batch[6], batch[7], batch[8])
    return input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature, bio_ids
6bc8bc9b3c8a9e2b40ac08e67c9fbcf84914e2eb
707,989
def truncate(text: str, length: int = 255, end: str = "...") -> str:
    """Truncate text.

    Parameters
    ---------
    text : str
    length : int, default 255
        Max text length.
    end : str, default "..."
        The characters that come at the end of the text.

    Returns
    -------
    truncated text : str

    Examples
    --------
    .. code-block:: html

        <meta property="og:title" content="^^ truncate(title, 30) ^^">
    """
    return f"{text[:length]}{end}"
f14605542418ca95e4752be7ec2fea189b9454ce
707,990
def use_bcbio_variation_recall(algs):
    """Processing uses bcbio-variation-recall.

    Avoids core requirement if not used.
    """
    for alg in algs:
        jointcaller = alg.get("jointcaller", [])
        if not isinstance(jointcaller, (tuple, list)):
            jointcaller = [jointcaller]
        for caller in jointcaller:
            if caller not in set(["gatk-haplotype-joint", None, False]):
                return True
    return False
c833f9a2dd9523f78cf294a1822b251b6940a1cd
707,991
def merge_hedge_positions(df, hedge):
    """
    Merge multiple records in a table, then net them off against each other.

    :param self:
    :param df:
    :return:
    """
    # Temporary workaround: symbols like "i1709." (as returned by the api
    # query) and "i1709" would otherwise break the groupby
    if df.empty:
        return df

    df['Symbol'] = df['InstrumentID']

    # Merge
    df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag', 'Side'])[
        'Position'].sum().to_frame().reset_index()
    # print(df)

    # Hedge
    if hedge:
        df['Net'] = df['Side'] * df['Position']
        df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag'])['Net'].sum().to_frame().reset_index()
        df['Position'] = abs(df['Net'])
        df['Side'] = df['Net'] / df['Position']
        df = df[df['Position'] != 0]
        df = df[['Symbol', 'InstrumentID', 'HedgeFlag', 'Side', 'Position']]
        # print(df)

    return df
4bcaa8b160186c6c5e6e3382017d0db3ee9d6c6e
707,992
import numpy


def BackwardSubTri(U, y):
    """
    usage: x = BackwardSubTri(U,y)

    Row-oriented backward substitution to solve the upper-triangular,
    'tridiagonal' linear system
        U x = y

    This function does not ensure that U has the correct nonzero structure.
    It does, however, attempt to catch the case where U is singular.

    Inputs:
        U - square n-by-n matrix (assumed upper triangular and 'tridiagonal')
        y - right-hand side vector (n-by-1)

    Outputs:
        x - solution vector (n-by-1)
    """
    # check inputs
    m, n = numpy.shape(U)
    if (m != n):
        raise ValueError("BackwardSubTri error: matrix must be square")
    p = numpy.size(y)
    if (p != n):
        raise ValueError("BackwardSubTri error: right-hand side vector has incorrect dimensions")
    if (numpy.min(numpy.abs(numpy.diag(U))) < 100*numpy.finfo(float).eps):
        raise ValueError("BackwardSubTri error: matrix is [close to] singular")

    # create output vector
    x = y.copy()

    # perform backward-substitution algorithm
    for i in range(n-1, -1, -1):
        if (i < n-1):
            x[i] -= U[i, i+1]*x[i+1]
        x[i] /= U[i, i]

    return x
5b7c2c636eac0912aa26bc8a236f1c870b95c48b
707,993
def get_parameter_by_name(device, name):
    """ Find the given device's parameter that belongs to the given name """
    for i in device.parameters:
        if i.original_name == name:
            return i
    return
9669262a9bcac8b4c054e07b2c04b780b5f84f87
707,994
def sum_of_proper_divisors(number: int):
    """
    Let d(n) be defined as the sum of proper divisors of n (numbers less
    than n which divide evenly into n).

    :param number:
    :return:
    """
    divisors = []
    for n in range(1, number):
        if number % n == 0:
            divisors.append(n)
    return sum(divisors)
9015dd3809f90d328b0b4a6b51f6fcb145f0241d
707,996
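A quick check for sum_of_proper_divisors above, using the classic amicable pair 220 and 284:

print(sum_of_proper_divisors(220))  # 284
print(sum_of_proper_divisors(284))  # 220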
import functools


def catch_exception(func):
    """
    Returns:
        object:
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        worker = kwargs['error_catcher']
        try:
            return func(*args, **kwargs)
        except Exception as e:
            print('stdout:', worker.stdout.read().decode("utf-8"))
            print('stderr:', worker.stderr.read().decode("utf-8"))
            raise
    return wrapper
be579d9b6723e5025b7b70f38c83bcedc30196a5
707,997
def int_max(int_a, int_b):
    """ max(a, b) """
    if int_a > int_b:
        return int_a
    else:
        return int_b
5ae0df8ff7bdc5539d127fad4df03b6215d9380f
707,998
def get_cognates(wordlist, ref):
    """
    Retrieve cognate sets from a wordlist.
    """
    etd = wordlist.get_etymdict(ref=ref)
    cognates = {}

    if ref == "cogids":
        for cogid, idxs_ in etd.items():
            idxs, count = {}, 0
            for idx, language in zip(idxs_, wordlist.cols):
                if idx:
                    tks = wordlist[idx[0], "tokens"]
                    cogidx = wordlist[idx[0], ref].index(cogid)
                    idxs[language] = " ".join([
                        x.split("/")[1] if "/" in x else x
                        for x in tks.n[cogidx]])
                    count += 1
                else:
                    idxs[language] = ""
            if count >= 2:
                cognates[cogid] = idxs
    elif ref == "cogid":
        for cogid, idxs_ in etd.items():
            idxs, count = {}, 0
            for idx, language in zip(idxs_, wordlist.cols):
                if idx:
                    tks = wordlist[idx[0], "tokens"]
                    idxs[language] = " ".join([x.split("/")[1] if "/" in x
                                               else x for x in tks])
                    count += 1
                else:
                    idxs[language] = ""
            if count >= 2:
                cognates[cogid] = idxs
    return cognates
bf64ecb8f2182dba06f0b28b384c0e66ba78d49e
707,999
def get_lessons_of_day(day):
    """
    Returns the lessons as a string for the given day webelement

    :param day: day webelement
    :return: dictionary with day as key and list with lessons as value
    """
    day_lessons = []
    to_iterate = day.find_elements_by_class_name('event-content')
    to_iterate.reverse()
    for lesson in to_iterate:
        text = lesson.text
        day_lessons.append(text)
    return day_lessons
47b3ba18fd530ac8e724eb91e4b4d2886a008ac5
708,000
def bookShop():
    """
    This program solves the following exercise:

    Book Shop
    Link: https://cses.fi/problemset/task/1158

    It returns the maximum number of pages that can be obtained by buying
    books, given the price and page count of the available books and the
    amount of money available.
    """
    inputLine = input()
    inputArray = inputLine.split()
    inputArray = [int(x) for x in inputArray]
    numBooks = inputArray[0]
    totalPrice = inputArray[1]

    inputLine = input()
    inputArray = inputLine.split()
    inputArray = [int(x) for x in inputArray]
    prices = inputArray

    inputLine = input()
    inputArray = inputLine.split()
    inputArray = [int(x) for x in inputArray]
    pages = inputArray

    bag = [[0 for y in range(totalPrice + 1)] for x in range(numBooks + 1)]
    for i in range(1, len(bag)):
        price = prices[i - 1]
        page = pages[i - 1]
        for j in range(1, len(bag[0])):
            if j - price < 0:
                bag[i][j] = bag[i - 1][j]
            elif bag[i - 1][j - price] + page > bag[i - 1][j]:
                bag[i][j] = bag[i - 1][j - price] + page
            else:
                bag[i][j] = bag[i - 1][j]

    return bag[-1][-1]
52f2b3ca84c7d6db529f51e2c05ad4767d4466c7
708,001
import mpmath


def pdf(x, nu, sigma):
    """
    PDF for the Rice distribution.
    """
    if x <= 0:
        return mpmath.mp.zero
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        nu = mpmath.mpf(nu)
        sigma = mpmath.mpf(sigma)
        sigma2 = sigma**2
        p = ((x / sigma2) * mpmath.exp(-(x**2 + nu**2)/(2*sigma2)) *
             mpmath.besseli(0, x*nu/sigma2))
    return p
b2d96bc19fb61e5aaf542b916d06c11a0e3dea46
708,002
def make_adder(n):
    """Return a function that takes one argument k and returns k + n.

    >>> add_three = make_adder(3)
    >>> add_three(4)
    7
    """
    def adder(k):
        return k + n
    return adder
64808cb857f7bd17c8c81bfd749ed96efcc88a9f
708,004
import torch
from typing import Union
from typing import Tuple


def groupby_apply(
    keys: torch.Tensor,
    values: torch.Tensor,
    bins: int = 95,
    reduction: str = "mean",
    return_histogram: bool = False
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Groupby apply for torch tensors

    Args:
        keys: tensor of groups (``0`` to ``bins``)
        values: values to aggregate - same size as keys
        bins: total number of groups
        reduction: either "mean" or "sum"
        return_histogram: if to return histogram on top

    Returns:
        tensor of size ``bins`` with aggregated values and optionally with
        counts of values
    """
    if reduction == "mean":
        reduce = torch.mean
    elif reduction == "sum":
        reduce = torch.sum
    else:
        raise ValueError(f"Unknown reduction '{reduction}'")
    uniques, counts = keys.unique(return_counts=True)
    groups = torch.stack([reduce(item) for item in torch.split_with_sizes(values, tuple(counts))])
    reduced = torch.zeros(bins, dtype=values.dtype, device=values.device).scatter(dim=0, index=uniques, src=groups)
    if return_histogram:
        hist = torch.zeros(bins, dtype=torch.long, device=values.device).scatter(dim=0, index=uniques, src=counts)
        return reduced, hist
    else:
        return reduced
711acc0cf2eb30e978f7f30686dbf67644d51fb0
708,005