Columns: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def path_element_to_dict(pb):
    """datastore.entity_pb.Path_Element converter."""
    return {
        'type': pb.type(),
        'id': pb.id(),
        'name': pb.name(),
    }
2a4e757dedf6707dc412248f84b377c2f375e70c
2,678
def add(n):
    """Add 1."""
    return n + 1
c62cee4660540ae62b5b73369bdeb56ccb0088d6
2,679
def parse_color(c, desc):
    """Check that a given value is a color."""
    return c
ebabefbd56de120a753723f1dccb0f7c12af2fe6
2,680
def sortkey(d):
    """Split d on "_", reverse and return as a tuple."""
    parts = d.split("_")
    parts.reverse()
    return tuple(parts)
1d8f8864a3d0bfd7dae8711bca183317e0f3fc0e
2,683
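A minimal usage sketch for sortkey; the sample strings are hypothetical, not part of the dataset:

# Sort suffixed names by their suffix first, then by their prefix.
names = ["alpha_2", "beta_1", "alpha_1"]
print(sorted(names, key=sortkey))  # ['alpha_1', 'beta_1', 'alpha_2']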
def first_n(m: dict, n: int):
    """Return first n items of dict"""
    return {k: m[k] for k in list(m.keys())[:n]}
57ccc9f8913c60c592b38211900fe8d28feffb4c
2,684
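A short, hypothetical demonstration of first_n (relies on insertion-ordered dicts, Python 3.7+):

config = {'host': 'localhost', 'port': 8080, 'debug': True}
print(first_n(config, 2))  # {'host': 'localhost', 'port': 8080}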
import pickle


def save_calib(filename, calib_params):
    """Saves calibration parameters as '.pkl' file.

    Parameters
    ----------
    filename : str
        Path to save file, must be '.pkl' extension
    calib_params : dict
        Calibration parameters to save

    Returns
    -------
    saved : bool
        Saved successfully.
    """
    if not isinstance(calib_params, dict):
        raise TypeError("calib_params must be 'dict'")
    # use a context manager so the file is closed even if dumping fails,
    # and catch only pickling errors rather than a bare `except`
    with open(filename, 'wb') as output:
        try:
            pickle.dump(calib_params, output)
        except pickle.PicklingError:
            raise IOError("filename must be '.pkl' extension")
    saved = True
    return saved
6735c8a6e96158b9fc580b6e61609b5ae7733fe0
2,685
def context_to_dict(context):
    """Convert a django context to a dict."""
    the_dict = {}
    for elt in context:
        the_dict.update(dict(elt))
    return the_dict
b319c6be4efa83c91eefa249c8be90824bc0158f
2,686
def create_P(P_δ, P_ζ, P_ι):
    """
    Combine `P_δ`, `P_ζ` and `P_ι` into a single matrix.

    Parameters
    ----------
    P_δ : ndarray(float, ndim=1)
        Probability distribution over the values of δ.
    P_ζ : ndarray(float, ndim=2)
        Markov transition matrix for ζ.
    P_ι : ndarray(float, ndim=1)
        Probability distribution over the values of ι.

    Returns
    -------
    P : ndarray(float, ndim=4)
        Joint probability distribution over the values of δ, ζ and ι.
        Probabilities vary by δ on the first axis, by the current and next
        values of ζ on the second and third axes, and by ι on the fourth
        axis.
    """
    P = P_δ[:, None, None, None] * P_ζ[None, :, :, None] * \
        P_ι[None, None, None, :]
    return P
0afdef50c50563421bb7c6f3f928fa6b3e5f4733
2,687
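A small sketch checking the broadcast shape of create_P; the toy distributions are assumed for illustration:

import numpy as np

P_δ = np.array([0.3, 0.7])                # shape (2,)
P_ζ = np.array([[0.9, 0.1], [0.2, 0.8]])  # shape (2, 2)
P_ι = np.array([0.5, 0.25, 0.25])         # shape (3,)
P = create_P(P_δ, P_ζ, P_ι)
print(P.shape)  # (2, 2, 2, 3)
# Summing out the next-ζ and ι axes recovers P_δ for each current ζ:
print(P.sum(axis=(2, 3)))  # [[0.3, 0.3], [0.7, 0.7]]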
import typing


def median(vals: typing.List[float]) -> float:
    """Calculate median value of `vals`

    Arguments:
        vals {typing.List[float]} -- list of values

    Returns:
        float -- median value
    """
    # The original indexed `int(len(vals) / 2) - 1`, which returns the wrong
    # element (e.g. 1.0 for [1.0, 2.0, 3.0]); compute the median properly.
    ordered = sorted(vals)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2
9f840d11409a570a718fdfe56d7a282af43bc798
2,688
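A couple of sanity checks for the corrected median:

print(median([3.0, 1.0, 2.0]))       # 2.0 (odd length: middle element)
print(median([4.0, 1.0, 3.0, 2.0]))  # 2.5 (even length: mean of middle two)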
import glob
import os


def get_files(data_path):
    """Collect the image files in a directory and its immediate subdirectories.

    :param data_path: root directory to search
    :return: list of matching file paths
    """
    files = []
    exts = ['jpg', 'png', 'jpeg', 'JPG', 'bmp']
    for ext in exts:
        # glob.glob returns all matching file names;
        # gather both the top level and one level of subdirectories.
        files.extend(glob.glob(os.path.join(data_path, '*.{}'.format(ext))))
        files.extend(glob.glob(os.path.join(data_path, '*', '*.{}'.format(ext))))
    return files
1a81aa7679eb2c70d29d3e80423c4b2e860c307d
2,689
def f_raw(x, a, b):
    """The raw function call; performs no checks on valid parameters.

    :return: a * x + b
    """
    return a * x + b
89bbe9e7a08e3bf4bf37c3efa695ed20fdca95c5
2,690
import collections


def _generate_conversions():
    """
    Generate conversions for unit systems.
    """
    # conversions to inches
    to_inch = {'microinches': 1.0 / 1000.0,
               'mils': 1.0 / 1000.0,
               'inches': 1.00,
               'feet': 12.0,
               'yards': 36.0,
               'miles': 63360,
               'angstroms': 1.0 / 2.54e8,
               'nanometers': 1.0 / 2.54e7,
               'microns': 1.0 / 2.54e4,
               'millimeters': 1.0 / 2.54e1,
               'centimeters': 1.0 / 2.54e0,
               'meters': 1.0 / 2.54e-2,
               'kilometers': 1.0 / 2.54e-5,
               'decimeters': 1.0 / 2.54e-1,
               'decameters': 1.0 / 2.54e-3,
               'hectometers': 1.0 / 2.54e-4,
               'gigameters': 1.0 / 2.54e-11,
               'AU': 5889679948818.897,
               'light years': 3.72461748e17,
               'parsecs': 1.21483369e18}

    # if a unit is known by other symbols, include them here
    synonyms = collections.defaultdict(list)
    synonyms.update({'millimeters': ['mm'],
                     'inches': ['in', '"'],
                     'feet': ["'"],
                     'meters': ['m']})

    # add non-plural versions of units to conversions,
    # e.g. millimeters -> millimeter
    for key in to_inch.keys():
        if key[-2:] == 'es' and key != 'miles':
            synonyms[key].append(key[:-2])
        elif key[-1] == 's':
            synonyms[key].append(key[:-1])

    # update the dict with synonyms
    for key, new_keys in synonyms.items():
        value = to_inch[key]
        for new_key in new_keys:
            to_inch[new_key] = value

    # convert back to a regular dictionary and make keys all lower case
    to_inch = {k.strip().lower(): v for k, v in to_inch.items()}
    return to_inch
8fa4f625e693fe352b2bba0082d0b18c46f5bec1
2,691
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts.
    """
    return 1
278bb52f96e1a82ce9966626be08bc6fdd0df65d
2,692
import os


def checkpoint_metrics_path(checkpoint_path, eval_name, file_name=None):
    """Gets a path to the JSON of eval metrics for checkpoint in eval_name."""
    checkpoint_dir = os.path.dirname(checkpoint_path)
    checkpoint_name = os.path.basename(checkpoint_path)
    if eval_name:
        # This bit of magic is defined by the estimator framework, and isn't
        # easy to change. We only get to specify the suffix.
        checkpoint_dir = os.path.join(checkpoint_dir, 'eval_' + eval_name)
    if not file_name:
        return os.path.join(checkpoint_dir, checkpoint_name + '.metrics')
    return os.path.join(checkpoint_dir, file_name)
e176b873d13ae28f6a53100adba6ca437c4ce805
2,693
import json


def label(vertex):
    """Graph vertex label in dot format."""
    label = f"{vertex.name} {vertex.state or ''}\n{vertex.traceback or ''}"
    label = json.dumps(label).replace("\\n", r"\l")
    return f"[label={label}]"
a8604cfd837afbdba8b8ee7666d81df4b015ad2a
2,694
import re


def extract_version(version_file_name):
    """Extracts the version from a python file.

    The statement setting the __version__ variable must not be indented.
    Comments after that statement are allowed.
    """
    regex = re.compile(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]\s*(#.*)?$")
    with open(version_file_name, "r") as version_file:
        lines = version_file.read().splitlines()
    for line in reversed(lines):
        version_match = regex.match(line)
        if version_match:
            return version_match.group(1)
    else:
        raise RuntimeError("Unable to find version string.")
1cc70ba4bf69656bb8d210a49c236e38eba59513
2,698
import json
import uuid


def build_request_data(useralias, req_node):
    """build_request_data

    :param useralias: user alias for directory name
    :param req_node: simulated request node
    """
    if "file" not in req_node:
        return None

    use_uniques = req_node["unique_names"]
    use_file = req_node["file"].format(useralias)
    use_data = json.loads(open(use_file, 'r').read())

    if use_uniques:
        if "title" in use_data:
            use_data["title"] = "{}_{}".format(
                use_data["title"], str(uuid.uuid4()))
        if "full_file" in use_data:
            use_data["full_file"] = use_data["full_file"].format(
                str(uuid.uuid4()))
        if "clean_file" in use_data:
            use_data["clean_file"] = use_data["clean_file"].format(
                str(uuid.uuid4()))
        if "csv_file" in use_data:
            use_data["csv_file"] = use_data["csv_file"].format(
                str(uuid.uuid4()))
        if "meta_file" in use_data:
            use_data["meta_file"] = use_data["meta_file"].format(
                str(uuid.uuid4()))
        if "meta_suffix" in use_data:
            use_data["meta_suffix"] = use_data["meta_suffix"].format(
                str(uuid.uuid4()))

    return use_data
938c79c290e1e4c086e6d48f71cbd0b965d36b36
2,699
def _get_stmt_lists(self):
    """
    Returns a tuple of the statement lists contained in this `ast.stmt`
    node. This method should only be called by an `ast.stmt` node.
    """
    if self.is_simple():
        return ()
    elif self.is_body():
        return (self.body,)
    elif self.is_body_orelse():
        return (self.body, self.orelse)
    elif self.is_body_finally():
        return (self.body, self.finalbody)
    else:
        # Every statement has to be simple or complex.
        assert False
0ec85481bc4261ae77ced0ae32c72081ef80c651
2,700
import networkx


def nx_find_connected_limited(graph, start_set, end_set, max_depth=3):
    """Return the neurons in end_set reachable from start_set with limited depth."""
    reverse_graph = graph.reverse()
    reachable = []
    for e in end_set:
        preorder_nodes = list(
            networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes(
                reverse_graph, source=e, depth_limit=max_depth
            )
        )
        for s in start_set:
            if s in preorder_nodes:
                reachable.append(e)
                break
    return reachable
4322f4231be73b575d05442f09608c71c3b9f605
2,701
def hexbyte_2integer_normalizer(first_int_byte, second_int_byte):
    """Function to normalize integer bytes to a single byte

    Transform two integer bytes to their hex byte values and normalize
    their values to a single integer

    Parameters
    __________
    first_int_byte, second_int_byte : int
        integer values to normalize (0 to 255)

    Returns
    _______
    integer: int
        Single normalized integer
    """
    # Format each byte as two zero-padded hex digits. The original stripped
    # '0x' from hex() output with lstrip('0x'), which mangles values like 0
    # (hex(0) -> '0x0' -> '') and produced wrong results, e.g. for (255, 0).
    hex_string = f'{first_int_byte:02x}{second_int_byte:02x}'
    integer = int(hex_string, 16)
    return integer
a3bbe75014b6e08607314b615440039bab245f04
2,702
def address_repr(buf, reverse: bool = True, delimit: str = "") -> str:
    """Convert a buffer into a hexlified string."""
    order = range(len(buf) - 1, -1, -1) if reverse else range(len(buf))
    return delimit.join(["%02X" % buf[byte] for byte in order])
6b4b8921d6280cd688c3bfcfca82b2b5546001e7
2,703
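A hypothetical usage sketch for address_repr, e.g. printing a radio address in both byte orders:

addr = b'\x01\x02\x03'
print(address_repr(addr))                              # '030201' (reversed)
print(address_repr(addr, reverse=False, delimit=':'))  # '01:02:03'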
def get_correct_line(df_decisions):
    """
    The passed df has repeated lines for the same file (same chemin_source).
    We take the most recent one.

    :param df_decisions: Dataframe of decisions
    :return: Dataframe without repeated lines (according to the chemin_source column)
    """
    return df_decisions.sort_values('timestamp_modification') \
        .drop_duplicates('chemin_source', keep='last')
989f1aba1c5e0c61f8b7ca1c883baf4dd181ebbc
2,704
def fix_1(lst1, lst2):
    """
    Divide all of the elements in `lst1` by each element in `lst2`
    and return the values in a list.

    >>> fix_1([1, 2, 3], [0, 1])
    [1.0, 2.0, 3.0]
    >>> fix_1([], [])
    []
    >>> fix_1([10, 20, 30], [0, 10, 10, 0])
    [1.0, 2.0, 3.0, 1.0, 2.0, 3.0]
    """
    out = []
    for div in lst2:
        for num in lst1:
            try:  # skip zero divisors instead of crashing
                out.append(num / div)
            except ZeroDivisionError:
                pass
    return out
7929cfc19952a829c66c18af967668d1015f8477
2,705
def user_wants_upload():
    """
    Determines whether or not the user wants to upload the extension

    :return: boolean
    """
    choice = input("Do you want to upload your extension right now? :")
    return "y" in choice or "Y" in choice
67643d1ccf8d1ffe23ddc503cd8e9f4dc4e98707
2,706
def get_service(vm, port):
    """Return the service for a given port."""
    for service in vm.get('suppliedServices', []):
        if service['portRange'] == port:
            return service
d617771c25c69ee874b0bc64adcc735aa876f929
2,707
def _project(doc, projection):
    """Return new doc with items filtered according to projection."""
    def _include_key(key, projection):
        for k, v in projection.items():
            if key == k:
                if v == 0:
                    return False
                elif v == 1:
                    return True
                else:
                    raise ValueError('Projection value must be 0 or 1.')
        if projection and key != '_id':
            return False
        return True

    return {k: v for k, v in doc.items() if _include_key(k, projection)}
0f2cd190e73b39ceeec0f850054baab1dd357587
2,708
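A MongoDB-style projection demo for _project; the sample document is made up:

doc = {'_id': 1, 'name': 'ada', 'age': 36}
print(_project(doc, {'name': 1}))  # {'_id': 1, 'name': 'ada'}
print(_project(doc, {}))           # empty projection keeps everything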
import random


def random_swap(words, n):
    """
    Randomly swap two words in the sentence n times

    Args:
        words ([type]): [description]
        n ([type]): [description]

    Returns:
        [type]: [description]
    """
    def swap_word(new_words):
        random_idx_1 = random.randint(0, len(new_words) - 1)
        random_idx_2 = random_idx_1
        counter = 0
        while random_idx_2 == random_idx_1:
            random_idx_2 = random.randint(0, len(new_words) - 1)
            counter += 1
            if counter > 3:
                return new_words
        new_words[random_idx_1], new_words[random_idx_2] = (
            new_words[random_idx_2],
            new_words[random_idx_1],
        )
        return new_words

    new_words = words.copy()
    for _ in range(n):
        new_words = swap_word(new_words)
    return new_words
d6916404c363176f13010d006cd61354dcd4e16e
2,709
def cli(ctx, newick, analysis_id, name="", xref_db="null", xref_accession="",
        match_on_name=False, prefix=""):
    """Load a phylogenetic tree (Newick format) into Chado db

    Output:

        Number of inserted trees
    """
    return ctx.gi.phylogeny.load_tree(newick, analysis_id, name=name,
                                      xref_db=xref_db,
                                      xref_accession=xref_accession,
                                      match_on_name=match_on_name,
                                      prefix=prefix)
9b68dec5584a692f2fe04746d9bb179c9e002682
2,711
import os


def extract_node_name(path, ignore_missing_nodes=False):
    """Extracts the token after the 'nodes' path component."""
    tokens = path.split(os.sep)
    last_nodes_index = -1
    for i, token in enumerate(tokens):
        if token == "nodes":
            last_nodes_index = i
    if last_nodes_index == -1:
        if ignore_missing_nodes:
            return path
        # raising a plain string is invalid in Python 3; raise an exception,
        # and apply the % formatting to the whole message, not just its tail
        raise ValueError(
            "path '%s' does not contain 'nodes' and is not a valid diag "
            "tarball, so cannot determine the node" % path)
    try:
        # we're interested in getting the token after 'nodes'
        return tokens[last_nodes_index + 1]
    except IndexError:
        raise ValueError(
            "there is nothing after the 'nodes' entry of '%s'" % path)
0d81e46ef2812e5b087fdef5264ad20a3f3bef2d
2,712
import requests
import json


def folder0_content(folder0_id, host, token):
    """
    Modules
    -------
    requests, json

    Parameters
    ----------
    folder0_id : Onedata folder level 0 id containing the data to publish.
    host : OneData provider (e.g., ceta-ciemat-02.datahub.egi.eu).
    token : OneData personal access token.

    Returns
    -------
    all_level0 : "name" and "id" of the folders contained in the folder
        defined by "folder0_id"
    """
    OneData_urlchildren = ("https://" + host + '/api/v3/oneprovider/data/'
                           + folder0_id + "/children")
    request_param = {'X-Auth-Token': token}
    r_level0 = requests.get(OneData_urlchildren, headers=request_param)
    all_level0 = json.loads(r_level0.text)
    return all_level0
8ce6ae617666f936643b9599ae115e140b30bd2b
2,713
def partition_pair(bif_point):
    """Calculate the partition pairs at a bifurcation point.

    The number of nodes in each child tree is counted. The partition pair
    is the number of bifurcations in the two child subtrees at each branch
    point.
    """
    n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
    m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
    return (n, m)
7889eb95a0ac3b2a7d1138061a4651b1e79427c0
2,716
def longest_CD(values):
    """
    Return the sequence range for the longest continuous disorder (CDl)
    subsequence.
    """
    # Filter residues with score equal or greater than 0.5
    # and store their position indices
    dis_res = [index for index, res in enumerate(values) if float(res) >= 0.5]
    # Initialize longest CD region
    CDl = []
    # Counter to store partial results of each continuous region
    c = []
    # Iterate over the disordered-residue list
    for i, j in zip(dis_res, dis_res[1:]):
        # Check if residues are consecutive
        if j - i == 1:
            # Update counter
            c.append(i)
        # Not consecutive
        else:
            # Add last residue of the interval
            c.append(i)
            # Update CDl
            if len(c) > len(CDl):
                CDl = c
            # Reset counter for the next interval
            c = []
    # Close and compare the final interval too (the original dropped it)
    if dis_res:
        c.append(dis_res[-1])
        if len(c) > len(CDl):
            CDl = c
    return CDl
f07b74b9553c156d2d4b62e17ea02b466a16fe74
2,717
def _set_user_permissions_for_volumes(users, volumes):
    """
    Returns the section of the user data script to create a Linux user
    group and grant the group permission to access the mounted volumes
    on the EC2 instance.
    """
    group_name = 'volumes'
    user_data_script_section = f"""
groupadd {group_name}
"""
    for user in users:
        user_data_script_section += f"""
usermod -a -G {group_name} {user.login}
"""
    for volume in volumes:
        user_data_script_section += f"""
chgrp -R {group_name} {volume.mount}
chmod -R 2775 {volume.mount}
"""
    return user_data_script_section
2d262a52cfa2f3e142da3dd7767dcc6cff14c929
2,719
def search4letters(phrase, letters='aeiou'):
    """Return a set of the 'letters' found in 'phrase'.

    :param phrase: phrase where the search will be made
    :param letters: set of letters that will be searched for in the phrase
    :return: a set of the matching letters
    """
    return set(letters).intersection(set(phrase))
e58d0863aa090ac3644cd7bf26e783efe2956d35
2,720
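Quick, made-up examples of search4letters (set print order may vary):

print(search4letters('hello world'))    # {'e', 'o'}
print(search4letters('galaxy', 'xyz'))  # {'x', 'y'}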
import math


def psubl_T(T):
    """EQ 6 / Sublimation Pressure"""
    T_star = 273.16
    p_star = 611.657E-6
    a = (-0.212144006E2, 0.273203819E2, -0.610598130E1)
    b = (0.333333333E-2, 0.120666667E1, 0.170333333E1)
    theta = T / T_star
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for i in range(0, 3):
        total += a[i] * theta ** b[i]
    pi_subl = math.exp((theta ** -1) * total)
    return pi_subl * p_star
0e3f875fc2d249c78a5db6268dcc0df31213a7ff
2,724
def tab(num):
    """
    Get tab indentation.

    Parameters
    ----------
    num : int
        indentation depth
    """
    return num * 4 * " "
39311a9f28aa70f105271432916745dddeb0b46a
2,725
import requests
import logging


def odata_getone(url, headers):
    """Get a single object from OData."""
    r = requests.get(url, headers=headers)
    if not r.ok:
        logging.warning(f"Fetch url {url} hit {r.status_code}")
        return None
    rjson = r.json()
    if 'error' in rjson:
        logging.warning(f"Fetching of {url} returned error {r.text}")
        return None
    return rjson
5d6c668845132d821f175a2e8c1a924492a9eb2f
2,727
def sparse_search(arr, s):
    """
    10.5 Sparse Search: Given a sorted array of strings that is interspersed
    with empty strings, write a method to find the location of a given string.

    EXAMPLE:
    Input: find "ball" in {"at", "", "", "", "ball", "", "", "car", "", "", "dad", ""}
    Output: 4
    """
    def spread(arr, middle, left, right):
        # Fan out from the middle until a non-empty entry is found.
        k = 1
        while middle - k >= left and middle + k <= right:
            if arr[middle - k] != "":
                return middle - k
            if arr[middle + k] != "":
                return middle + k
            k += 1
        return middle

    def rec_sparse_search(arr, s, left, right):
        if left > right:
            return None
        middle = (left + right) // 2  # integer division (the original used '/')
        if arr[middle] == "":
            new_middle = spread(arr, middle, left, right)
            if new_middle == middle:
                return None
            middle = new_middle
        if arr[middle] == s:
            return middle
        if arr[middle] < s:
            # target sorts after the middle: search the right half
            # (the original recursed into the wrong half here)
            return rec_sparse_search(arr, s, middle + 1, right)
        return rec_sparse_search(arr, s, left, middle - 1)

    return rec_sparse_search(arr, s, 0, len(arr) - 1)
605a56c518539117a83382c9e73d37d5e56b535f
2,728
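A quick run against the docstring's example (list literal assumed):

arr = ["at", "", "", "", "ball", "", "", "car", "", "", "dad", ""]
print(sparse_search(arr, "ball"))  # 4
print(sparse_search(arr, "dad"))   # 10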
import uuid


def uuid_pk():
    """
    Generate uuid1 and cut it to 12 characters.
    UUID default size is 32 chars.
    """
    return uuid.uuid1().hex[:12]
9efb12a6e72b02adcd4a64ca721ceab8c688055a
2,729
import os
import re


def scrape_md_file(md_path):
    """
    Yield the Python scripts and URLs in the md file at path.

    Parameters
    ----------
    md_path : str
        path to md file to scrape

    Returns
    -------
    python_examples : List[str]
        The list of Python scripts included in the provided file.
    urls : List[str]
        The list of URLs included in the provided file.
    """
    # check the file actually exists
    if not os.path.isfile(md_path):
        return [], []
    with open(md_path, 'r') as f:
        readme_content = f.read()
    pythons = re.findall('```python(.*?)```', readme_content, flags=re.DOTALL)
    urls = re.findall('http[s]?://(?:[0-9a-zA-Z]|[-/.%:_])+', readme_content)
    return pythons, urls
afac5538a469dafb06dfd2df40a28be5284b61be
2,732
import os
import glob
import shutil


def copy_files(extension, source, target=None):
    """Copy matching files from source to target.

    Scan the ``source`` folder and copy any file that ends with the given
    ``extension`` to the ``target`` folder.

    Both ``source`` and ``target`` are expected to be either a ``str`` or a
    list or tuple of strings to be joined using ``os.path.join``.

    ``source`` will be interpreted as a path relative to the ``atm`` root
    code folder, and ``target`` will be interpreted as a path relative to
    the user's current working directory.

    If ``target`` is ``None``, ``source`` will be used, and if the
    ``target`` directory does not exist, it will be created.

    Args:
        extension (str):
            File extension to copy.
        source (str or iterable):
            Source directory.
        target (str or iterable or None):
            Target directory. Defaults to ``None``.

    Returns:
        dict:
            Dictionary containing the file names without extension as keys
            and the new paths as values.
    """
    if isinstance(source, (list, tuple)):
        source = os.path.join(*source)
    if isinstance(target, (list, tuple)):
        target = os.path.join(*target)
    elif target is None:
        target = source

    source_dir = os.path.join(os.path.dirname(__file__), source)
    target_dir = os.path.join(os.getcwd(), target)

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    file_paths = dict()
    for source_file in glob.glob(os.path.join(source_dir, '*.' + extension)):
        file_name = os.path.basename(source_file)
        target_file = os.path.join(target_dir, file_name)
        print('Generating file {}'.format(target_file))
        shutil.copy(source_file, target_file)
        file_paths[file_name[:-(len(extension) + 1)]] = target_file

    return file_paths
5b6ae6a908448487206612e7686e573c266bc287
2,733
import fnmatch


def findmatch(members, classprefix):
    """Find match for class member."""
    lst = [n for (n, c) in members]
    return fnmatch.filter(lst, classprefix)
05038eb4796161f4cc64674248473c01fd4b13aa
2,734
def is_narcissistic(number):
    """Must return True if number is narcissistic"""
    return sum(pow(int(x), len(str(number))) for x in str(number)) == number
b94486d4df52b7108a1c431286e7e86c799abf58
2,735
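A couple of sanity checks (153 and 9474 are classic narcissistic numbers):

print(is_narcissistic(153))   # True: 1**3 + 5**3 + 3**3 == 153
print(is_narcissistic(9474))  # True: 9**4 + 4**4 + 7**4 + 4**4 == 9474
print(is_narcissistic(10))    # False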
import torch


def get_data(generic_iterator):
    """Code to get minibatch from data iterator

    Inputs:
    - generic_iterator; iterator for dataset

    Outputs:
    - data; minibatch of data from iterator
    """
    data = next(generic_iterator)
    if torch.cuda.is_available():
        data = data.cuda()
    return data
364151694fb452279691986f5533e182a8b905f3
2,737
from datetime import datetime

import pytz


def isotime(timestamp):
    """ISO 8601 formatted date in UTC from unix timestamp."""
    return datetime.fromtimestamp(timestamp, pytz.utc).isoformat()
f6a922d75a186e26f158edc585691e31bf430b01
2,738
def check_model_consistency(model, grounding_dict, pos_labels):
    """Check that serialized model is consistent with associated json files."""
    groundings = {grounding for grounding_map in grounding_dict.values()
                  for grounding in grounding_map.values()}
    model_labels = set(model.estimator.named_steps['logit'].classes_)
    consistent_labels = groundings <= model_labels

    shortforms = set(grounding_dict.keys())
    model_shortforms = set(model.shortforms)
    consistent_shortforms = shortforms == model_shortforms

    consistent_pos_labels = set(pos_labels) <= model_labels
    return consistent_labels and consistent_shortforms and consistent_pos_labels
b5d1beda0be5ceccec158839c61c1d79349596ef
2,739
def _get_index_sort_str(env, name):
    """
    Returns a string by which an object with the given name shall be
    sorted in indices.
    """
    ignored_prefixes = env.config.cmake_index_common_prefix
    for prefix in ignored_prefixes:
        if name.startswith(prefix) and name != prefix:
            return name[len(prefix):]
    return name
cdf7a509ef8f49ff15cac779e37f0bc5ab98c613
2,740
import requests


def tmdb_find_movie(movie: str, tmdb_api_token: str):
    """
    Search the tmdb api for movies by title

    Args:
        movie (str): the title of a movie
        tmdb_api_token (str): your tmdb v3 api token

    Returns:
        dict
    """
    url = 'https://api.themoviedb.org/3/search/movie?'
    params = {
        'query': movie,
        'language': 'en-US',
        'api_key': tmdb_api_token,
    }
    return requests.get(url, params).json()
ea676fbb91f451b20ce4cd2f7258240ace3925b3
2,742
def _checkerror(fulloutput):
    """
    Function to check the full output for known strings and plausible fixes
    to the error.

    Future: add items to `edict` where the key is a unique string contained
    in the offending output, and the data is the recommended solution to
    resolve the problem
    """
    edict = {
        'multiply': 'NOTE: you might(?) need to clean the `tmp/` folder!',
        'already defined': 'NOTE: you probably (might?) need to clean the `tmp/` folder!',
        'unresolved externals': ('NOTE: consider recompiling the linked libraries to '
                                 'have the correct name mangling for cl.exe: '
                                 'ifort: /names:lowercase /assume:underscore '),
        "KeyError: 'void'": ('There may be an issue with public/private function '
                             'definitions or a missing variable definition in the last '
                             'function listed above. For the first error consider using '
                             'the parameter `functiondict` or checking to ensure all '
                             'module functions are public... For the second error, check '
                             'that all of the parameters in the subroutine are defined'),
        "No such file or directory": ('There may be a space in the path to one of the '
                                      'source code or library folders'),
        "LINK : fatal error LNK1104: cannot open file": ('The pyd is currently in use, '
                                                         'restart any kernels using it!'),
    }
    # iterate through the keys in the error dictionary and see if the key is
    # in the full output
    extramessage = ''
    for error_key in edict.keys():
        if error_key in fulloutput:
            extramessage = edict[error_key]
    return extramessage
5312beff6f998d197a3822e04e60d47716520f50
2,743
def findAnEven(L):
    """
    :Assumes L is a list of integers:
    :Returns the first even number in L:
    :Raises ValueError if L does not contain an even number:
    """
    for num in L:
        if num % 2 == 0:
            return num
    raise ValueError
93f7854bd376d52df40b23d21bfde784db124106
2,744
def errorString(node, error):
    """
    Format error messages for node errors returned by checkLinkoStructure.

    inputs:
        node - the node for the error.
        error - a (backset, foreset) tuple, where backset is the set of
            missing backlinks and foreset is the set of missing forelinks.

    returns: string
        string - the error string message.
    """
    back, fore = error[0], error[1]
    if len(back) == 0:
        back = 'None'
    if len(fore) == 0:
        fore = 'None'
    return ('Node {0}: missing backlinks {1},'
            ' missing forelinks {2}').format(node, back, fore)
df87b7838ed84fe4e6b95002357f616c96d04ad0
2,745
def deep_update(target, source):
    """Deep merge two dicts."""
    if isinstance(source, dict) and isinstance(target, dict):
        for key, item in source.items():
            if key in target:
                target[key] = deep_update(target[key], item)
            else:
                target[key] = item
        return target
    # non-dict values (or mismatched types) simply replace the target;
    # the original returned the stale target here, so scalar leaves were
    # never updated
    return source
5db0c6fa31f3d4408a359d90dbf6e50dfdc12cdc
2,746
def _Backward3a_T_Ps(P, s):
    """Backward equation for region 3a, T=f(P,s)

    Parameters
    ----------
    P : float
        Pressure [MPa]
    s : float
        Specific entropy [kJ/kgK]

    Returns
    -------
    T : float
        Temperature [K]

    References
    ----------
    IAPWS, Revised Supplementary Release on Backward Equations for the
    Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS
    Industrial Formulation 1997 for the Thermodynamic Properties of Water
    and Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf,
    Eq 6

    Examples
    --------
    >>> _Backward3a_T_Ps(20,3.8)
    628.2959869
    >>> _Backward3a_T_Ps(100,4)
    705.6880237
    """
    I = [-12, -12, -10, -10, -10, -10, -8, -8, -8, -8, -6, -6, -6, -5, -5,
         -5, -4, -4, -4, -2, -2, -1, -1, 0, 0, 0, 1, 2, 2, 3, 8, 8, 10]
    J = [28, 32, 4, 10, 12, 14, 5, 7, 8, 28, 2, 6, 32, 0, 14, 32, 6, 10,
         36, 1, 4, 1, 6, 0, 1, 4, 0, 0, 3, 2, 0, 1, 2]
    n = [0.150042008263875e10, -0.159397258480424e12, 0.502181140217975e-3,
         -0.672057767855466e2, 0.145058545404456e4, -0.823889534888890e4,
         -0.154852214233853, 0.112305046746695e2, -0.297000213482822e2,
         0.438565132635495e11, 0.137837838635464e-2, -0.297478527157462e1,
         0.971777947349413e13, -0.571527767052398e-4, 0.288307949778420e5,
         -0.744428289262703e14, 0.128017324848921e2, -0.368275545889071e3,
         0.664768904779177e16, 0.449359251958880e-1, -0.422897836099655e1,
         -0.240614376434179, -0.474341365254924e1, 0.724093999126110,
         0.923874349695897, 0.399043655281015e1, 0.384066651868009e-1,
         -0.359344365571848e-2, -0.735196448821653, 0.188367048396131,
         0.141064266818704e-3, -0.257418501496337e-2, 0.123220024851555e-2]

    Pr = P / 100
    sigma = s / 4.4
    suma = 0
    for i, j, ni in zip(I, J, n):
        suma += ni * (Pr + 0.240) ** i * (sigma - 0.703) ** j
    return 760 * suma
cb0b9b55106cf771e95505c00043e5772faaef40
2,748
def format_dB(num):
    """
    Returns a human readable string of dB. The value is divided by 10 to
    get the first decimal digit.
    """
    num /= 10
    return f'{num:3.1f} dB'
13d6313834333ee2ea432cf08470b6ce1efe1ad6
2,749
def _extractKernelVersion(kernel):
    """
    Extract version string from raw kernel binary.

    @param bytes kernel Raw kernel binary.
    @return string Version string if found.
    """
    try:
        versionOffset = kernel.index(b'Linux version')
        for i in range(versionOffset, versionOffset + 1024):
            if kernel[i] == 0x00:
                return kernel[versionOffset:i]
        return None
    except ValueError:
        # bytes.index raises ValueError (not IndexError) when not found
        return None
f32e995a4a16376b26b0e1d5af826f2f0e71df87
2,751
import os


def rel_path(path, parent_path):
    """Return path relative to parent_path."""
    # Use realpath to avoid issues with symlinked dirs (see gh-7707)
    pd = os.path.realpath(os.path.abspath(parent_path))
    apath = os.path.realpath(os.path.abspath(path))
    if len(apath) < len(pd):
        return path
    if apath == pd:
        return ''
    if pd == apath[:len(pd)]:
        assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
        path = apath[len(pd) + 1:]
    return path
8424aa6393778234f71858a816b7375b845c42b2
2,752
import time


def sleeping_func(arg, secs=10, result_queue=None):
    """This method illustrates how the workers can be used."""
    time.sleep(secs)
    if result_queue is not None:
        result_queue.put(arg)
    else:
        return arg
c15dfac46f9b47fcc82ff539116ecc683a593b9c
2,753
import sys


def suffix(s):
    """Add '3' suffix to programs for Python 3."""
    if sys.version_info[0] == 3:
        s = s + '3'
    return s
0ba1495032e57553adf97d7aa49a85e110c1acf0
2,754
import os


def get_fsuae_dir():
    """Get FS-UAE dir."""
    user_home_dir = os.path.expanduser('~')
    directories = [os.path.join(user_home_dir, _f)
                   for _f in os.listdir(user_home_dir)
                   if os.path.isdir(os.path.join(user_home_dir, _f))]
    for directory in directories:
        fsuae_dir = os.path.join(directory, 'FS-UAE')
        fsuae_config_dir = os.path.join(fsuae_dir, 'Configurations')
        if os.path.isdir(fsuae_config_dir):
            return fsuae_dir
    return None
b3cbcea6449c4a8836304bc0cb68f1db502f7a8e
2,756
def args_for_blocking_web_whatsapp_com_http():
    """Returns arguments for blocking web.whatsapp.com over http."""
    return ["-iptables-reset-keyword", "Host: web.whatsapp.com"]
a15a8ebc087467ec1a8e6817366f93df7b0a181b
2,757
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
    """Returns column name corresponding to a particular fourier term,
    as returned by fourier_series_fcn

    :param k: int
        fourier term
    :param col_name: str
        column in the dataframe used to generate fourier series
    :param function_name: str
        sin or cos
    :param seas_name: str
        appended to new column names added for fourier terms
    :return: str
        column name in DataFrame returned by fourier_series_fcn
    """
    # patsy doesn't allow "." in formula terms. Replace "." with "_"
    # rather than quoting all fourier terms with "Q()".
    name = f"{function_name}{k:.0f}_{col_name}"
    if seas_name is not None:
        name = f"{name}_{seas_name}"
    return name
5c15b52728d0333c9c7df59030d6ead66473c823
2,758
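Hypothetical calls showing the generated column names:

print(get_fourier_col_name(1, 'tod'))                    # 'sin1_tod'
print(get_fourier_col_name(2, 'tow', function_name='cos',
                           seas_name='weekly'))          # 'cos2_tow_weekly'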
import uuid


def unique_filename():
    """Creates a UUID-based unique filename."""
    return str(uuid.uuid1())
ee0d9090a4c5f8a6f0ddef2d670f7beb845a4114
2,759
def build_binary_value(char_str, bits, alphabet) -> str:
    """
    This method converts a string char_str into binary, using n bits per
    character and decoding from the supplied alphabet or from ASCII when
    bits=7.

    This is almost the inverse method to build_string in the decompress
    module.

    :param char_str: string.
    :param bits: number of bits per character.
    :param alphabet: Alphabet.
    :return: binary value.
    """
    if bits == 7:
        indices = [ord(char_) for char_ in char_str]
    else:
        indices = [alphabet.index(char_) for char_ in char_str]
    binary_char_list = ["{0:b}".format(index).zfill(bits) for index in indices]
    return ''.join(binary_char_list)
50830dd5cfa3f5428b0946e7382220f9b5ff1915
2,761
def computeAnswer(inputData):
    """Compute the answer to the task, from the input data."""
    # Do some calculations on the inputData
    answer = str(int(inputData) * 2)  # EDIT ME (remove this line once done)
    return answer
3bf90dc1c05ca422ffda70d8a053eb76f6dcc66b
2,762
def irange(start, end):
    """Inclusive range from start to end (vs. Python insanity.)

    irange(1,5) -> 1, 2, 3, 4, 5
    """
    return range(start, end + 1)
91d4c270b1d9304b4ee82c0cb16aee5d518db3d5
2,763
import os
import re


def _make_rel_url_path(src, dst):
    """src is a file or dir which wants to address dst relatively; calculate
    the appropriate path to get from here to there."""
    srcdir = os.path.abspath(src + "/..")
    dst = os.path.abspath(dst)

    # For future reference, I hate doing dir munging with string operations
    # with a fiery passion, but pragmatism won out over making a lib...
    common = os.path.commonprefix((srcdir, dst))
    reldst = dst[len(common):]
    srcdir = srcdir[len(common):]
    newpath = re.sub(r".*?[/\\]|.+$", "../", srcdir) or "./"
    newpath = newpath + reldst
    newpath = newpath.replace("\\", "/")
    newpath = newpath.replace("//", "/")
    return newpath
39c6d5b4ec42b61d13fe3229f869bc6e1b823ec3
2,764
import glob
import os
import json


def dataset_source_xnat(bids_dir):
    """
    Method to check if the data was downloaded from xnat

    :param bids_dir: BIDS Directory
    :return: True or False
    """
    dataset_description_file = glob.glob(
        bids_dir + "/**/dataset_description.json", recursive=True)
    # guard against an empty glob result before indexing into it
    if not dataset_description_file or not os.path.exists(dataset_description_file[0]):
        return False
    with open(dataset_description_file[0], 'r') as f:
        json_contents = json.load(f)
    if 'DatasetDOI' not in json_contents:
        return False
    elif not json_contents['DatasetDOI'].endswith('xnat'):
        return False
    return True
f0970308a14f5c4f2b152891c115428be666d3f9
2,765
import json


def _json_keyify(args):
    """Converts arguments into a deterministic key used for memoizing."""
    args = tuple(sorted(args.items(), key=lambda e: e[0]))
    return json.dumps(args)
2800a9a0db0cf8d51efbcbeda2c023172f6662f5
2,768
def choose_a_pick_naive(numbers_left):
    """
    Choose any larger number

    :param numbers_left:
    :return:
    """
    if numbers_left[0] > numbers_left[-1]:
        return 0, numbers_left[0]
    elif numbers_left[-1] > numbers_left[0]:
        return -1, numbers_left[-1]
    else:
        return 0, numbers_left[0]
70405a4ad9d1ee1afbec93bea13d7eab3068b42e
2,769
import unicodedata


def sanitize_str(value: str) -> str:
    """Removes Unicode control (Cc) characters EXCEPT for tabs (\\t),
    newlines (\\n only), line separators (U+2028) and paragraph
    separators (U+2029)."""
    # Keep a character when it is not a control character OR it is one of
    # the allowed exceptions. The original used `and ch not in {...}`,
    # which stripped the very characters the docstring promises to keep.
    return "".join(ch for ch in value
                   if unicodedata.category(ch) != 'Cc'
                   or ch in {'\t', '\n', '\u2028', '\u2029'})
5b5eae2b377a834e377a8bf7bcd7cefc2278c2f7
2,771
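A small check of the corrected filter; the sample string is made up:

s = "a\tb\x00c\nd"
print(repr(sanitize_str(s)))  # 'a\tbc\nd' — NUL removed; tab and newline kept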
import time


def date_format(time_obj=time, fmt='%Y-%m-%d %H:%M:%S') -> str:
    """Convert a time to a string.

    :param time_obj: object exposing time() (defaults to the time module)
    :param fmt: strftime format
    :return: formatted time string
    """
    _tm = time_obj.time()
    _t = time.localtime(_tm)
    return time.strftime(fmt, _t)
0a614763b040587b80743ffacfff6bbb0a6c7365
2,772
from typing import Optional


def clean_pin_cite(pin_cite: Optional[str]) -> Optional[str]:
    """Strip spaces and commas from pin_cite, if it is not None."""
    if pin_cite is None:
        return pin_cite
    return pin_cite.strip(", ")
9c495fcc4f1cf192c1358f50fef569c4d6b36290
2,773
import json


def get_json_dump(json_object, indent=4, sort_keys=False):
    """Short handle to get a pretty printed str from a JSON object."""
    return json.dumps(json_object, indent=indent, sort_keys=sort_keys)
505548cdf972ef891b7bcc3bcd7be3347769faec
2,774
def heap_sort(arr: list):
    """
    Heap sorting a list. Big-O: O(n log n).
    @see https://www.geeksforgeeks.org/heap-sort/
    """
    def heapify(sub: list, rdx: int, siz: int):
        """
        Heapifying range between rdx and size ([rdx:siz]).
        @param sub: a slice of list.
        @param rdx: root/parent index to start.
        @param siz: size of heap.
        """
        largest = ndx = rdx  # assuming the root is the largest
        while ndx < siz:
            l_index = 2 * ndx + 1  # child index at left = 2*i + 1
            r_index = 2 * ndx + 2  # child index at right = 2*i + 2
            # reset largest index if left child exists and is greater than root.
            if l_index < siz and sub[ndx] < sub[l_index]:
                largest = l_index
            # check if right child is greater than the value at the largest index.
            if r_index < siz and sub[largest] < sub[r_index]:
                largest = r_index
            # change root, if needed
            if largest != ndx:
                sub[ndx], sub[largest] = sub[largest], sub[ndx]  # swap
                ndx = largest  # continue sifting down from the new position
            else:
                return

    n = len(arr)
    # build a max heap: start from the last parent (that can have children).
    parent = n // 2 - 1
    for i in range(parent, -1, -1):
        heapify(arr, i, n)
    # extract elements one by one.
    for i in range(n - 1, 0, -1):
        arr[i], arr[0] = arr[0], arr[i]  # move current max to the end
        heapify(arr, 0, i)
    return arr
9b53f3027804cab16c9850d4858377f49afe7bbf
2,775
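A quick in-place sort demo:

data = [12, 11, 13, 5, 6, 7]
print(heap_sort(data))  # [5, 6, 7, 11, 12, 13]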
def find_max_path(triangle):
    """Find maximum-sum path from top of triangle to bottom."""
    # Start by copying the values
    sums = [[x for x in row] for row in triangle]
    # Efficient algorithm: start at the bottom and work our way up,
    # computing max sums
    for reverse_index, row in enumerate(reversed(sums)):
        if reverse_index == 0:
            # Easy: max value for subpaths from last row is cell value itself
            continue
        # Now we need to take sum of each cell and max of two subpaths
        row_below = sums[-reverse_index]
        for col_index, col in enumerate(row):
            left = row_below[col_index]
            right = row_below[col_index + 1]
            row[col_index] = col + max(left, right)
    return sums[0][0]
1eb0afd076c455e67eacc867d04020ae82c68936
2,776
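A worked example on a small triangle (Project Euler-style input, made up here):

triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
print(find_max_path(triangle))  # 23 (3 + 7 + 4 + 9)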
def prompt_for_password(prompt=None):
    """Fake prompt function that just returns a constant string."""
    return 'promptpass'
49499970c7698b08f38078c557637907edef3223
2,777
def get_frame_list(video, jump_size=6, **kwargs):
    """
    Returns list of frame numbers including first and last frame.
    """
    frame_numbers = [frame_number for frame_number
                     in range(0, video.frame_count, jump_size)]
    last_frame_number = video.frame_count - 1
    if frame_numbers[-1] != last_frame_number:
        frame_numbers.append(last_frame_number)
    return frame_numbers
786de04b4edf224045216de226ac61fdd42b0d7b
2,778
def f_bis(n1: float, n2: float, n3: float) -> str:
    """... see above ..."""
    if n1 < n2:
        if n2 < n3:
            return 'cas 1'
        elif n1 < n3:
            return 'cas 2'
        else:
            return 'cas 5'
    elif n1 < n3:
        return 'cas 3'
    elif n2 < n3:
        return 'cas 4'
    else:
        return 'cas 6'
e46c147a5baef02878700e546b11b7ae44b8909a
2,782
def obter_forca(unidade):
    """Returns the attack strength of the unit given as argument."""
    return unidade[2]
34fe4acac8e0e3f1964faf8e4b26fa31148cf2a6
2,783
import os


def get_env_string(env_key, fallback):
    """
    Reads a string value from the environment. No literal evaluation is
    attempted, since the environment always returns string values.

    Please note that falsy values such as '' are treated as False and
    trigger the fallback.

    :param str env_key: key to read
    :param str fallback: fallback value
    :rtype: str
    :return: environment value typed in string
    """
    assert isinstance(fallback, str), "fallback should be str instance"
    return os.environ.get(env_key) or fallback
af136c32b22b1cad30a65e517828dfaf01cb597d
2,784
import subprocess


def testing_submodules_repo(testing_workdir, request):
    """Initialize a new git directory with two submodules."""
    subprocess.check_call(['git', 'init'])
    # add a commit for a readme, since git diff behaves weirdly if
    # submodules are the first ever commit
    subprocess.check_call(['touch', 'readme.txt'])
    with open('readme.txt', 'w') as readme:
        readme.write('stuff')
    subprocess.check_call(['git', 'add', '.'])
    subprocess.check_call(['git', 'commit', '-m', 'Added readme'])
    subprocess.check_call(['git', 'submodule', 'add',
                           'https://github.com/conda-forge/conda-feedstock.git'])
    subprocess.check_call(['git', 'submodule', 'add',
                           'https://github.com/conda-forge/conda-build-feedstock.git'])
    subprocess.check_call(['git', 'add', '.'])
    subprocess.check_call(['git', 'commit', '-m', 'Added conda and cb submodules'])
    # a second commit, for testing trips back in history
    subprocess.check_call(['git', 'submodule', 'add',
                           'https://github.com/conda-forge/conda-build-all-feedstock.git'])
    subprocess.check_call(['git', 'add', '.'])
    subprocess.check_call(['git', 'commit', '-m', 'Added cba submodule'])
    return testing_workdir
3a09e30447d7ebdb041ff1d4ad7f28c8483db41a
2,785
import os

# Path to the xNormal executable. The original `from sys import path`
# shadowed this with sys.path (a list), which cannot work; an explicit,
# assumed placeholder constant is used here instead.
path = "xNormal.exe"


def run_config_filename(conf_filename):
    """Runs xNormal using the path to a configuration file."""
    retcode = os.system("\"%s\" %s" % (path, conf_filename))
    return retcode
a68fdbefc4e6d4459073243996d0706111ec4a36
2,786
def celsius_to_fahrenheit(temperature_C):
    """Converts C -> F."""
    return temperature_C * 9.0 / 5.0 + 32.0
47c789c560c5b7d035252418bd7fb0819b7631a4
2,787
import itertools


def strip_translations_header(translations: str) -> str:
    """
    Strip header from translations generated by ``xgettext``.

    Header consists of multiple lines separated from the body by an empty line.
    """
    return "\n".join(itertools.dropwhile(len, translations.splitlines()))
b96c964502724008306d627d785224be08bddb86
2,789
import requests
import random


def handle(req):
    """handle a request to the function

    Args:
        req (str): request body
    """
    r = requests.get("http://api.open-notify.org/astros.json")
    result = r.json()
    index = random.randint(0, len(result["people"]) - 1)
    name = result["people"][index]["name"]
    return "{} is in space".format(name)
7d951443bc5b6f3db86602d635a8c9ce84b703fb
2,791
def main(stdin):
    """
    Take sorted standard in from Hadoop and return lines.
    Value is just a place holder.
    """
    for line_num in stdin:
        # Remove trailing newlines.
        line_num = line_num.rstrip()
        # Omit empty lines.
        try:
            (line, num) = line_num.rsplit('\t', 1)
            print("{line}\t{num}".format(line=line, num=num))
        except ValueError:
            pass
    return None
811e184d9425c1c76681c823b463b99ebde2c25c
2,793
def param_is_numeric(p):
    """
    Test whether any parameter is numeric; functionally, determines if any
    parameter is convertible to a float.

    :param p: An input parameter
    :return: True if `p` is numeric
    """
    try:
        float(p)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string, non-numeric inputs such as None
        return False
b92579ba019389cf21002b63ca6e2ebdfad7d86f
2,794
def find_attachments(pattern, cursor):
    """Return a list of attachments that match the specified pattern.

    Args:
        pattern: The path to the attachment, as a SQLite pattern (to be
            passed to a LIKE clause).
        cursor: The Cursor object through which the SQLite queries are sent
            to the Zotero database.

    Returns:
        A list of (parentItemID, path) pairs that match the specified
        pattern. The returned list is empty if no matches are found.
    """
    query = 'SELECT parentItemID, path FROM itemAttachments WHERE path LIKE ?'
    cursor.execute(query, (pattern,))
    return list(cursor)
614649f6fd5972b026b191bb1a272e270dedffe5
2,795
def model_fields_map(model, fields=None, exclude=None, prefix='', prefixm='',
                     attname=True, rename=None):
    """
    Based on the given model, returns a list of tuples containing the ORM
    path to each field and the name under which it should appear in the
    result. Only regular fields are handled; m2m and generic fields are
    not included.

    ARGUMENTS:
    :param model: model or model instance the field list is built from
    :param None | collections.Container fields: fields to take from the model
    :param None | collections.Container exclude: fields to skip
    :param str prefix: ORM path prefix under which the model sits in the query
    :param str prefixm: prefix prepended to the output field name
    :param bool attname: use name (model) or attname (model_id); these
        differ for foreign keys
    :param dict rename: mapping for renaming fields
    :rtype: list[tuple[str]]
    """
    data = []
    rename = rename or {}
    attribute = 'attname' if attname else 'name'
    for f in model._meta.concrete_fields:
        if fields and f.attname not in fields and f.name not in fields:
            continue
        # skip excluded fields (the original condition inverted the
        # f.name check, so exclusions almost never triggered)
        if exclude and (f.attname in exclude or f.name in exclude):
            continue
        param_name = getattr(f, attribute)
        new_param_name = rename[param_name] if param_name in rename else param_name
        data.append(('{}{}'.format(prefix, param_name),
                     '{}{}'.format(prefixm, new_param_name)))
    return data
812247543e5f714e0d2ef57cf018b0741679f83e
2,796
import math


def sphere_mass(density, radius):
    """Usage: Find the mass of a sphere using density and radius."""
    return density * ((4 / 3) * math.pi * radius ** 3)
8c1a2dc949980ca96a4f56f3918bacd19568965e
2,797
def should_parse(config, file):
    """Check if file extension is in the list of supported file types
    (can be configured from cli)."""
    return file.suffix and file.suffix.lower() in config.filetypes
1c2258d405ef715574b557d99cdf87e461627ffd
2,799
def menu():
    """Display the menu.

    Options:
        1: Add a donor
        2: Add a recipient
        3: Review the donor list
        4: Review the recipient list
        5: Perform a transfusion
        6: Statistics
        7: Exit

    Returns:
        opc (num): menu option chosen
    """
    print("\nWelcome to the Blood Donation system. Choose the action you "
          "want to perform.\n1. Add blood donor\n2. Add blood recipient\n"
          "3. Review donor list\n4. Review recipient list\n"
          "5. Perform a transfusion\n6. Statistics\n7. Exit")
    opc = int(input("Select: "))
    return opc
805d1ef48fbe03f8185e3c7be71ce3d9aa6104df
2,802
def flatten(x):
    """Flattens nested list."""
    if isinstance(x, list):
        return [a for i in x for a in flatten(i)]
    else:
        return [x]
7d348f8287dfccfbb77a52a84a5642c265381eb1
2,804
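A quick demonstration on an arbitrarily nested list:

print(flatten([1, [2, [3, 4]], [], [5]]))  # [1, 2, 3, 4, 5]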
import fileinput


def parse_input():
    """Parse input and return an array of calendars.

    A user can either pass the calendar via stdin or via one or several
    icalendar files. This method will parse the input and return an array
    of valid icalendars.
    """
    input_data = ''
    calendars = []
    for line in fileinput.input():
        if 'BEGIN:VCALENDAR' in line:
            calendars.append(input_data)
            input_data = line
        else:
            input_data += line
    calendars.append(input_data)
    return calendars[1:]
a60a760968f139da0b7753ae5717d78b640cb232
2,805
def identity(obj):
    """Returns the ``obj`` parameter itself

    :param obj: The parameter to be returned
    :return: ``obj`` itself

    >>> identity(5)
    5
    >>> foo = 2
    >>> identity(foo) is foo
    True
    """
    return obj
a3271a831d2e91fe6eebed7e80c18e7c81996da6
2,806
def with_setup_(setup=None, teardown=None):
    """Decorator like `with_setup` of nosetest but which can be applied to
    any function."""
    def decorated(function):
        def app(*args, **kwargs):
            if setup:
                setup()
            try:
                function(*args, **kwargs)
            finally:
                if teardown:
                    teardown()
        return app
    return decorated
f9e8eddfd01ee99e458857de403c49b91dafa92c
2,807
def hub_quantile_prediction_dict_validator(target_group_dict, prediction_dict):
    """
    Does hub prediction_dict validation as documented in
    `json_io_dict_from_quantile_csv_file()`.
    """
    error_messages = []  # return value, filled next
    valid_quantiles = target_group_dict['quantiles']
    prediction_quantiles = prediction_dict['prediction']['quantile']
    if set(valid_quantiles) != set(prediction_quantiles):
        error_messages.append(
            f"prediction_dict quantiles != valid_quantiles. "
            f"valid_quantiles={valid_quantiles}, "
            f"prediction_quantiles={prediction_quantiles}")
    return error_messages
ec13824557ef9533d7c4a777daadd07414752767
2,810
def GetMappingKeyName(run, user):
    """Returns a str used to uniquely identify a mapping."""
    return 'RunTesterMap_%s_%s' % (run.key().name(), str(user.user_id()))
b4eb80ca5f084ea956f6a458f92de1b85e722cda
2,811
def year_from_operating_datetime(df):
    """Add a 'year' column based on the year in the operating_datetime.

    Args:
        df (pandas.DataFrame): A DataFrame containing EPA CEMS data.

    Returns:
        pandas.DataFrame: A DataFrame containing EPA CEMS data with a
        'year' column.
    """
    df['year'] = df.operating_datetime_utc.dt.year
    return df
1c7bbc6465d174465151e5e777671f319ee656b7
2,812
def get_seats_percent(election_data):
    """
    This function takes a list of lists as an argument, with each list
    representing a party's election results, and returns a tuple with the
    percentage of Bundestag seats won by various political affiliations.

    Parameters:
        election_data (list): A list of lists, each representing a party's
            election results

    Returns:
        A tuple with the percentage of Bundestag seats won by various
        political affiliations
    """
    left_seats = 0
    right_seats = 0
    extreme_seats = 0
    center_seats = 0
    total_bundestag_seats = 0

    for party in election_data[1:]:
        # convert once, so the affiliation buckets add ints rather than
        # concatenating strings (the original only converted the total)
        seats = int(party[1])
        total_bundestag_seats += seats
        if 'far' in party[2]:
            extreme_seats += seats
        else:
            center_seats += seats
        if 'left' in party[2]:
            left_seats += seats
        else:
            right_seats += seats

    left_percent = round((left_seats / total_bundestag_seats * 100), 2)
    right_percent = round((right_seats / total_bundestag_seats * 100), 2)
    extreme_percent = round((extreme_seats / total_bundestag_seats * 100), 2)
    center_percent = round((center_seats / total_bundestag_seats * 100), 2)

    return left_percent, right_percent, extreme_percent, center_percent
a131d64747c5c0dde8511e9ec4da07252f96a6ec
2,815
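A hypothetical call with a header row plus two parties; the input shape is assumed from the code:

election_data = [
    ['party', 'seats', 'affiliation'],
    ['A', '100', 'center-left'],
    ['B', '60', 'far-right'],
]
print(get_seats_percent(election_data))  # (62.5, 37.5, 37.5, 62.5)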