columns: content (string, 35–416k chars), sha1 (string, 40 chars), id (int64, 0–710k)
def response_GET(client, url):
    """Fixture that returns the result of a GET request."""
    return client.get(url)
b4762c9f652e714cc5c3694b75f935077039cb02
708,343
def _get_realm(response): """Return authentication realm requested by server for 'Basic' type or None :param response: requests.response :type response: requests.Response :returns: realm :rtype: str | None """ if 'www-authenticate' in response.headers: auths = response.headers['www-authenticate'].split(',') basic_realm = next((auth_type for auth_type in auths if auth_type.rstrip().lower().startswith("basic")), None) if basic_realm: realm = basic_realm.split('=')[-1].strip(' \'\"').lower() return realm else: return None else: return None
346b3278eb52b565f747c952493c15820eece729
708,344
from typing import List def _is_binary_classification(class_list: List[str]) -> bool: """Returns true for binary classification problems.""" if not class_list: return False return len(class_list) == 1
82ada7dd8df93d58fad489b19b9bf4a93ee819c3
708,345
def get_in(obj, lookup, default=None):
    """ Walk obj via __getitem__ for each lookup,
    returning the final value of the lookup or default.
    """
    tmp = obj
    for key in lookup:
        try:  # pragma: no cover
            tmp = tmp[key]
        except (KeyError, IndexError, TypeError):  # pragma: no cover
            return default
    return tmp
73dfcaadb6936304baa3471f1d1e980f815a7057
708,346
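A minimal usage sketch for get_in above; the nested structure and values are illustrative:

data = {"a": [{"b": 7}]}
assert get_in(data, ["a", 0, "b"]) == 7             # dict -> list -> dict
assert get_in(data, ["a", 5, "b"], default=0) == 0  # IndexError falls back to default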
def tmNstate(trTrg): """Given (newq, new_tape_sym, dir), return newq. """ return trTrg[0]
17db0bc5cae4467e7a66d506e1f32d48c949e5eb
708,347
import argparse def parse_arguments(args): """ Parse the arguments from the user """ parser = argparse.ArgumentParser( description= "Create UniRef database for HUMAnN2\n", formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( "-v","--verbose", help="additional output is printed\n", action="store_true", default=False) parser.add_argument( "-i","--input", help="the UniRef fasta file to read\n", required=True) parser.add_argument( "-o","--output", help="the UniRef database to write\n", required=True) parser.add_argument( "-f","--filter", help="string to use for filtering (example: uncharacterized)\n") parser.add_argument( "--exclude-list", help="file of id list to use for filtering (example: id_list.tsv)\n") parser.add_argument( "--include-list", help="file of id list to use for filtering (example: id_list.tsv)\n") parser.add_argument( "-d","--format-database", choices=["fasta","rapsearch","diamond"], default="fasta", help="format of output files (default: fasta)\n") return parser.parse_args()
d547c0017904d91a930f149a6b085d2e0c87fe88
708,348
def slim_form(domain_pk=None, form=None):
    """ What is going on? We want only one domain showing up in the
    choices. We are replacing the query set with just one object. There
    are two querysets. I'm not really sure what the first one does, but
    I know the second one (the widget) removes the choices. The third
    line removes the default u'--------' choice from the drop down. """
    return form
7b58674e307fbbd31f0546b70309c0c723d1021c
708,349
import re
import requests

def codepoint_to_url(codepoint, style):
    """ Given an emoji's codepoint (e.g. 'U+FE0E') and a non-apple emoji style,
    returns a url to the png image of the emoji in that style. Only works for
    style = 'twemoji', 'noto', and 'blobmoji'.
    """
    base = codepoint.replace('U+', '').lower()
    if style == 'twemoji':
        # See discussion in commit 8115b76 for more information about
        # why the base needs to be patched like this.
        patched = re.sub(r'0*([1-9a-f][0-9a-f]*)', lambda m: m.group(1),
                         base.replace(' ', '-')
                             .replace('fe0f-20e3', '20e3')
                             .replace('1f441-fe0f-200d-1f5e8-fe0f', '1f441-200d-1f5e8'))
        response = requests.get('https://github.com/twitter/twemoji/raw/gh-pages/v/latest')
        version = response.text if response.ok else None
        if version:
            return 'https://github.com/twitter/twemoji/raw/gh-pages/v/%s/72x72/%s.png' \
                % (version, patched)
        else:
            return 'https://github.com/twitter/twemoji/raw/master/assets/72x72/%s.png' \
                % patched
    elif style == 'noto':
        return 'https://github.com/googlefonts/noto-emoji/raw/master/png/128/emoji_u%s.png' \
            % base.replace(' ', '_')
    elif style == 'blobmoji':
        return 'https://github.com/C1710/blobmoji/raw/master/png/128/emoji_u%s.png' \
            % base.replace(' ', '_')
a5b47f5409d465132e3fb7141d81dbd617981ca8
708,350
def getRNCS(ChargeSA):
    """The calculation of relative negative charge surface area
    -->RNCS
    """
    charge = []
    for i in ChargeSA:
        charge.append(float(i[1]))
    temp = []
    for i in ChargeSA:
        temp.append(i[2])
    try:
        # RNCG: most negative charge over the sum of all negative charges
        RNCG = min(charge) / sum([i for i in charge if i < 0.0])
        return temp[charge.index(min(charge))] / RNCG
    except (ValueError, ZeroDivisionError):
        # empty input, or no negative charges
        return 0.0
f03011de85e1bcac01b2aba4afde61a3dd9f7866
708,351
def decode(chrom):
    """ Returns the communities of a locus-based adjacency codification
    in a vector of int where each position is a node id and the value of that
    position the id of the community where it belongs. Two positions with the
    same number mean that those two nodes belong to the same community.
    """
    try:
        size = len(chrom)
        last_c = 0
        communities = [float("inf")] * size
        pending = set(range(size))
        while len(pending) != 0:
            index = int(pending.pop())
            neighbour = int(chrom[index])
            if neighbour != -1:
                communities[index] = min(last_c, communities[index], communities[neighbour])
                while neighbour in pending:
                    pending.remove(neighbour)
                    communities[neighbour] = min(last_c, communities[neighbour])
                    neighbour = int(chrom[neighbour])
            last_c += 1
        return communities
    except Exception as e:
        raise e
998a58e0d4efad2c079a9d023530aca37d0e226e
708,352
import math def bin_search(query, data): """ Query is a coordinate interval. Approximate binary search for the query in sorted data, which is a list of coordinates. Finishes when the closest overlapping value of query and data is found and returns the index in data. """ i = int(math.floor(len(data)/2)) # binary search prep lower, upper = 0, len(data) if not upper: return -1 tried = set() rightfound = '' # null value in place of 0, which is a valid value for rightfound while not (data[i][0] <= query[0] and data[i][1] >= query[0]): # query left coordinate not found in data yet if data[i][0] <= query[1] and data[i][1] >= query[1]: # query right found, will keep looking for left rightfound = i if data[i][1] < query[0]: # i is too low of an index lower = i i = int(math.floor((lower + upper)/2.)) else: # i is too high of an index upper = i i = int(math.floor((lower + upper)/2.)) if i in tried or i == upper: if data[i][0] >= query[0] and data[i][1] <= query[1]: # data interval sandwiched inside query break elif i + 1 < len(data) and data[i+1][0] > query[0] and data[i+1][1] < query[1]: # data can be incremented i = i + 1 else: i = rightfound if rightfound != '' else -1 break tried.add(i) return i
bb93034bc5c7e432c3fc55d4485949688e62b84a
708,353
import struct

def Decodingfunc(Codebyte):
    """This is the version 'A' of the decoding function, which decodes
    data encoded by the version 'A' coding function"""
    Decodedint = struct.unpack('b', Codebyte)[0]
    N = 0  # number of repetitions
    L = 0  # length of single/multiple sequence
    if Decodedint >= 0:  # single
        N = 1
        L = Decodedint + 1
    else:  # multiple
        L = -Decodedint // 16 + 1
        N = -Decodedint - (L - 1) * 16 + 1
    #print("N =", N, " L =", L)
    return (N, L)
450a3e6057106e9567952b33271935392702aea9
708,354
from unittest.mock import patch def dont_handle_lock_expired_mock(app): """Takes in a raiden app and returns a mock context where lock_expired is not processed """ def do_nothing(raiden, message): # pylint: disable=unused-argument return [] return patch.object( app.raiden.message_handler, "handle_message_lockexpired", side_effect=do_nothing )
2a893e7e755010104071b2b1a93b60a0417e5457
708,355
def system(_printer, ast): """Prints the instance system initialization.""" process_names_str = ' < '.join(map(lambda proc_block: ', '.join(proc_block), ast["processNames"])) return f'system {process_names_str};'
f16c6d5ebe1a029c07efd1f34d3079dd02eb4ac0
708,356
def blend_multiply(cb: float, cs: float) -> float: """Blend mode 'multiply'.""" return cb * cs
d53c3a49585cf0c12bf05c233fc6a9dd30ad25b9
708,357
def my_func_1(x, y):
    """
    Returns x raised to the power y.

    Keyword arguments:
    x -- the number
    y -- the exponent

    (number, number) -> number

    >>> my_func_1(2, 2)
    4
    """
    return x ** y
9572566f1660a087056118bf974bf1913348dfa4
708,358
def matrix_mult(a, b): """ Function that multiplies two matrices a and b Parameters ---------- a,b : matrices Returns ------- new_array : matrix The matrix product of the inputs """ new_array = [] for i in range(len(a)): new_array.append([0 for i in range(len(b[0]))]) for j in range(len(b[0])): for k in range(len(a[0])): new_array[i][j] += a[i][k] * b[k][j] return new_array
5e0f27f29b6977ea38987fa243f08bb1748d4567
708,359
def build_eslog_config_param(
    group_id,
    task_name,
    rt_id,
    tasks,
    topic,
    table_name,
    hosts,
    http_port,
    transport,
    es_cluster_name,
    es_version,
    enable_auth,
    user,
    password,
):
    """
    Build the ES parameters
    :param group_id: cluster name
    :param task_name: task name
    :param rt_id: rt_id
    :param tasks: number of tasks
    :param topic: source topic
    :param table_name: table name
    :param hosts: ES hosts
    :param http_port: ES port
    :param transport: ES transport port
    :param es_cluster_name: ES cluster name
    :param es_version: ES cluster version
    :param enable_auth: whether to enable authentication
    :param user: username
    :param password: password (encrypted)
    :return: the parameters
    """
    return {
        "group.id": group_id,
        "rt.id": rt_id,
        "topics": topic,
        "type.name": table_name,
        "tasks.max": "%s" % tasks,
        "es.index.prefix": table_name.lower(),
        "es.cluster.name": es_cluster_name,
        "es.cluster.version": es_version,
        "es.hosts": hosts,
        "es.transport.port": transport,
        "es.host": hosts,
        "es.http.port": http_port,
        "connector.class": "com.tencent.bk.base.datahub.databus.connect.sink.es.EsSinkConnector",
        "flush.timeout.ms": "10000",
        "batch.size": "10000",
        "max.in.flight.requests": "5",
        "retry.backoff.ms": "5000",
        "max.retry": "5",
        "es.cluster.enable.auth": enable_auth,
        "es.cluster.enable.PlaintextPwd": False,  # passwords are always encrypted here
        "es.cluster.username": user,
        "es.cluster.password": password,
    }
826b8d97ef14792845b4ced98ab5dcb3f36e57f3
708,360
import os def get_all_file_paths(directory): """ Gets all the files in the specified input directory """ file_paths = [] for root, _, files in os.walk(directory): for filename in files: filepath = os.path.join(root, filename) file_paths.append(filepath) return file_paths
7055a3f3f3be5f6e0074cef55689c6234d38deb6
708,361
def loadDataSet(): """ load data from data set Args: Returns: dataSet: train input of x labelSet: train input of y """ # initialize x-trainInput,y-trainInput dataSet = [] labelSet = [] # open file reader fr = open('testSet.txt') for line in fr.readlines(): # strip() -- get rid of the space on both side # split() -- division as tab lineArr = line.strip().split() # padding data in list # x0 = 1.0 , x1 = column1 , x2 = column2 dataSet.append([1.0, float(lineArr[0]), float(lineArr[1])]) # label = column3 labelSet.append(float(lineArr[2])) return dataSet,labelSet
38f42a8a7c6b12e3d46d757d98565222e931149f
708,362
def clear_bit(val, offs): """Clear bit at offset 'offs' in value.""" return val & ~(1 << offs)
e50e5f8ccc3fe08d9b19248e290c2117b78379ee
708,363
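A quick sanity check for clear_bit above, using illustrative values:

assert clear_bit(0b1011, 1) == 0b1001  # bit 1 cleared
assert clear_bit(0b1001, 1) == 0b1001  # bit already clear: value unchanged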
def _ExtractCLPath(output_of_where): """Gets the path to cl.exe based on the output of calling the environment setup batch file, followed by the equivalent of `where`.""" # Take the first line, as that's the first found in the PATH. for line in output_of_where.strip().splitlines(): if line.startswith('LOC:'): return line[len('LOC:'):].strip()
6a0c0d4aa74b4e84de69de023e2721edd95c36bd
708,364
def get_word_vector_list(doc, w2v): """Get all the vectors for a text""" vectors = [] for word in doc: try: vectors.append(w2v.wv[word]) except KeyError: continue return vectors
f228c2100b6a622fdb677954257e2d1590dcc0ff
708,365
from typing import Optional
from pathlib import Path
import importlib.util  # importing the submodule explicitly; bare `import importlib` may not expose .util

def destination(stub: str) -> Optional[Path]:
    """Determine stub path

    Only handle micropython stubs, ignoring any cPython stdlib equivalents.
    """
    prefix, _, suffix = stub.partition(".")
    if importlib.util.find_spec(prefix):  # type: ignore
        return None  # in cPython stdlib, skip
    path = Path(prefix)
    if suffix in ("py", "pyi"):  # module
        return path / f"__init__.{suffix}"
    return path / suffix
8b2552513dbeaa9dc09cb85703b736e17c4788b5
708,366
def get_edges_out_for_vertex(edges: list, vertex: int) -> list: """Get a sublist of edges that have the specified vertex as first element :param edges: edges of the graph :param vertex: vertex of which we want to find the corresponding edges :return: selected edges """ return [e for e in edges if e[0] == vertex]
21485073df1c754e7c8e2b7dd9cafef284e601e7
708,367
def convert_to_floats(tsi):
    """
    A helper function that takes all of the fields of a TaxSaveInputs model
    and converts them to floats, or lists of floats
    """
    def numberfy_one(x):
        if isinstance(x, float):
            return x
        else:
            return float(x)

    def numberfy(x):
        if isinstance(x, list):
            return [numberfy_one(i) for i in x]
        else:
            return numberfy_one(x)

    attrs = vars(tsi)
    return {k: numberfy(v) for k, v in list(attrs.items()) if v}
a6f93f402c547435fa9fe611481084215f52f13b
708,368
def build_job_spec_name(file_name, version="develop"): """ :param file_name: :param version: :return: str, ex. job-hello_world:develop """ name = file_name.split('.')[-1] job_name = 'job-%s:%s' % (name, version) return job_name
55a45052852e6b24cb4370f7efe5c213da83e423
708,369
def conditions(x): """ This function will check whether the constraints that apply to our optimization are met or not. """ if ( (10/x[0]) > 66.0 ): return False elif ( (10/x[0] + 12/x[1]) > 88.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2]) > 107.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3]) > 128.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4]) > 157.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5]) > 192.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6]) > 222.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7]) > 242.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8]) > 268.0 ): return False elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8] + 8/x[9]) > 292.0 ): return False return True
263fdc3fd07aa656982401f71071fcd684b8625f
708,372
def single_from(iterable): """Check that an iterable contains one unique value, and return it.""" unique_vals = set(iterable) if len(unique_vals) != 1: raise ValueError('multiple unique values found') return unique_vals.pop()
c8fb8864083195ad913ff1ddf0114b5a50068902
708,373
from typing import Dict def line_coloring(num_vertices) -> Dict: """ Creates an edge coloring of the line graph, corresponding to the optimal line swap strategy, given as a dictionary where the keys correspond to the different colors and the values are lists of edges (where edges are specified as tuples). The graph coloring consists of one color for all even-numbered edges and one color for all odd-numbered edges. Args: num_vertices: The number of vertices in the line graph Returns: Graph coloring as a dictionary of edge lists """ line_coloring = {} for i in range(num_vertices - 1): line_coloring[(i, i + 1)] = i % 2 line_coloring[(i + 1, i)] = i % 2 return line_coloring
423e626ecbf4f48e0a192241375484a077fbe0b2
708,374
def flatten_outputs(predictions, number_of_classes): """Flatten the prediction batch except the prediction dimensions""" logits_permuted = predictions.permute(0, 2, 3, 1) logits_permuted_cont = logits_permuted.contiguous() outputs_flatten = logits_permuted_cont.view(-1, number_of_classes) return outputs_flatten # outputs_flatten = torch.tensor(predictions
c58fb965443a5402e9bec32afaebe9376c74653f
708,375
def get_r_vals(cell_obj): """Get radial distances for inner and outer membranes for the cell object""" r_i = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_inner']['x'], cell_obj.data.data_dict['storm_inner']['y']) r_o = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_outer']['x'], cell_obj.data.data_dict['storm_outer']['y']) return r_i, r_o
d51c926791845006dfe9a97cbd9c82c041ea701b
708,376
def extract(input_data: str) -> tuple: """take input data and return the appropriate data structure""" rules = input_data.split('\n') graph = dict() reverse_graph = dict() for rule in rules: container, contents = rule.split('contain') container = ' '.join(container.split()[:2]) content_graph = dict() for content in contents.split(','): if content == " no other bags.": break parts = content.split() amount = int(parts[0]) color = ' '.join(parts[1:3]) content_graph[color] = amount if color in reverse_graph.keys(): reverse_graph[color].append(container) else: reverse_graph[color] = [container] graph[container] = content_graph return (graph, reverse_graph)
f71cdc23fdfaf6ef0d054c0c68e513db66289c12
708,377
def api_2_gamma_oil(value):
    """
    converts density in API (American Petroleum Institute gravity) to gamma_oil
    (oil relative density by water)

    :param value: density in API (American Petroleum Institute gravity)
    :return: oil relative density by water
    """
    # Standard relation: API = 141.5 / gamma - 131.5, hence gamma = 141.5 / (API + 131.5)
    return 141.5 / (value + 131.5)
20e625f22092461fcf4bc2e2361525abf8051f97
708,378
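A sanity check for api_2_gamma_oil: fresh water is defined as 10 degrees API, i.e. a relative density of 1.0:

assert abs(api_2_gamma_oil(10.0) - 1.0) < 1e-12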
def cluster_profile_platform(cluster_profile): """Translate from steps.cluster_profile to workflow.as slugs.""" if cluster_profile == 'azure4': return 'azure' if cluster_profile == 'packet': return 'metal' return cluster_profile
0a01f566562002fe43c3acbb00d5efcc09d25314
708,379
import os def get_model_python_path(): """ Returns the python path for a model """ return os.path.dirname(__file__)
5ddd66f8b0c37b8a84eab614c4e3efd6efe9d9ef
708,381
import os
import errno

def convertGMLToGeoJSON(config, outputDir, gmlFilepath, layerName, t_srs='EPSG:4326',
                        flip_gml_coords=False):
    """ Convert a GML file to GeoJSON.  Will silently return if the GeoJSON already exists

        @param config A Python ConfigParser containing the section 'GDAL/OGR' and option 'PATH_OF_OGR2OGR'
        @param outputDir String representing the absolute/relative path of the directory into which GeoJSON should be written
        @param gmlFilepath String representing the absolute path of the GML file to convert
        @param layerName String representing the name of the layer contained in the GML file to write to a GeoJSON
        @param t_srs String representing the spatial reference system of the output GeoJSON, of the form 'EPSG:XXXX'

        @return String representing the name of the GeoJSON written

        @exception Exception if the conversion failed.
    """
    pathToOgrCmd = config.get('GDAL/OGR', 'PATH_OF_OGR2OGR')

    if not os.path.isdir(outputDir):
        raise IOError(errno.ENOTDIR, "Output directory %s is not a directory" % (outputDir,))
    if not os.access(outputDir, os.W_OK):
        raise IOError(errno.EACCES, "Not allowed to write to output directory %s" % (outputDir,))
    outputDir = os.path.abspath(outputDir)

    geojsonFilename = "%s.geojson" % (layerName,)
    geojsonFilepath = os.path.join(outputDir, geojsonFilename)

    if not os.path.exists(geojsonFilepath):
        # Need to flip coordinates in GML as SSURGO WFS now returns coordinates in lat, lon order
        # rather than lon, lat order that OGR expects.  For more information, see:
        # http://trac.osgeo.org/gdal/wiki/FAQVector#HowdoIflipcoordinateswhentheyarenotintheexpectedorder
        if flip_gml_coords and t_srs == 'EPSG:4326':
            ogrCommand = "%s -f 'GeoJSON' -nln %s -s_srs '+proj=latlong +datum=WGS84 +axis=neu +wktext' -t_srs %s %s %s" % (pathToOgrCmd, layerName, t_srs, geojsonFilepath, gmlFilepath)
        else:
            ogrCommand = "%s -f 'GeoJSON' -nln %s -t_srs %s %s %s" % (pathToOgrCmd, layerName, t_srs, geojsonFilepath, gmlFilepath)
        returnCode = os.system(ogrCommand)
        if returnCode != 0:
            raise Exception("GML to GeoJSON command %s returned %d" % (ogrCommand, returnCode))

    return geojsonFilename
70ee0676d13a647d42a39313d5be1545042f73c7
708,382
import os import re def _generate_flame_clip_name(item, publish_fields): """ Generates a name which will be displayed in the dropdown in Flame. :param item: The publish item being processed. :param publish_fields: Publish fields :returns: name string """ # this implementation generates names on the following form: # # Comp, scene.nk (output background), v023 # Comp, Nuke, v023 # Lighting CBBs, final.nk, v034 # # (depending on what pieces are available in context and names, names # may vary) context = item.context name = "" # If we have template fields passed in, then we'll try to extract # some information from them. If we don't, then we fall back on # some defaults worked out below. publish_fields = publish_fields or dict() # the shot will already be implied by the clip inside Flame (the clip # file which we are updating is a per-shot file. But if the context # contains a task or a step, we can display that: if context.task: name += "%s, " % context.task["name"].capitalize() elif context.step: name += "%s, " % context.step["name"].capitalize() # If we have a channel set for the write node or a name for the scene, # add those. If we don't have a name from the template fields, then we # fall back on the file sequence's basename without the extension or # frame number on the end (if possible). default_name, _ = os.path.splitext( os.path.basename(item.properties["sequence_paths"][0]) ) # Strips numbers off the end of the file name, plus any underscore or # . characters right before it. # # foo.1234 -> foo # foo1234 -> foo # foo_1234 -> foo default_name = re.sub(r"[._]*\d+$", "", default_name) rp_name = publish_fields.get("name", default_name,) rp_channel = publish_fields.get("channel") if rp_name and rp_channel: name += "%s.nk (output %s), " % (rp_name, rp_channel) elif not rp_name: name += "Nuke output %s, " % rp_channel elif not rp_channel: name += "%s.nk, " % rp_name else: name += "Nuke, " # Do our best to get a usable version number. If we have data extracted # using a template, we use that. If we don't, then we can look to see # if this publish item came with a clip PublishedFile, in which case # we use the version_number field from that entity +1, as a new version # of that published clip will be created as part of this update process, # and that is what we want to associate ourselves with here. version = publish_fields.get("version") if version is None and "flame_clip_publish" in item.properties: version = item.properties["flame_clip_publish"]["version_number"] + 1 version = version or 0 name += "v%03d" % version return name
847956c6897a873145c78adbcf6530f0a47a9259
708,383
def statistic_bbox(dic, dic_im):
    """ Count the bboxes in the seed and image-level data for each class

    Parameters
    ----------
    dic: seed roidb dictionary
    dic_im: image-level roidb dictionary

    Returns
    -------
    num_bbox: list of per-class bbox counts (20 classes) for seed data
    num_bbox_im: list of per-class bbox counts (20 classes) for image-level data
    """
    num_bbox = [0] * 20
    num_bbox_im = [0] * 20
    for d in dic:
        for c in d['gt_classes']:
            num_bbox[c-1] += 1
    for d in dic_im:
        for c in d['gt_classes']:
            num_bbox_im[c-1] += 1
    print("Statistic for seed data bbox: ", num_bbox)
    print("Statistic for image-level data bbox: ", num_bbox_im)
    return num_bbox, num_bbox_im
782314baeab7fbec36c9ea56bcec57d5a508a918
708,384
import base64 import pickle def encode(something): """ We encode all messages as base64-encoded pickle objects in case later on, we want to persist them or send them to another system. This is extraneous for now. """ return base64.b64encode(pickle.dumps(something))
89c9c855b8b66aadc55c1602e133906d3220691a
708,386
import re

def paginatedUrls(pattern, view, kwargs=None, name=None):
    """
    Takes a group of url tuples and adds paginated urls.

    Extends a url tuple to include paginated urls.

    Currently doesn't handle url() compiled patterns.
    """
    results = [(pattern, view, kwargs, name)]
    tail = ''
    mtail = re.search(r'(/+\+?\*?\??\$?)$', pattern)
    if mtail:
        tail = mtail.group(1)
    pattern = pattern[:len(pattern) - len(tail)]
    results += [(pattern + r"/(?P<page_number>\d+)" + tail, view, kwargs)]
    results += [(pattern + r"/(?P<page_number>\d+)\|(?P<page_limit>\d+)" + tail, view, kwargs)]
    if not kwargs:
        kwargs = dict()
    kwargs['page_limit'] = 0
    results += [(pattern + r"/?\|(?P<page_limit>all)" + tail, view, kwargs)]
    return results
2102309434e02e0df49888978d41ffce2de0e2dc
708,387
def remove_empty(s):
    """\
    Remove empty strings from a list.

    >>> a = ['a', 2, '', 'b', '']
    >>> remove_empty(a)
    ['a', 2, 'b']
    """
    while True:
        try:
            s.remove('')
        except ValueError:
            break
    return s
98778e4cc90f11b9b74ac6d26b203cbfc958fd7b
708,388
import pathlib
import os

def normalize_path(filepath, expand_vars=False):
    """
    Fully normalizes a given filepath to an absolute path.

    :param str filepath: The filepath to normalize
    :param bool expand_vars: Expands embedded environment variables if True
    :returns: The fully normalized filepath
    :rtype: str
    """
    filepath = str(pathlib.Path(filepath).expanduser().resolve())
    if expand_vars:
        filepath = os.path.expandvars(filepath)
    return filepath
d408d6c1cd86072473a52f626821fcebd380c29d
708,389
import hashlib def md5_hash_file(fh): """Return the md5 hash of the given file-object""" md5 = hashlib.md5() while True: data = fh.read(8192) if not data: break md5.update(data) return md5.hexdigest()
f572ec27add8024e5fa8b9a82b5d694905e4d0f8
708,390
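A usage sketch for md5_hash_file above; the path is hypothetical, and the file must be opened in binary mode so read() yields bytes:

with open("example.bin", "rb") as fh:  # hypothetical file
    digest = md5_hash_file(fh)
print(digest)  # 32-character hex digest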
def Iq(q, intercept, slope):
    """
    :param q: Input q-value
    :param intercept: Intercept in linear model
    :param slope: Slope in linear model
    :return: Calculated intensity
    """
    inten = intercept + slope*q
    return inten
af3e580e6061089b431ef25f1f08def6f29c8ef6
708,391
def _cast_query(query, col):
    """ Allow different query types (e.g. numerical, list, str) """
    query = query.strip()
    if col in {"t", "d"}:
        return query
    if query.startswith("[") and query.endswith("]"):
        if "," in query:
            # split the bracketed body on commas
            query = query[1:-1].split(",")
            return [i.strip() for i in query]
    if query.isdigit():
        return int(query)
    try:
        return float(query)
    except Exception:
        return query
4b6cfc823f8b2e78f343e73683b418112e66f43d
708,392
def find_first_img_dim(import_gen): """ Loads in the first image in a provided data set and returns its dimensions Intentionally returns on first iteration of the loop :param import_gen: PyTorch DataLoader utilizing ImageFolderWithPaths for its dataset :return: dimensions of image """ for x, _, _ in import_gen: return x[0].shape[-2], x[0].shape[-1]
3ccaccdfb20d7b2ca4d339adacd3c706a460fdef
708,393
import math def q_b(m0, m1, m2, n0, n1, n2): """Stretch""" return math.sqrt((m0 - n0)**2 + (m1 - n1)**2 + (m2 - n2)**2)
61cf1b5eec6c89be7f822cbdbc03564b805a1920
708,394
from typing import List def metadata_partitioner(rx_txt: str) -> List[str]: """Extract Relax program and metadata section. Parameters ---------- rx_txt : str The input relax text. Returns ------- output : List[str] The result list of partitioned text, the first element is the relax program, and the second is metadata section. """ partitions = [] left_curly = 0 meta_start = 0 meta_end = 0 for i, char in enumerate(rx_txt): if i < 0: raise ValueError("The program is invalid.") if char == "{": if meta_start == 0: meta_start = i left_curly += 1 elif char == "}": left_curly -= 1 if left_curly == 0: meta_end = i + 1 break if meta_end == 0: raise ValueError("The metadata section was not found.") metadata = rx_txt[meta_start:meta_end] rx_program = rx_txt[meta_end:-1] partitions.append(rx_program) partitions.append(metadata) return partitions
dd09aff9ea517813d43ff307fb9fc425b7338943
708,395
def make_aware(value, timezone): """ Makes a naive datetime.datetime in a given time zone aware. """ if hasattr(timezone, 'localize'): # available for pytz time zones return timezone.localize(value, is_dst=None) else: # may be wrong around DST changes return value.replace(tzinfo=timezone)
b466b4fda2daf54b7aa5e8f00ad7b10397e61c7b
708,396
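A minimal sketch of make_aware with pytz, whose timezones provide the localize() branch above; the date is illustrative:

import datetime
import pytz

naive = datetime.datetime(2021, 1, 15, 12, 0)
aware = make_aware(naive, pytz.timezone("Europe/Berlin"))
print(aware.tzinfo)  # now carries the Europe/Berlin zone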
def to_dict(funs): """Convert an object to a dict using a dictionary of functions. to_dict(funs)(an_object) => a dictionary with keys calculated from functions on an_object Note the dictionary is copied, not modified in-place. If you want to modify a dictionary in-place, do adict.update(to_dict(funs)(a_dict)) Use to_dict(funs) in a map, and you can generate a list of dictionaries from a list of objects (which could also be dictionaries). :: K is hashable type => {K: (X -> V)} -> [X] -> {K: V} Equivalent to the following in Python 3: {k: f(an_object) for (k, f) in funs.items()} >>> from operator import itemgetter >>> funs = {'id': itemgetter('id'), 'fullname': lambda x: '%(forename)s %(surname)s' % x} >>> an_object = {'id': 1, 'forename': 'Fred', 'surname': 'Bloggs'} >>> result = to_dict(funs)(an_object) >>> result['id'] 1 >>> result['fullname'] 'Fred Bloggs' >>> 'forename' in result # Original keys are left out False """ def to_dict_funs(an_object): return dict((k, f(an_object)) for (k, f) in funs.items()) return to_dict_funs
d22bbcb3c1913361c3906fd2e7f3d254dc67de28
708,397
def fin(activity): """Return the end time of the activity. """ return activity.finish
ed5b1d1e0f29f403cfee357a264d05d5cc88093e
708,398
def solve(in_array): """ Similar to 46442a0e, but where new quadrants are flips of the original array rather than rotations :param in_array: input array :return: expected output array """ array_edgelength = len(in_array[0]) # input array edge length opp_end = array_edgelength*2-1 # used for getting opposite end of array prediction = [[-1]*array_edgelength*2 for i in range(array_edgelength*2)] # init 2d array # iterate through all values for y in range(len(in_array)): for x in range(len(in_array[0])): val = in_array[y][x] prediction[y][x] = val # other 3 quadrants are flips prediction[y][opp_end-x] = val prediction[opp_end-y][opp_end-x] = val prediction[opp_end-y][x] = val return prediction
0af23e82caf65bea64eeeae6da8400ef6ec03426
708,399
def get_mapping_rules():
    """ Get mapping rules as defined in business_object.js

    Special cases:
      Audit has a direct mapping to Program with program_id
      Request has a direct mapping to Audit with audit_id
      Response has a direct mapping to Request with request_id
      DocumentationResponse has a direct mapping to Request with request_id
      DocumentationResponse has normal mappings with all other objects in the mapping modal
      Section has a direct mapping to Standard/Regulation/Policy with directive_id
      Anything can be mapped to a request; the frontend shows the audit instead
    """

    def filter(object_list):
        """ remove all lower case items since real objects are CamelCase """
        return set([item for item in object_list if item != item.lower()])

    # these rules are copy pasted from
    # src/ggrc/assets/javascripts/apps/business_objects.js line: 276
    business_object_rules = {
        "Program": "Issue ControlAssessment Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request",  # noqa
        # removed Person because Programs have a "Mapped" attribute for people mappings
        "Audit": "Issue ControlAssessment Request history Person program program_controls Request",  # noqa
        "Issue": "ControlAssessment Control Audit Program Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Issue Request",  # noqa
        "ControlAssessment": "Issue Objective Program Regulation Contract Policy Standard Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Regulation": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request",  # noqa
        "Policy": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request",  # noqa
        "Standard": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request",  # noqa
        "Contract": "Program Issue ControlAssessment Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Request",  # noqa
        "Clause": "Contract Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Policy Regulation Standard Request",  # noqa
        "Section": "Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Policy Regulation Standard Contract Clause Request",  # noqa
        "Objective": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request",  # noqa
        "Control": "Issue ControlAssessment Request Program Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Person": "Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request",  # noqa
        "OrgGroup": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Vendor": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "System": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Process": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "DataAsset": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "AccessGroup": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Product": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Project": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Facility": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request",  # noqa
        "Market": "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request"  # noqa
    }

    split_rules = {k: v.split() for k, v in business_object_rules.items()}
    filtered_rules = {k: filter(v) for k, v in split_rules.items()}
    return filtered_rules
59b94070d3fe35eca8c356162caf9969c9ea47d0
708,400
import os
from pathlib import Path
import zipfile

def extract_zip(zip_path, ret_extracted_path=False):
    """Extract a zip and delete the .zip file."""
    dir_parents = os.path.dirname(zip_path)
    dir_name = Path(zip_path).stem
    extracted_path = os.path.join(dir_parents, dir_name, '')

    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(dir_parents)
    os.remove(zip_path)
    print(f"Extracted '{Path(zip_path).name}' to '{extracted_path}'.")

    # report the extraction target after the work is done
    if ret_extracted_path:
        return extracted_path
6ff7691ed54ce3941941b3e014f92ee362237b7c
708,401
def get_version(): """ Do this so we don't have to import lottery_ticket_pruner which requires keras which cannot be counted on to be installed when this package gets installed. """ with open('lottery_ticket_pruner/__init__.py', 'r') as f: for line in f.readlines(): if line.startswith('__version__'): version = line.split('=')[1].strip().replace('"', '').replace('\'', '') return version return ''
0ab355110918e1c92b056932ba1d03768826c4f2
708,402
def extract(d, keys): """ Extract a key from a dict. :param d: The dict. :param keys: A list of keys, in order of priority. :return: The most important key with an value found. """ if not d: return for key in keys: tmp = d.get(key) if tmp: return tmp
9985e2f1079088251429fa26611fa6e15b920622
708,403
from pathlib import Path def get_output_filename(output_folder: str, repository_type: str, repository_name: str, filename: str) -> Path: """Returns the output filename for the file fetched from a repository.""" return ( Path(output_folder) / Path(repository_type.lower()) / Path(Path(repository_name).name) / Path(Path(filename).name) )
23b806f98265b45b799dbcc177760d5ceb8248fb
708,404
import logging def get_vertical_axes(nc_file): """ Scan input netCDF file and return a list of vertical axis variables, requiring specific axis names """ vertical_axes = [] for var_name, var in nc_file.variables.items(): if var_name in ('full_levels', 'half_levels'): vertical_axes.append(var) logging.info('Found %i vertical axes.', len(vertical_axes)) return vertical_axes
f26b89d9d9839759f3b1ed7a990d548f996e29d2
708,406
def b_2_d(x): """ Convert byte list to decimal :param x: byte list :return: decimal """ s = 0 for i in range(0, len(x)): s += x[i]*2**i return s
e865700ea30be535ad014908d6b6024186cc5ac6
708,407
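b_2_d reads the list little-endian: index i contributes x[i] * 2**i. Illustrative checks:

assert b_2_d([1, 0, 1]) == 5  # 1*1 + 0*2 + 1*4
assert b_2_d([0, 1, 1]) == 6  # 0*1 + 1*2 + 1*4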
import pickle def get_weights(): """ Loads uni-modal text and image CNN model weights. Returns: tuple: text and image weights. """ text_weight_file = open("models/unimodal_text_CNN_weights.pickle", "rb") text_weights = pickle.load(text_weight_file) text_weight_file.close() image_weight_file = open("models/unimodal_image_CNN_LSTM_weights.pickle", "rb") image_weights = pickle.load(image_weight_file) image_weight_file.close() return text_weights, image_weights
abff59a197130f5776fdb0cacc3f895ff5d7393e
708,408
def collect_users(): """Collect a list of all Santas from the user""" list_of_santas = [] while 1: item = input("Enter a name\n") if not item: break list_of_santas.append(item) return list_of_santas
d86ec360518fdb497b86b7f631fee0dc4464e2bb
708,409
def view_inv(inventory_list):
    """list -> str

    Build a display string of the Rental attributes for each inventory item
    """
    inventory_string = ''
    for item in inventory_list:
        inventory_string += ('\nRental: ' + str(item[0])
                             + '\nQuantity: ' + str(item[1])
                             + '\nDeposit: ' + "$" + str(item[2])
                             + "\nPrice Per Week: " + "$" + str(item[3])
                             + '\nReplacement Value: ' + "$" + str(int(item[4])) + "\n")
    return inventory_string
540b6bb2597ba5686a070749c2526ad09be25d5f
708,410
def generate_smb_proto_payload(*protos):
    """Generate an SMB protocol payload.

    Packs the given protos in order.
    """
    hexdata = []
    for proto in protos:
        hexdata.extend(proto)
    return "".join(hexdata)
848fdad11941a6d917bd7969fb7ffb77025cd13d
708,411
import pathlib
import stat

def check_file(file_name):
    """
    test if file: exists and is writable or can be created

    Args:
        file_name (str): the file name

    Returns:
        (pathlib.Path): the path or None if problems
    """
    if not file_name:
        return None
    path = pathlib.Path(file_name)

    # if file exists test if writable
    if path.exists() and path.is_file():
        handle = None
        try:
            # open in append mode so an existing file is not truncated
            handle = open(path, 'a')
        except PermissionError:
            return None
        finally:
            if handle:
                handle.close()

    # create file with write permissions
    try:
        path.touch(stat.S_IWUSR)
    except PermissionError:
        return None

    return path
5b8ff64795aa66d3be71444e158357c9b7a1b2c0
708,412
def echo(word:str, n:int, toupper:bool=False) -> str: """ Repeat a given word some number of times. :param word: word to repeat :type word: str :param n: number of repeats :type n: int :param toupper: return in all caps? :type toupper: bool :return: result :return type: str """ res=word*n if (toupper): res=res.upper() return res
62a68c1ff577781a84a58f124beec8d31b0b456c
708,413
def add(a,b): """ This function adds two numbers together """ return a+b
96173657034d469ea43142179cd408e0c1f1e12d
708,414
def get_parser(dataset_name): """Returns a csv line parser function for the given dataset.""" def inat_parser(line, is_train=True): if is_train: user_id, image_id, class_id, _ = line return user_id, image_id, class_id else: image_id, class_id, _ = line return image_id, class_id def landmarks_parser(line, is_train=True): if is_train: user_id, image_id, class_id = line return user_id, image_id, class_id else: image_id, class_id = line return image_id, class_id parsers = { 'inat': inat_parser, 'landmarks': landmarks_parser, 'cifar': landmarks_parser # landmarks and cifar uses the same parser. } return parsers[dataset_name]
4901dde39ef6af9cab1adeacb50fff7b90950cd6
708,415
import math

def mul_pdf(mean1, var1, mean2, var2):
    """
    Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
    results as a tuple (mean, var, scale_factor).

    Strictly speaking the product of two Gaussian PDFs is a Gaussian
    function, not Gaussian PDF. It is, however, proportional to a Gaussian
    PDF. `scale_factor` provides this proportionality constant

    Parameters
    ----------
    mean1 : scalar
         mean of first Gaussian

    var1 : scalar
         variance of first Gaussian

    mean2 : scalar
         mean of second Gaussian

    var2 : scalar
         variance of second Gaussian

    Returns
    -------
    mean : scalar
        mean of product

    var : scalar
        variance of product

    scale_factor : scalar
        proportionality constant

    Examples
    --------
    >>> mean, var, scale_factor = mul_pdf(1, 2, 3, 4)
    >>> (mean, var)
    (1.6666666666666667, 1.3333333333333333)

    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability Functions",
    Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    mean = (var1*mean2 + var2*mean1) / (var1 + var2)
    var = 1. / (1./var1 + 1./var2)

    S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \
        math.sqrt(2 * math.pi * (var1 + var2))

    return mean, var, S
8ecb925273cd0e4276b867687e81b0a26419f35f
708,416
def obfuscate_email(email): """Takes an email address and returns an obfuscated version of it. For example: test@example.com would turn into t**t@e*********m """ if email is None: return None splitmail = email.split("@") # If the prefix is 1 character, then we can't obfuscate it if len(splitmail[0]) <= 1: prefix = splitmail[0] else: prefix = f'{splitmail[0][0]}{"*"*(len(splitmail[0])-2)}{splitmail[0][-1]}' # If the domain is missing or 1 character, then we can't obfuscate it if len(splitmail) <= 1 or len(splitmail[1]) <= 1: return f"{prefix}" else: domain = f'{splitmail[1][0]}{"*"*(len(splitmail[1])-2)}{splitmail[1][-1]}' return f"{prefix}@{domain}"
36c230ed75fc75fc7ecd6dd2ea71a6b3310c4108
708,417
def parse_boolean(arg: str): """Returns boolean representation of argument.""" arg = str(arg).lower() if 'true'.startswith(arg): return True return False
2f0a214212aa43a8b27d9a3be04f14af67c586bc
708,418
def ascending_coin(coin): """Returns the next ascending coin in order. >>> ascending_coin(1) 5 >>> ascending_coin(5) 10 >>> ascending_coin(10) 25 >>> ascending_coin(2) # Other values return None """ if coin == 1: return 5 elif coin == 5: return 10 elif coin == 10: return 25
e927d8ac3f38d4b37de71711ac90d6ca2151a366
708,419
def get_key(rule_tracker, value): """ Given an event index, its corresponding key from the dictionary is returned. Parameters: rule_tracker (dict): Key-value pairs specific to a rule where key is an activity, pair is an event index value (int): Index of event in event log Returns: key (int): Position of value in rule_tracker """ for key in rule_tracker: if rule_tracker[key] == value: return key
1921e9a68d0df0867248ca83e2ba641101735fc7
708,421
def all_columns_empty():
    """All columns are empty ... test will demonstrate this edge case can be handled"""
    return [[] for i in range(0, 100)]
77a354978f82fd61d0f4d12db57a7fc455f4af28
708,422
def check_values_on_diagonal(matrix): """ Checks if a matrix made out of dictionary of dictionaries has values on diagonal :param matrix: dictionary of dictionaries :return: boolean """ for line in matrix.keys(): if line not in matrix[line].keys(): return False return True
bc7979adcfb5dc7c19b3cdb3830cf2397c247846
708,423
from pathlib import Path def get_current_dir(): """ Get the directory of the executed Pyhton file (i.e. this file) """ # Resolve to get rid of any symlinks current_path = Path(__file__).resolve() current_dir = current_path.parent return current_dir
c0e6fa1300970226fce42bf57fe2d2ed6b3e3604
708,424
import csv def build_gun_dictionary(filename): """Build a dictionary of gun parameters from an external CSV file: - Key: the gun designation (e.g. '13.5 in V' or '12 in XI') - Value: a list of parameters, in the order: * caliber (in inches) * maxrange (maximum range in yards) * longtohit (chance to hit per gun and minute at long range) * longmin (minimum range considered to be long) * effectivetohit (chance to hit per gun and minute at effective range) * effectivemin (minimum range considered to be effective) * shorttohit (chance to hit per gun and minute at short range) """ gundict = {} with open(filename) as sourcefile: reader = csv.reader(sourcefile, delimiter=",") next(reader) for row in reader: gundata = list(row) gundict[gundata[0]] = list(map(float, gundata[1:])) return gundict
b9e38d766430d44b94ae9fa64c080416fdeb8482
708,425
import string

def column_to_index(ref):
    """
    Convert a spreadsheet column letter to a 0-based ordinal.
    Params:
        ref(str): A, B, C, ... Z, AA, AB, ...
    Returns:
        int: 0-based column index
    """
    column = 0
    for i, ch in enumerate(reversed(ref)):
        d = string.ascii_uppercase.index(ch) + 1
        column += d * pow(len(string.ascii_uppercase), i)
    return column - 1
7a6f89fa238d3d47a1e45b2e83821dbd4e8b23f8
708,426
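Spot checks for column_to_index, matching common spreadsheet conventions:

assert column_to_index("A") == 0
assert column_to_index("Z") == 25
assert column_to_index("AA") == 26
assert column_to_index("AB") == 27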
import numpy as np def stdev_time(arr1d, stdev): """ detects breakpoints through multiple standard deviations and divides breakpoints into timely separated sections (wanted_parts) - if sigma = 1 -> 68.3% - if sigma = 2 -> 95.5% - if sigma = 2.5 -> 99.0% - if sigma = 3 -> 99.7% - if sigma = 4 -> 99.9% ---------- arr1d: numpy.array 1D array representing the time series for one pixel stdev: float number multiplied with standard deviation to define the probability space for a breakpoint Returns ---------- numpy.int32 0 = no breakpoint over time 15 = breakpoint in the 1st section 16 = breakpoint in the 2nd section 17 = breakpoint in the 3rd section 18 = breakpoint in the 4th section 19 = breakpoint in the 5th section 31 = breakpoint in the 1st AND 2nd section 32 = breakpoint in the 1st AND 3rd section 33 = breakpoint in the 1st AND 4th section OR breakpoint in the 2nd AND 3rd section 34 = breakpoint in the 1st AND 5th section OR 2nd AND 4th section 35 = breakpoint in the 2nd section AND 5th section OR 3rd AND 4th section 36 = breakpoint in the 3rd AND 5th section 37 = breakpoint in the 4th AND 5th section 48 = breakpoint in the 1st, 2nd AND 3rd section 49 = breakpoint in the 1st, 2nd AND 4th section 50 = breakpoint in the 1st, 2nd AND 5th section OR 1st, 3rd AND 4th section 51 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 4th section 52 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 5th section 53 = breakpoint in the 2nd, 4th AND 5th section 54 = breakpoint in the 3rd, 4th AND 5th section 66 = breakpoint in the 1st, 2nd, 3rd AND 4th section 67 = breakpoint in the 1st, 2nd, 3rd AND 5th section 68 = breakpoint in the 1st, 2nd, 4th AND 5th section 69 = breakpoint in the 1st, 3rd, 4th AND 5th section 70 = breakpoint in the 2nd, 3rd , 4th AND 5th section 85 = breakpoints in all section """ time_series = arr1d arr_shape = arr1d.shape[0] time_series_index = np.indices((arr_shape,))[0] # internal function to split time series in n sub time series def split_list(alist, wanted_parts=1): # based on: https://stackoverflow.com/a/752562 length = len(alist) return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts] for i in range(wanted_parts)] # split time series and list of time series indices in 4 subarrays time_series_split = split_list(time_series, wanted_parts=5) time_series_index_split = split_list(time_series_index, wanted_parts=5) # calculate linear regression for each time series subarray mini_list = [] sigma_list = [] for i in range(0, len(time_series_index_split)): mea = np.mean(time_series_split[i]) std_mea = stdev * np.std(time_series_split[i]) mini = min(time_series_split[i]) sigma = mea - std_mea i += 1 mini_list = [mini_list, mini] sigma_list = [sigma_list, sigma] # weird list append, cause .append doesnt work with multiprocessing # check for dropping slope values from one fifth of time series to next temp = 0 if mini_list[0][0][0][0][1] < sigma_list[0][0][0][0][1]: temp = temp + 15 if mini_list[0][0][0][1] < sigma_list[0][0][0][1]: temp = temp + 16 if mini_list[0][0][1] < sigma_list[0][0][1]: temp = temp + 17 if mini_list[0][1] < sigma_list[0][1]: temp = temp + 18 if mini_list[1] < sigma_list[1]: temp = temp + 19 if temp == 0: return 0 return temp
b243f1d4ba904cbc2fb0e46b37305c857fce0be1
708,427
def find_index(predicate, List): """ (a → Boolean) → [a] → [Number] Return the index of first element that satisfy the predicate """ for i, x in enumerate(List): if predicate(x): return i
0c6010b8b169b7bfa780ca03c0551f189bda892a
708,428
def _can_be_quoted(loan_amount, lent_amounts):
    """
    Checks if the borrower can obtain a quote. To this aim, the loan amount
    should be less than or equal to the total amounts given by lenders.
    :param loan_amount: the requested loan amount
    :param lent_amounts: the amounts given by lenders
    :return: True if the borrower can get a quote, False otherwise
    """
    return sum(lent_amounts) - loan_amount >= 0
6fd717f3d0e844752e07e9dd435ff72eaa4b34c9
708,429
import os
import sys

def get_bt_mac_lsb_offset(any_path, config_file):
    """ Obtains the offset of the BT_MAC LSB from the BASE_MAC LSB by sdkconfig inspection. """
    mac_sdkconfig_string = 'CONFIG_NUMBER_OF_UNIVERSAL_MAC_ADDRESS'
    sdkconfig = os.path.join(any_path, config_file)
    config_lines = open(sdkconfig).readlines()
    for line in config_lines:
        if mac_sdkconfig_string in line:
            split_line = line.split('=')
            if '4' in split_line[1]:
                return 2
            elif '2' in split_line[1]:
                return 1
            else:
                print("Unable to find valid value of sdkconfig variable {mac_var}"
                      .format(mac_var=mac_sdkconfig_string))
                sys.exit(1)
8122c9fb3899d9316d9f223710e9a2c661f3e2fb
708,430
def no_trajectory_dct(): """ Dictionary expected answer """ return ()
95cc96bbfb23e621511f99f4d19f1af5a31bcc0f
708,431
import re def output_name(ncfile): """output_name. Args: ncfile: """ ncfile_has_datetime = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}', ncfile) if ncfile_has_datetime: forecast_time = ncfile_has_datetime.group() else: raise Exception("ncfile doesn't have datetime data.") outname = (forecast_time + "apcp") return outname
81d04e9fe572e6ba2eb97506d4690818008a1aaf
708,432
def text_to_int(sentence, map_dict, max_length=20, is_target=False):
    """
    Encode a text sentence as numbers

    @param sentence: a complete sentence, str type
    @param map_dict: word-to-number mapping, dict
    @param max_length: maximum sentence length
    @param is_target: whether this is the target sentence. Target sentences
        (i.e. translated sentences) need an <EOS> appended at the end, so they
        are handled differently from source sentences
    """
    # pad the whole sequence with <PAD>
    text_to_idx = []
    # special token indices
    unk_idx = map_dict.get("<UNK>")
    pad_idx = map_dict.get("<PAD>")
    eos_idx = map_dict.get("<EOS>")

    # source text: just map each word
    if not is_target:
        for word in sentence.lower().split():
            text_to_idx.append(map_dict.get(word, unk_idx))
    # target text: map each word, then append <EOS>
    else:
        for word in sentence.lower().split():
            text_to_idx.append(map_dict.get(word, unk_idx))
        text_to_idx.append(eos_idx)

    # truncate if too long
    if len(text_to_idx) > max_length:
        return text_to_idx[:max_length]
    # otherwise pad with <PAD>
    else:
        text_to_idx = text_to_idx + [pad_idx] * (max_length - len(text_to_idx))
        return text_to_idx
9ac1928ff0a71e653c999a173ee4ea9127b29913
708,434
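A toy vocabulary exercising text_to_int above; the token ids are made up for illustration:

vocab = {"<PAD>": 0, "<UNK>": 1, "<EOS>": 2, "hello": 3, "world": 4}
assert text_to_int("Hello world", vocab, max_length=5) == [3, 4, 0, 0, 0]
assert text_to_int("Hello world", vocab, max_length=5, is_target=True) == [3, 4, 2, 0, 0]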
from numpy import array def beamcenter_mask(): """Returns beamcenter mask as an array. Given the PSF and the dimensions of the beamstop, the minimum intensity around beamcenter occurs at a radius of 3 pixels, hence a 7x7 mask.""" return array([[0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,1,1,1,0,0], [0,0,1,1,1,0,0], [0,0,1,1,1,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0]])
6efb592aa88c3da57010ab4a70144d645ae916ea
708,435
import sys def attach_tfidf_weights(storage, vocab, tf_arr): """Appends tf-idf weights to each word """ wordlist = vocab storage_weighted = [] for i in range(len(storage)): sys.stdout.write(str(i)+",") sys.stdout.flush() docweights = [] stor_list = storage[i].split() for word in stor_list: words = [word,0] for j in range(len(wordlist)): if (wordlist[j] == word): words[1] = tf_arr[i][j] docweights.append(words) storage_weighted.append(docweights) return storage_weighted
5fe52dd87d091860dc3a7482a72860abbb2b49dd
708,436
def expanded_bb( final_points): """computation of coordinates and distance""" left, right = final_points left_x, left_y = left right_x, right_y = right base_center_x = (left_x+right_x)/2 base_center_y = (left_y+right_y)/2 dist_base = abs(complex(left_x, left_y)-complex(right_x, right_y ) ) return (int(base_center_x), int(base_center_y) ), dist_base
c033130b0d43ccf9cea3e075305cf464f958c62f
708,437
def extract_uris(data): """Convert a text/uri-list to a python list of (still escaped) URIs""" lines = data.split('\r\n') out = [] for l in lines: if l == chr(0): continue # (gmc adds a '\0' line) if l and l[0] != '#': out.append(l) return out
9f6ce28ecf94e07e03afca9852dd9952ed2a2488
708,438
import re def extract_info(filepath,pdbid,info_id_list): """Returns a dictionary where the key is pocket ID (starting at zero) and the value is a dictionary of information points.""" pockets_info = {} pocket_file = open(filepath+pdbid+'_out/'+pdbid+'_info.txt') pocket_lines = pocket_file.readlines() pocket_file.close() # create inner dictionaries counter = 0 for line in pocket_lines: if line[:6] == 'Pocket': pockets_info[counter] = {} counter += 1 # populate inner dictionaries for info_id in info_id_list: counter = 0 for line in pocket_lines: if line.lstrip()[:len(info_id)] == info_id: split = re.split(r'\s+',line.rstrip()) pockets_info[counter][info_id] = float(split[-1]) counter += 1 return pockets_info
aca4074bc1c48add487268641a66c6e80aa7dafb
708,439
def mult_int_list_int(): """ >>> mult_int_list_int() [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2] """ return 3 * [1, 2] * 2
cd34fa521ae3985f7770f96a1a8985e9473ee2b3
708,440
def to_dict(eds, properties=True, lnk=True): """ Encode the EDS as a dictionary suitable for JSON serialization. """ nodes = {} for node in eds.nodes: nd = { 'label': node.predicate, 'edges': node.edges } if lnk and node.lnk is not None: nd['lnk'] = {'from': node.cfrom, 'to': node.cto} if node.type is not None: nd['type'] = node.type if properties: props = node.properties if props: nd['properties'] = props if node.carg is not None: nd['carg'] = node.carg nodes[node.id] = nd return {'top': eds.top, 'nodes': nodes}
c1a777a0a81ad2e3b9197b3df5e0d35a5174d61f
708,441
def mock_modules_list(): """Standard module list without any issues""" return [ {"name": "foo", "module_type": "app", "supported_platforms": ["macos"]}, {"name": "bar", "module_type": "app"}, ]
c4f20e95e87950a414b0ac156e6a07ac79dcdf19
708,442
def SieveOfEratosthenes(limit=10**6): """Returns all primes not greater than limit.""" isPrime = [True]*(limit+1) isPrime[0] = isPrime[1] = False primes = [] for i in range(2, limit+1): if not isPrime[i]:continue primes += [i] for j in range(i*i, limit+1, i): isPrime[j] = False return primes
6d1e12d289c9bfcdfadf64f764deba077a09ffd1
708,443
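A quick check of SieveOfEratosthenes on a small limit:

assert SieveOfEratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]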
import torch

def iou_overlaps(b1, b2):
    """
    Arguments:
        b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
        b2: gts, [n, >=4] (x1, y1, x2, y2, ...)

    Returns:
        intersection-over-union pair-wise, generalized iou.
    """
    area1 = (b1[:, 2] - b1[:, 0] + 1) * (b1[:, 3] - b1[:, 1] + 1)
    area2 = (b2[:, 2] - b2[:, 0] + 1) * (b2[:, 3] - b2[:, 1] + 1)
    # only for giou loss
    lt1 = torch.max(b1[:, :2], b2[:, :2])
    rb1 = torch.max(b1[:, 2:4], b2[:, 2:4])
    lt2 = torch.min(b1[:, :2], b2[:, :2])
    rb2 = torch.min(b1[:, 2:4], b2[:, 2:4])
    wh1 = (rb2 - lt1 + 1).clamp(min=0)
    wh2 = (rb1 - lt2 + 1).clamp(min=0)
    inter_area = wh1[:, 0] * wh1[:, 1]
    union_area = area1 + area2 - inter_area
    iou = inter_area / torch.clamp(union_area, min=1)
    ac_union = wh2[:, 0] * wh2[:, 1] + 1e-7
    giou = iou - (ac_union - union_area) / ac_union
    return iou, giou
ba9b445223fea5ea8332a189b297c8c40205a4e5
708,444
def aggregate(data): """Aggregate the data.""" return NotImplemented
2d7fd424d70858e6065dca34991308f0ed6c945c
708,445
import copy def iupac_fasta_converter(header, sequence): """ Given a sequence (header and sequence itself) containing iupac characters, return a dictionary with all possible sequences converted to ATCG. """ iupac_dict = {"R": "AG", "Y": "CT", "S": "GC", "W": "AT", "K": "GT", "M": "AC", "B": "CGT", "D": "AGT", "H": "ACT", "V": "ACG", "N": "ACGT"} iupac_dict = {k: list(iupac_dict[k]) for k in list(iupac_dict.keys())} if sequence.upper().count("N") >= 10: return {header: sequence} sequence = list(sequence.upper()) result_list = [] def iupac_recurse(seq): for i in range(len(seq)): if seq[i] in list(iupac_dict.keys()): iup = iupac_dict[seq[i]] for i_seq in iup: new_seq = copy.deepcopy(seq) new_seq[i] = i_seq iupac_recurse(new_seq) break else: result_list.append("".join(seq)) iupac_recurse(sequence) if len(result_list) == 1: return {header: result_list[0]} else: return {header + "-" + str(i): result_list[i] for i in range(len(result_list))}
95a713e87564c4d8e807e1d476439568a562731b
708,446
def get_tpr_from_threshold(scores, labels, threshold_list):
    """Calculate the recall (TPR) score list from the threshold score list.

    Args:
        scores: list of scores
        labels: list of labels (1 marks a positive sample)
        threshold_list: list, the threshold list

    Returns:
        tpr_list: list, the recall scores calculated at the corresponding thresholds
    """
    tpr_list = []
    hack_scores = []
    for score, label in zip(scores, labels):
        if label == 1:
            hack_scores.append(float(score))
    hack_scores.sort(reverse=True)
    hack_nums = len(hack_scores)
    for threshold in threshold_list:
        hack_index = 0
        while hack_index < hack_nums:
            if hack_scores[hack_index] <= threshold:
                break
            else:
                hack_index += 1
        if hack_nums != 0:
            tpr = hack_index * 1.0 / hack_nums
        else:
            tpr = 0
        tpr_list.append(tpr)
    return tpr_list
97796fb0f1ba9d41cf6e9c4bb21d1ca8f94499e3
708,447
def updating_node_validation_error(address=False, port=False, id=False, weight=False): """ Verified 2015-06-16: - when trying to update a CLB node's address/port/id, which are immutable. - when trying to update a CLB node's weight to be < 1 or > 100 At least one of address, port, id, and weight should be `True` for this error to apply. :param bool address: Whether the address was passed to update :param bool port: Whether the port was passed to update :param bool id: Whether the ID was passed to update :param bool weight: Whether the weight was passed to update and wrong :return: a `tuple` of (dict body message, 400 http status code) """ messages = [] if address: messages.append("Node ip field cannot be modified.") if port: messages.append("Port field cannot be modified.") if weight: messages.append("Node weight is invalid. Range is 1-100. " "Please specify a valid weight.") if id: messages.append("Node id field cannot be modified.") return( { "validationErrors": { "messages": messages }, "message": "Validation Failure", "code": 400, "details": "The object is not valid" }, 400 )
68c5fdda121950c679afe446bfd7fb19331deb40
708,448
def parse_numbers(numbers): """Return list of numbers.""" return [int(number) for number in numbers]
ee79d4e15cbfb269f7307710d9ad4735687f7128
708,449