Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def norm_potential(latitude, longitude, h, refell, lmax):
    """
    Calculates the normal potential at a given latitude and height

    Arguments
    ---------
    latitude: latitude in degrees
    longitude: longitude in degrees
    h: height above reference ellipsoid in meters
    refell: reference ellipsoid name
    lmax: maximum spherical harmonic degree

    Returns
    -------
    U: normal potential at height h
    dU_dr: derivative of normal potential with respect to radius
    dU_dtheta: derivative of normal potential with respect to theta
    """
    #-- get ellipsoid parameters for refell
    ellip = ref_ellipsoid(refell)
    a = np.float128(ellip['a'])
    ecc1 = np.float128(ellip['ecc1'])
    GM = np.float128(ellip['GM'])
    J2 = np.float128(ellip['J2'])
    #-- convert from geodetic latitude to geocentric latitude
    latitude_geodetic_rad = (np.pi*latitude/180.0).astype(np.float128)
    longitude_rad = (np.pi*longitude/180.0).astype(np.float128)
    N = a/np.sqrt(1.0 - ecc1**2.0*np.sin(latitude_geodetic_rad)**2.0)
    X = (N + h) * np.cos(latitude_geodetic_rad) * np.cos(longitude_rad)
    Y = (N + h) * np.cos(latitude_geodetic_rad) * np.sin(longitude_rad)
    Z = (N * (1.0 - ecc1**2.0) + h) * np.sin(latitude_geodetic_rad)
    rr = np.sqrt(X**2.0 + Y**2.0 + Z**2.0)
    latitude_geocentric = np.arctan(Z / np.sqrt(X**2.0 + Y**2.0))
    #-- calculate even zonal harmonics
    n = np.arange(2, 12+2, 2, dtype=np.float128)
    J2n = cosine_even_zonals(J2, ecc1, n/2.0)
    #-- normalized cosine harmonics: Cn = -Jn/np.sqrt(2.0*n+1.0)
    #-- J2 = 0.108262982131e-2
    C_2 = -J2n[0]/np.sqrt(5.0)
    #-- J4 = -0.237091120053e-5
    C_4 = -J2n[1]/np.sqrt(9.0)
    #-- J6 = 0.608346498882e-8
    C_6 = -J2n[2]/np.sqrt(13.0)
    #-- J8 = -0.142681087920e-10
    C_8 = -J2n[3]/np.sqrt(17.0)
    #-- J10 = 0.121439275882e-13
    C_10 = -J2n[4]/np.sqrt(21.0)
    #-- J12 = 0.205395070709e-15
    C_12 = -J2n[5]/np.sqrt(25.0)
    #-- calculate legendre polynomials at latitude and their first derivative
    Pl, dPl = legendre_polynomials(lmax, np.sin(latitude_geocentric),
        ASTYPE=np.float128)
    #-- normal potentials and derivatives
    U = (GM/rr) * (1.0 + (a/rr)**2.*C_2*Pl[2,:] + (a/rr)**4.*C_4*Pl[4,:] +
        (a/rr)**6.*C_6*Pl[6,:] + (a/rr)**8.*C_8*Pl[8,:] +
        (a/rr)**10.*C_10*Pl[10,:] + (a/rr)**12.*C_12*Pl[12,:])
    dU_dr = GM * (-1.0 / rr**2.0 - 3.0*(a**2.0/rr**4.0)*C_2*Pl[2,:] -
        5.0*(a**4.0/rr**6.0)*C_4*Pl[4,:] - 7.0*(a**6.0/rr**8.0)*C_6*Pl[6,:] -
        9.0*(a**8.0/rr**10.)*C_8*Pl[8,:] - 11.*(a**10./rr**12.)*C_10*Pl[10,:] -
        13.*(a**12./rr**14.)*C_12*Pl[12,:])
    dU_dtheta = (GM/rr) * (1.0 + (a/rr)**2.0*C_2*dPl[2,:] +
        (a/rr)**4.0*C_4*dPl[4,:] + (a/rr)**6.0*C_6*dPl[6,:] +
        (a/rr)**8.0*C_8*dPl[8,:] + (a/rr)**10.0*C_10*dPl[10,:] +
        (a/rr)**12.0*C_12*dPl[12,:])
    #-- return the potentials
    return (U, dU_dr, dU_dtheta)
5fc9f26a206c4fced5ebd434c84dda26621c08dc
3,659,069
def get_pp_gene_chains(chain_class_file, v=False):
    """Get gene: pp chains dict."""
    gene_to_pp_chains = defaultdict(list)  # init the dict
    f = open(chain_class_file, "r")  # open file with classifications
    f.__next__()  # skip header
    for line in f:
        line_data = line.rstrip().split("\t")
        # line contains the following fields:
        # gene orthologs paralogs trans p_pseudogenes
        trans = line_data[0]
        # proc_pseudogene chains are in the 4th field
        pp_genes_field = line_data[4]
        if pp_genes_field == "0":
            # if 0 -> no ppgene chains -> skip
            continue
        # parse comma-separated string and save to dict
        pp_genes = [int(x) for x in pp_genes_field.split(",") if x != ""]
        gene_to_pp_chains[trans] = pp_genes
    f.close()
    if v:
        verbose(f"Extracted {len(gene_to_pp_chains)} genes with proc pseudogenes")
    return gene_to_pp_chains
d09fe5f7e8aaed0b8aa46593931b3cda655f56e3
3,659,071
def skip_object(change_mode, change):
    """
    If `Mode` is `change`: we do not care about the `Conditions`
    Else:
      If `cfn` objects:
        - We can omit the `Conditions`; objects will be involved when `Mode` is
          `provision` or `destroy`. (Original design. Backward compatibility.)
        - In case `Conditions` is declared, objects will be involved when
          `Mode` matches with `Conditions`.
      If `aws` objects: we must declare `Conditions` and match with `Mode`,
      or else the engine will skip that Object/Block.

    OR

    If `Mode` is `change`: we do not care about the `Conditions`
    Else:
      If we omit the `Conditions`:
        - Only `cfn` objects are involved when `Mode` is `provision` or
          `destroy`. (Original design. Backward compatibility.)
        - Others will be skipped.
      Else: objects will be involved when `Mode` matches with `Conditions`.

    Return:
      - `True` means skipped
      - `False` means involved
    """
    if (change_mode != CHANGE_MODE_CHANGE):
        if ('Conditions' not in change):
            if (change['Object'] == STR_CFN) and (change_mode in [CHANGE_MODE_PROVISION, CHANGE_MODE_DESTROY]):
                return False
            return True
        elif (change_mode not in change['Conditions']):
            return True
    return False
a80365acc6f3390818f4c56a44ad4923f771fcee
3,659,073
def rootfinder(*args): """ rootfinder(str name, str solver, dict:SX rfp, dict opts) -> Function Create a solver for rootfinding problems Takes a function where one of the rootfinder(str name, str solver, dict:MX rfp, dict opts) -> Function rootfinder(str name, str solver, Function f, dict opts) -> Function > rootfinder(str name, str solver, dict:SX rfp, dict opts) ------------------------------------------------------------------------ Create a solver for rootfinding problems Takes a function where one of the inputs is unknown and one of the outputs is a residual function that is always zero, defines a new function where the the unknown input has been replaced by a guess for the unknown and the residual output has been replaced by the calculated value for the input. For a function [y0, y1, ...,yi, .., yn] = F(x0, x1, ..., xj, ..., xm), where xj is unknown and yi=0, defines a new function [y0, y1, ...,xj, .., yn] = G(x0, x1, ..., xj_guess, ..., xm), xj and yi must have the same dimension and d(yi)/d(xj) must be invertable. By default, the first input is unknown and the first output is the residual. General information =================== >List of available options +------------------+-----------------+------------------+------------------+ | Id | Type | Description | Used in | +==================+=================+==================+==================+ | common_options | OT_DICT | Options for | casadi::OracleFu | | | | auto-generated | nction | | | | functions | | +------------------+-----------------+------------------+------------------+ | constraints | OT_INTVECTOR | Constrain the | casadi::Rootfind | | | | unknowns. 0 | er | | | | (default): no | | | | | constraint on | | | | | ui, 1: ui >= | | | | | 0.0, -1: ui <= | | | | | 0.0, 2: ui > | | | | | 0.0, -2: ui < | | | | | 0.0. | | +------------------+-----------------+------------------+------------------+ | error_on_fail | OT_BOOL | When the | casadi::Rootfind | | | | numerical | er | | | | process returns | | | | | unsuccessfully, | | | | | raise an error | | | | | (default false). | | +------------------+-----------------+------------------+------------------+ | implicit_input | OT_INT | Index of the | casadi::Rootfind | | | | input that | er | | | | corresponds to | | | | | the actual root- | | | | | finding | | +------------------+-----------------+------------------+------------------+ | implicit_output | OT_INT | Index of the | casadi::Rootfind | | | | output that | er | | | | corresponds to | | | | | the actual root- | | | | | finding | | +------------------+-----------------+------------------+------------------+ | jacobian_functio | OT_FUNCTION | Function object | casadi::Rootfind | | n | | for calculating | er | | | | the Jacobian | | | | | (autogenerated | | | | | by default) | | +------------------+-----------------+------------------+------------------+ | linear_solver | OT_STRING | User-defined | casadi::Rootfind | | | | linear solver | er | | | | class. Needed | | | | | for | | | | | sensitivities. | | +------------------+-----------------+------------------+------------------+ | linear_solver_op | OT_DICT | Options to be | casadi::Rootfind | | tions | | passed to the | er | | | | linear solver. 
| | +------------------+-----------------+------------------+------------------+ | monitor | OT_STRINGVECTOR | Set of user | casadi::OracleFu | | | | problem | nction | | | | functions to be | | | | | monitored | | +------------------+-----------------+------------------+------------------+ | specific_options | OT_DICT | Options for | casadi::OracleFu | | | | specific auto- | nction | | | | generated | | | | | functions, | | | | | overwriting the | | | | | defaults from | | | | | common_options. | | | | | Nested | | | | | dictionary. | | +------------------+-----------------+------------------+------------------+ >Input scheme: casadi::RootfinderInput (ROOTFINDER_NUM_IN = 2) +---------------+-------+---------------------------------+ | Full name | Short | Description | +===============+=======+=================================+ | ROOTFINDER_X0 | x0 | Initial guess for the solution. | +---------------+-------+---------------------------------+ | ROOTFINDER_P | p | Parameters. | +---------------+-------+---------------------------------+ >Output scheme: casadi::RootfinderOutput (ROOTFINDER_NUM_OUT = 1) +--------------+-------+--------------------------------------+ | Full name | Short | Description | +==============+=======+======================================+ | ROOTFINDER_X | x | Solution to the system of equations. | +--------------+-------+--------------------------------------+ List of plugins =============== - kinsol - fast_newton - nlpsol - newton Note: some of the plugins in this list might not be available on your system. Also, there might be extra plugins available to you that are not listed here. You can obtain their documentation with Rootfinder.doc("myextraplugin") -------------------------------------------------------------------------------- kinsol ------ KINSOL interface from the Sundials suite >List of available options +---------------------------+-----------------+----------------------------+ | Id | Type | Description | +===========================+=================+============================+ | abstol | OT_DOUBLE | Stopping criterion | | | | tolerance | +---------------------------+-----------------+----------------------------+ | disable_internal_warnings | OT_BOOL | Disable KINSOL internal | | | | warning messages | +---------------------------+-----------------+----------------------------+ | exact_jacobian | OT_BOOL | Use exact Jacobian | | | | information | +---------------------------+-----------------+----------------------------+ | f_scale | OT_DOUBLEVECTOR | Equation scaling factors | +---------------------------+-----------------+----------------------------+ | iterative_solver | OT_STRING | gmres|bcgstab|tfqmr | +---------------------------+-----------------+----------------------------+ | linear_solver_type | OT_STRING | dense|banded|iterative|use | | | | r_defined | +---------------------------+-----------------+----------------------------+ | lower_bandwidth | OT_INT | Lower bandwidth for banded | | | | linear solvers | +---------------------------+-----------------+----------------------------+ | max_iter | OT_INT | Maximum number of Newton | | | | iterations. Putting 0 sets | | | | the default value of | | | | KinSol. 
| +---------------------------+-----------------+----------------------------+ | max_krylov | OT_INT | Maximum Krylov space | | | | dimension | +---------------------------+-----------------+----------------------------+ | pretype | OT_STRING | Type of preconditioner | +---------------------------+-----------------+----------------------------+ | strategy | OT_STRING | Globalization strategy | +---------------------------+-----------------+----------------------------+ | u_scale | OT_DOUBLEVECTOR | Variable scaling factors | +---------------------------+-----------------+----------------------------+ | upper_bandwidth | OT_INT | Upper bandwidth for banded | | | | linear solvers | +---------------------------+-----------------+----------------------------+ | use_preconditioner | OT_BOOL | Precondition an iterative | | | | solver | +---------------------------+-----------------+----------------------------+ -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- fast_newton ----------- Implements simple newton iterations to solve an implicit function. >List of available options +------------+-----------+-------------------------------------------------+ | Id | Type | Description | +============+===========+=================================================+ | abstol | OT_DOUBLE | Stopping criterion tolerance on ||g||__inf) | +------------+-----------+-------------------------------------------------+ | abstolStep | OT_DOUBLE | Stopping criterion tolerance on step size | +------------+-----------+-------------------------------------------------+ | max_iter | OT_INT | Maximum number of Newton iterations to perform | | | | before returning. | +------------+-----------+-------------------------------------------------+ -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- nlpsol ------ -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- newton ------ Implements simple newton iterations to solve an implicit function. >List of available options +-----------------+-----------+--------------------------------------------+ | Id | Type | Description | +=================+===========+============================================+ | abstol | OT_DOUBLE | Stopping criterion tolerance on max(|F|) | +-----------------+-----------+--------------------------------------------+ | abstolStep | OT_DOUBLE | Stopping criterion tolerance on step size | +-----------------+-----------+--------------------------------------------+ | max_iter | OT_INT | Maximum number of Newton iterations to | | | | perform before returning. | +-----------------+-----------+--------------------------------------------+ | print_iteration | OT_BOOL | Print information about each iteration | +-----------------+-----------+--------------------------------------------+ -------------------------------------------------------------------------------- Joel Andersson > rootfinder(str name, str solver, dict:MX rfp, dict opts) > rootfinder(str name, str solver, Function f, dict opts) ------------------------------------------------------------------------ """ return _casadi.rootfinder(*args)
13ac40736849b6b4240dc5d22ad36aa472583bcb
3,659,074
def f18(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 8 (N=5)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)

    if rotation is None:
        rotation = transforms.rotations[nx][17]
    if shift is None:
        shift = transforms.shifts[17][:nx]
    if shuffle is None:
        shuffle = transforms.shuffles[nx][7]

    x_transformed = np.matmul(rotation, x - shift)
    x_parts = _shuffle_and_partition(x_transformed, shuffle, [0.2, 0.2, 0.2, 0.2, 0.2])

    y = basic.high_conditioned_elliptic(x_parts[0])
    y += basic.ackley(x_parts[1])
    y += basic.rastrigin(x_parts[2])
    y += basic.h_g_bat(x_parts[3])
    y += basic.discus(x_parts[4])

    return y + 1800.0
d76b4aef256bdc72bc077adefda5e7a93f8ea500
3,659,075
def shake_256_len(data: bytes, length: int) -> hashes.MessageDigest:
    """
    Convenience function to hash a message.
    """
    return HashlibHash.hash(hashes.shake_256_len(length), data)
c59f1a224264649573d53b67f170c7238b5a9840
3,659,076
def rgb_to_cmyk(color_values: tp.List[float]) -> tp.List[float]:
    """Converts list of RGB values to CMYK.

    :param color_values: (list) 3-member RGB color value list
    :return: (list) 4-member CMYK color value list
    """
    if color_values == [0.0, 0.0, 0.0]:
        return [0.0, 0.0, 0.0, 1.0]
    r, g, b = color_values
    c = 1.0 - r
    m = 1.0 - g
    y = 1.0 - b
    min_cmy = min(c, m, y)
    c = (c - min_cmy) / (1 - min_cmy)
    m = (m - min_cmy) / (1 - min_cmy)
    y = (y - min_cmy) / (1 - min_cmy)
    k = min_cmy
    return [c, m, y, k]
f0054c92e862c5f0a8b09f94c115c03150b3363b
3,659,077
import re

def parse_pgt_programmearray(url):
    """
    Parse filter.js programmearray for pgt information

    :param url: base url for timetabling system
    :return: pgt programme name to id dict
    """
    # get filter.js file
    source = get_filterjs(url)

    name_to_id = {}
    # e.g. programmearray[340] [0] = "MSc Finance and Investment (Business Analytics)/F/02 - MSc Finance and
    #      Investment (Business Analytics)";
    matches = re.findall(r'programmearray\[(\d{1,3})\] \[0\] = "(.*)";\s+'
                         r'programmearray\[\1\] \[1\] = ".*";\s+'
                         r'programmearray\[\1\] \[2\] = "(PGT/.*)"', source)
    for match in matches:
        # match e.g. ('0', 'MA Applied Linguistics/F/01 - EG04 Applied Linguistics', 'PGT/C1014/C7PAPLST/F/01')
        name_to_id[match[1]] = match[2]

    return name_to_id
b7901c37cdb931dce75a77422394f98b5e3898d1
3,659,079
def distance_calc(x1, y1, x2, y2):
    """Calculates distance between two points"""
    return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
4c0001d90d38371a5336e8163fbf63b3d6e95834
3,659,080
import requests
import pandas as pd

def LoadNasaData(lat, lon, show=False, selectparms=None):
    """ Execute a request from NASA API for 10 years of atmospheric data
    required to prepare daily statistical data used in Solar Insolation
    calculations """
    cmd = formulateRequest(lat, lon, selectparms)
    jdi = requests.get(cmd[0]).json()
    cols = cmd[1]
    df = pd.json_normalize(jdi['features'][0]['properties']['parameter'][cols[0]]).T
    df.index = pd.to_datetime(df.index)
    df.rename(columns={0: cols[0]}, inplace=True)
    for c in cols[1:]:
        dfc = pd.json_normalize(jdi['features'][0]['properties']['parameter'][c]).T
        dfc.index = pd.to_datetime(df.index)
        dfc.rename(columns={0: c}, inplace=True)
        df = df.join(dfc)
    df['DayofYear'] = df.index.dayofyear
    df = df[df['DayofYear'] != 366]  # drop a day for leap years
    atmo_dict = dict()
    dg = df.groupby('DayofYear')
    for col in cols:
        dp = pd.DataFrame(dg[col].min())
        dp.rename(columns={col: 'Min'}, inplace=True)
        atmo_dict[col] = dp
        dp = pd.DataFrame(dg[col].max())
        dp.rename(columns={col: 'Max'}, inplace=True)
        atmo_dict[col] = atmo_dict[col].join(dp)
        dp = pd.DataFrame(dg[col].mean())
        dp.rename(columns={col: 'S-Mean'}, inplace=True)
        atmo_dict[col] = atmo_dict[col].join(dp)
        dp = pd.DataFrame(dg[col].std())
        dp.rename(columns={col: 'STDV'}, inplace=True)
        atmo_dict[col] = atmo_dict[col].join(dp)
    return atmo_dict
a2f61d20f46feee5d86bad2525c3bc20c3e00e14
3,659,081
from datetime import date, datetime

def mmyy_date_slicer(date_str):
    """Return start and end point for given date in mmyy format.

    :param date_str: date in mmyy format, i.e. "1222" or "0108".
    :return: start and end date string for a given mmyy formatted date string
    """
    # Initialize output
    start = ""
    end = ""

    if mmyy_valid_date(date_str):
        today = date.today()

        # Check if date is in the future
        dt_check = datetime.strptime(date_str, "%m%y")
        if dt_check.date() <= today:
            # Determine the start date string
            datetime_object = datetime.strptime(date_str[0:2], "%m")
            mo = datetime_object.strftime("%b")
            yyyy = f"20{date_str[2:]}"
            start = f'1 {mo}, {yyyy}'

            # Determine the end date string.
            mm = int(date_str[0:2])
            if mm == today.month:
                pass
            elif mm == 12:
                end = f"1 Jan, {int(yyyy)+1}"
            else:
                mm1 = int(date_str[0:2]) + 1
                datetime_object = datetime.strptime(f"{mm1}", "%m")
                mo1 = datetime_object.strftime("%b")
                end = f'1 {mo1}, {yyyy}'
        else:
            # print(f'date in the future! > {date_str}')
            return "", ""
    else:
        # print(f'date malformed! > {date_str}')
        return "", ""
    return start, end
f1c7f74f1824d5b4c410f3d8cc6ade15571fe3ca
3,659,082
import typing

def constant_arg(name: str):
    """
    Promises that the given arg will not be modified

    Only affects mutable data types
    Removes the need to copy the data during inlining
    """

    def annotation(target: typing.Callable):
        optimiser = _schedule_optimisation(target)
        optimiser.constant_args.add(name)
        return target

    return annotation
fdd132d3beea900b81bfe645616b0c20933e22e3
3,659,083
def place_connection(body):  # noqa: E501
    """Place a connection request from the SDX-Controller  # noqa: E501

    :param body: order placed for creating a connection
    :type body: dict | bytes

    :rtype: Connection
    """
    if connexion.request.is_json:
        body = Connection.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
c69520f428f257a9eb3df97e060ad2e5cde45c94
3,659,084
def tag_list(request):
    """Display all tags."""
    return render(request, 'admin/tags_list.html')
44b33197b3f8cb3467c8e7d32d4538f4dc2833b1
3,659,085
import requests

def scan_url(urls):
    """
    Scan the url using the API

    Args:
        urls: the list of urls

    Returns:
        A tuple of a bool indicating if all the urls are safe and a list
        indicating the safeness of individual urls
    """
    is_safe = True
    safe_list = [True] * len(urls)
    safe_browsing_url = "https://safebrowsing.googleapis.com/v4/threatMatches:find"
    params = {"key": GOOGLE_TOKEN}
    json = {
        "threatInfo": {
            "threatTypes": [
                "THREAT_TYPE_UNSPECIFIED",
                "MALWARE",
                "SOCIAL_ENGINEERING",
                "UNWANTED_SOFTWARE",
                "POTENTIALLY_HARMFUL_APPLICATION",
            ],
            "platformTypes": ["ANY_PLATFORM"],
            "threatEntryTypes": ["URL"],
            "threatEntries": [{"url": url} for url in urls],
        }
    }
    r = requests.post(safe_browsing_url, params=params, json=json)
    if r.status_code == 200:
        results = r.json()
        if "matches" in results and results["matches"]:
            is_safe = False
            matches = results["matches"]
            urls_dict = {k: v for v, k in enumerate(urls)}
            for match in matches:
                safe_list[urls_dict[match["threat"]["url"]]] = False
    return is_safe, safe_list
9d3217a4c69a6a521d372b5692f93387ae7d61ad
3,659,086
def head_to_tree(head, len_, prune, subj_pos, obj_pos): """ Convert a sequence of head indexes into a tree object. """ head = head[:len_].tolist() root = None if prune < 0: nodes = [Tree() for _ in head] for i in range(len(nodes)): h = head[i] nodes[i].idx = i nodes[i].dist = -1 # just a filler if h == 0: root = nodes[i] else: nodes[h-1].add_child(nodes[i]) else: # find dependency path subj_pos = [i for i in range(len_) if subj_pos[i] == 0] obj_pos = [i for i in range(len_) if obj_pos[i] == 0] cas = None subj_ancestors = set(subj_pos) for s in subj_pos: h = head[s] tmp = [s] while h > 0: tmp += [h-1] subj_ancestors.add(h-1) h = head[h-1] if cas is None: cas = set(tmp) else: cas.intersection_update(tmp) obj_ancestors = set(obj_pos) for o in obj_pos: h = head[o] tmp = [o] while h > 0: tmp += [h-1] obj_ancestors.add(h-1) h = head[h-1] cas.intersection_update(tmp) # find lowest common ancestor if len(cas) == 1: lca = list(cas)[0] else: child_count = {k: 0 for k in cas} for ca in cas: if head[ca] > 0 and head[ca] - 1 in cas: child_count[head[ca] - 1] += 1 # the LCA has no child in the CA set for ca in cas: if child_count[ca] == 0: lca = ca break path_nodes = subj_ancestors.union(obj_ancestors).difference(cas) path_nodes.add(lca) # compute distance to path_nodes dist = [-1 if i not in path_nodes else 0 for i in range(len_)] for i in range(len_): if dist[i] < 0: stack = [i] while stack[-1] >= 0 and stack[-1] not in path_nodes: stack.append(head[stack[-1]] - 1) if stack[-1] in path_nodes: for d, j in enumerate(reversed(stack)): dist[j] = d else: for j in stack: if j >= 0 and dist[j] < 0: dist[j] = int(1e4) # aka infinity highest_node = lca nodes = [Tree() if dist[i] <= prune else None for i in range(len_)] for i in range(len(nodes)): if nodes[i] is None: continue h = head[i] nodes[i].idx = i nodes[i].dist = dist[i] if h > 0 and i != highest_node: assert nodes[h-1] is not None nodes[h-1].add_child(nodes[i]) root = nodes[highest_node] assert root is not None return root
0459e6ebb0c64a8170970d44e5c6bcde5bb6221c
3,659,087
def capped_subtraction(x, y):
    """Saturated arithmetics. Returns x - y truncated to the int64_t range."""
    assert_is_int64(x)
    assert_is_int64(y)
    if y == 0:
        return x
    if x == y:
        if x == INT_MAX or x == INT_MIN:
            raise OverflowError(
                'Integer NaN: subtracting INT_MAX or INT_MIN to itself')
        return 0
    if x == INT_MAX or x == INT_MIN:
        return x
    if y == INT_MAX:
        return INT_MIN
    if y == INT_MIN:
        return INT_MAX
    return to_capped_int64(x - y)
c4a171497ff351c22df3fc831a1e840366a90c5b
3,659,088
def evaluate_points(func, begin, total_samps, var_list, attr):
    """
    Inputs:
        func- the lambda function used to generate the data from the
              evaluation vector
        begin- the index to start at in the `attr` array
        total_samps- the total number of samples to generate
        var_list- list of the variables
        attr- the attribute that holds the values to be used in the
              evaluation vector

    Identical to evaluate_points_verbose, but doesn't check for a verbose
    option every iteration. This version also deals with indexing only part
    of eval_vect.
    """
    var_count = len(var_list)
    term_count = func(np.zeros(var_count)).shape
    if len(term_count) > 0:
        term_count = term_count[1]  # len(func(np.zeros(var_count)))
    else:
        term_count = 1
    eval_vect = np.zeros([total_samps, var_count])
    matrix = np.zeros([total_samps, term_count])
    end = begin + total_samps
    for j in range(var_count):
        attr_arr = getattr(var_list[j], attr)
        eval_vect[:, j] = attr_arr[begin:end].T
    for i in range(total_samps):
        matrix[i, :] = func(eval_vect[i, :])
    return matrix
68ba7eb95e8d26becbef58f14e3073f7ed184a5b
3,659,089
def corresponding_chromaticities_prediction_CIE1994(experiment=1): """ Returns the corresponding chromaticities prediction for *CIE 1994* chromatic adaptation model. Parameters ---------- experiment : integer or CorrespondingColourDataset, optional {1, 2, 3, 4, 6, 8, 9, 11, 12} *Breneman (1987)* experiment number or :class:`colour.CorrespondingColourDataset` class instance. Returns ------- tuple Corresponding chromaticities prediction. References ---------- :cite:`Breneman1987b`, :cite:`CIETC1-321994b` Examples -------- >>> from pprint import pprint >>> pr = corresponding_chromaticities_prediction_CIE1994(2) >>> pr = [(p.uv_m, p.uv_p) for p in pr] >>> pprint(pr) # doctest: +ELLIPSIS [(array([ 0.207, 0.486]), array([ 0.2273130..., 0.5267609...])), (array([ 0.449, 0.511]), array([ 0.4612181..., 0.5191849...])), (array([ 0.263, 0.505]), array([ 0.2872404..., 0.5306938...])), (array([ 0.322, 0.545]), array([ 0.3489822..., 0.5454398...])), (array([ 0.316, 0.537]), array([ 0.3371612..., 0.5421567...])), (array([ 0.265, 0.553]), array([ 0.2889416..., 0.5534074...])), (array([ 0.221, 0.538]), array([ 0.2412195..., 0.5464301...])), (array([ 0.135, 0.532]), array([ 0.1530344..., 0.5488239...])), (array([ 0.145, 0.472]), array([ 0.1568709..., 0.5258835...])), (array([ 0.163, 0.331]), array([ 0.1499762..., 0.4401747...])), (array([ 0.176, 0.431]), array([ 0.1876711..., 0.5039627...])), (array([ 0.244, 0.349]), array([ 0.2560012..., 0.4546263...]))] """ experiment_results = (convert_experiment_results_Breneman1987(experiment) if is_numeric(experiment) else experiment) with domain_range_scale(1): XYZ_t, XYZ_r = experiment_results.XYZ_t, experiment_results.XYZ_r xy_o1, xy_o2 = XYZ_to_xy([XYZ_t, XYZ_r]) uv_t = Luv_to_uv(XYZ_to_Luv(experiment_results.XYZ_ct, xy_o1), xy_o1) uv_m = Luv_to_uv(XYZ_to_Luv(experiment_results.XYZ_cr, xy_o2), xy_o2) Y_r = experiment_results.B_r E_o1, E_o2 = experiment_results.Y_t, experiment_results.Y_r XYZ_1 = experiment_results.XYZ_ct XYZ_2 = chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_r, E_o1, E_o2) uv_p = Luv_to_uv(XYZ_to_Luv(XYZ_2, xy_o2), xy_o2) return tuple([ CorrespondingChromaticitiesPrediction(experiment_results.name, uv_t[i], uv_m[i], uv_p[i]) for i in range(len(uv_t)) ])
f138ec844b2712d0d09be630f912f156aa50acbd
3,659,091
def interpExtrap(x, xp, yp):
    """numpy.interp interpolation function extended by linear extrapolation."""
    y = np.interp(x, xp, yp)
    y = np.where(x < xp[0], yp[0]+(x-xp[0])*(yp[0]-yp[1])/(xp[0]-xp[1]), y)
    return np.where(x > xp[-1], yp[-1]+(x-xp[-1])*(yp[-1]-yp[-2]) /
                    (xp[-1]-xp[-2]), y)
8a0acc55e146a29171ef6648897cd5eba7e23c12
3,659,092
import json

def get_nome_socio(id):
    """Get a member's (socio) name by id."""
    if request.method == 'GET':
        try:
            socio = db.query_bd('select * from socio where id = "%s"' % id)
            if socio:
                print(socio)
                socio = socio[0]
                print(socio)
                content = {
                    'nome': socio['nome'],
                    'status': socio['status']
                }
                return json.dumps(content)
        except Exception as e:
            print(e)
    return render_template('404.html')
8945ef5bbc46cd8b6c79903f1dd0d3d226860792
3,659,094
def processDeps(element: etree.Element, params: dict = {}) -> None:
    """Function to convert the NAF deps layer to RDF

    Args:
        element: element containing the deps layer
        params: dict of params to store results

    Returns:
        None
    """
    output = params["out"]
    for dep in element:
        if dep.tag == "dep":
            # depname = genDepName(params)
            # output.write("    xl:type naf-base:dep ;\n")
            rfunc = dep.attrib["rfunc"]
            to_term = dep.attrib["to_term"]
            from_term = dep.attrib["from_term"]
            output.write(
                "_:" + from_term + " " + "naf-rfunc:" + rfunc + " _:" + to_term + "\n"
            )
            # for key in dep.attrib.keys():
            #     if (key != "id"):
            #         if key == "rfunc":
            #             output.write("    naf-base:"+attrib2pred(key)+' naf-base:'+dep.attrib[key]+' ;\n')
            #         else:
            #             output.write("    naf-base:"+attrib2pred(key)+' _:'+dep.attrib[key]+' ;\n')
            output.write(" .\n")
    return None
a096e42a3a036048daf97079f9e5bb78f5f068d9
3,659,095
def fit_solution_matrix(weights, design_matrix, cache=None, hash_decimal=10, fit_mat_key=None): """ Calculate the linear least squares solution matrix from a design matrix, A and a weights matrix W S = [A^T W A]^{-1} A^T W Parameters ---------- weights: array-like ndata x ndata matrix of data weights design_matrx: array-like ndata x n_fit_params matrix transforming fit_parameters to data cache: optional dictionary optional dictionary storing pre-computed fitting matrix. hash_decimal: int optional the number of decimals to use in hash for caching. default is 10 fit_mat_key: optional hashable variable optional key. If none is used, hash fit matrix against design and weighting matrix. Returns ----------- array-like n_fit_params x n_fit_params matrix S = [A^T W A]^{-1} A ^T W """ if cache is None: cache = {} ndata = weights.shape[0] if not weights.shape[0] == weights.shape[1]: raise ValueError("weights must be a square matrix") if not design_matrix.shape[0] == ndata: raise ValueError("weights matrix incompatible with design_matrix!") if fit_mat_key is None: opkey = ('fitting_matrix',) + tuple(np.round(weights.flatten(), hash_decimal))\ +tuple(np.round(design_matrix.flatten(), hash_decimal)) else: opkey = fit_mat_key if not opkey in cache: #check condition number cmat = np.conj(design_matrix.T) @ weights @ design_matrix #should there be a conjugation!?! if np.linalg.cond(cmat)>=1e9: warn('Warning!!!!: Poorly conditioned matrix! Your linear inpainting IS WRONG!') cache[opkey] = np.linalg.pinv(cmat) @ np.conj(design_matrix.T) @ weights else: try: cache[opkey] = np.linalg.inv(cmat) @ np.conj(design_matrix.T) @ weights except np.linalg.LinAlgError as error: print(error) cache[opkey] = None return cache[opkey]
3b237600ab2ae266cec1cdc3f1fc650cc02b82d8
3,659,096
def nessus_vuln_check(request):
    """
    Get the detailed vulnerability information.

    :param request:
    :return:
    """
    if request.method == 'GET':
        id_vul = request.GET['vuln_id']
    else:
        id_vul = ''

    vul_dat = nessus_report_db.objects.filter(vul_id=id_vul)

    return render(request, 'nessus_vuln_data.html', {'vul_dat': vul_dat})
b1b9e2cce8b00f4a837f2712abd2dc4e2f5edb3d
3,659,098
def delete_data(data, object_name, **kwargs):
    """
    Delete data
    """
    data.delete()
    is_queryset = isinstance(data, QuerySet)

    return {
        "is_queryset": is_queryset,
        "data": data,
        "object_name": object_name,
    }
28405ae426e53fc3637a4b281775cba99e112a0a
3,659,100
def get_g(source):
    """ Read the Graph from a textfile """
    G = {}
    Grev = {}
    for i in range(1, N+1):
        G[i] = []
        Grev[i] = []
    fin = open(source)
    for line in fin:
        v1 = int(line.split()[0])
        v2 = int(line.split()[1])
        G[v1].append(v2)
        Grev[v2].append(v1)
    fin.close()
    return G, Grev
f2771c28d6c86a0af035cc38cd5cdad2774b0dba
3,659,101
def _mercator(lat_long):
    """
    Calculate the 2D X and Y coordinates from a set of coordinates based on
    radius, latitude and longitude using the Mercator projection.

    :param lat_long: The coordinates of the points to be projected expressed
        as radius, latitude and longitude.
    :type lat_long: list[tuple]
    :return: The projected coordinates in the XY-plane.
    :rtype: ndarray
    """
    x = np.array([coord[0] * coord[2] for coord in lat_long])
    y = np.array([coord[0] * np.log(np.tan(np.pi / 4 + coord[1] / 2))
                  for coord in lat_long])

    return np.vstack((x, y)).T
cc1f4eb97f4c5a1505b88ab5aa8fa6992744dccf
3,659,102
def subjectForm(request, experiment_id):
    """
    Generates the fourth page, the demographic/participant data form of an
    experiment.
    """
    experiment = get_object_or_404(Experiment, pk=experiment_id)
    form = SubjectDataForm(experiment=experiment)
    t = Template(experiment.demographic_data_page_tpl)
    c = RequestContext(request, {'subject_data_form': form,
                                 'experiment': experiment,
                                 'recaptcha_site_key': settings.GOOGLE_RECAPTCHA_SITE_KEY})
    return HttpResponse(t.render(c))
48273e891c87b30157c13c726376a9d3052eebe6
3,659,103
def sigma_0(x):
    """First rotational + shifting mixing function

    σ_256_0(x) = ROTR_7(x) ⊕ ROTR_18(x) ⊕ SHR_3(x)
    """
    return ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3)
9090dc6652944189765657ad9b3650f54b10e70a
3,659,104
from datetime import datetime def edit_entry(edit_result): """Edit entry""" new_entry = edit_result.copy() edit_key = None edit_value = None date_keys = ["Date"] int_keys = ["Time Spent"] while edit_key not in edit_result: reset_screen("key", "Please type the key you want to edit.") for key, value in edit_result.items(): print(f"{key}: {value}") edit_key = get_input(str) if edit_key not in edit_result: reset_screen(error=True, sub_title="Input is not a valid key.") if edit_key in date_keys: input_type = datetime elif edit_key in int_keys: input_type = int else: input_type = str while not edit_value: reset_screen("new value", ENTRY_QUESTIONS[edit_key]) edit_value = get_input(input_type, newline=False) new_entry[edit_key] = edit_value entries = get_entries() entries[entries.index(edit_result)] = new_entry csvfile = open("entries.csv", "w") csvfile.close() for entry in entries: write_to_csv(entry) return new_entry
e63b9d94f192fdc2175457ebc1ce7f9562e1cf41
3,659,105
def srpd(mvec, k, ra, Nmax, w, V): """ Calculate the Steered Response Power Density (SRPD) :param mvec: SHD coefficients for the TF bin to be analysed :param k: Wave number (2*pi*f/c) :param ra: Radius of the microphone array :param Nmax: Maximum SHD order to be used :param w: Diagonal eigenvalue matrix :param V: Reduced eigenvector matrix :return: SRPD for the given pixel """ assert np.size(mvec) == (Nmax + 1) ** 2 V = V[0:(Nmax + 1) ** 2, 0:(Nmax + 1) ** 2] w = w[0:(Nmax + 1) ** 2] kra = k * ra jn, jnp, yn, ynp = sph_jnyn(Nmax, kra) # jn, jnp, yn, ynp = spec.sph_jnyn(Nmax, kra) hn = jn - 1j * yn hnp = jnp - 1j * ynp bnkra = jn - (jnp / hnp) * hn b = [] for n in range(Nmax + 1): for count in range(-n, n + 1): b.append(1 / (4 * np.pi * (1j) ** n * bnkra[n])) b = np.array(b) p = b * mvec B0 = np.conj(np.matrix(np.conj(p)) * V).T B0s = np.diag(w) * np.multiply(B0, np.conj(B0)) srpval = B0s.sum() return srpval
0506c76812bfdff447f09e4dae8380635e894040
3,659,107
import torch

def batch_inverse(tensor):
    """
    Compute the matrix inverse of a batch of square matrices. This routine is
    used for removing rotational motion during the molecular dynamics
    simulation. Taken from https://stackoverflow.com/questions/46595157

    Args:
        tensor (torch.Tensor): Tensor of square matrices with the shape
            n_batch x dim1 x dim1

    Returns:
        torch.Tensor: Tensor of the inverted square matrices with the same
            shape as the input tensor.
    """
    eye = tensor.new_ones(tensor.size(-1), device=tensor.device).diag().expand_as(tensor)
    tensor_inv, _ = torch.gesv(eye, tensor)
    return tensor_inv
b8defb26561e38d5e16e2483f27287a334b2cd61
3,659,108
def create(**kwds): """ Add data. """ status_code = 200 message = "Successfully added data." articles = [] for a in kwds.get("articles", []): a = Article.query.filter_by(id=a).first() if a: articles.append(a) cols = {"user_id": current_user.id, "name": kwds["name"]} model = Bookmark.query.filter_by(**cols).first() if model: for a in articles: exist = model.articles.filter_by(id=a.id).first() if not exist: model.articles.append(a) db_commit() else: cols["articles"] = articles model = Bookmark(**cols) db_add(model) return {"code": status_code, "message": message}
a8add828427b285700f23a041bd2c592346775f2
3,659,109
def _center_size_bbox_to_corners_bbox(centers, sizes):
    """Converts bbox center-size representation to corners representation.

    Args:
        centers: a tensor with shape [N, 2] representing bounding box centers
        sizes: a tensor with shape [N, 2] representing bounding boxes

    Returns:
        corners: tensor with shape [N, 4] representing bounding boxes in
            corners representation
    """
    return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1)
885bbbe2760a464c6fd3bad0811e91a70610eb8c
3,659,110
from datetime import datetime

def get_starting_month(number_of_months_to_get, include_actual_month=True,
                       actual_date=datetime.now()):
    """
    Get starting month based on parameters

    :param number_of_months_to_get: Numbers of months to get - e.g: 2
    :param include_actual_month: Include actual month? e.g.: True
    :param actual_date: Actual Date e.g: now()
    :return: Initial month & year e.g: (12, 2014)
    :raise Exception: if number_of_months_to_get less than 1
    """
    if number_of_months_to_get <= 0:
        raise Exception("Number of month's to get should be greater than 0")
    initial_year = actual_date.year
    if actual_date.month > number_of_months_to_get:
        initial_month = actual_date.month - number_of_months_to_get
    else:
        initial_month = actual_date.month - number_of_months_to_get
        if initial_month <= 0:
            initial_month += 12
            initial_year -= 1
    if include_actual_month:
        initial_month += 1
        if initial_month > 12:
            initial_month = 1
            initial_year += 1
    return initial_month, initial_year
c075ef074b644749ca72955598c098cf76845608
3,659,111
def user_in_user_groups(user_id, **options):
    """
    Get all user groups a user belongs to

    :param user_id: The id of user
    :type user_id: str
    :param options: Generic advanced options dict, see online documentation
    :type options: dict, optional

    :return: List of groups user is in
    :rtype: dict
    """
    uri = [USER_GROUPS_SUB_PATH, user_id]
    return _call_account_api("get", uri, {}, **options)
70b83b81ee4d03e7ab5fff68be710c02c01aaa0d
3,659,113
def read_book(title_path):
    """Read a book and return it as a string"""
    # encoding="utf8" causes a problem when running the code in Python 2.7.
    # However, it runs normally when using Python 3.5.
    with open(title_path, "r", encoding="utf8") as current_file:
        text = current_file.read()
        text = text.replace("\n", "").replace("\r", "")
    return text
e5273c6b0b71638b47ce5ee5beb33c715c914a1b
3,659,114
from datetime import datetime def eval_whole_scene_one_epoch(sess, ops, test_writer): """ ops: dict mapping from string to tf ops """ global EPOCH_CNT is_training = False test_idxs = np.arange(0, len(TEST_DATASET_WHOLE_SCENE)) num_batches = len(TEST_DATASET_WHOLE_SCENE) total_correct = 0 total_seen = 0 loss_sum = 0 total_seen_class = [0 for _ in range(NUM_CLASSES)] total_correct_class = [0 for _ in range(NUM_CLASSES)] total_correct_vox = 0 total_seen_vox = 0 total_seen_class_vox = [0 for _ in range(NUM_CLASSES)] total_correct_class_vox = [0 for _ in range(NUM_CLASSES)] log_string(str(datetime.now())) log_string('---- EPOCH %03d EVALUATION WHOLE SCENE----'%(EPOCH_CNT)) labelweights = np.zeros(21) labelweights_vox = np.zeros(21) is_continue_batch = False extra_batch_data = np.zeros((0,NUM_POINT,3)) extra_batch_label = np.zeros((0,NUM_POINT)) extra_batch_smpw = np.zeros((0,NUM_POINT)) for batch_idx in range(num_batches): if not is_continue_batch: batch_data, batch_label, batch_smpw = TEST_DATASET_WHOLE_SCENE[batch_idx] batch_data = np.concatenate((batch_data,extra_batch_data),axis=0) batch_label = np.concatenate((batch_label,extra_batch_label),axis=0) batch_smpw = np.concatenate((batch_smpw,extra_batch_smpw),axis=0) else: batch_data_tmp, batch_label_tmp, batch_smpw_tmp = TEST_DATASET_WHOLE_SCENE[batch_idx] batch_data = np.concatenate((batch_data,batch_data_tmp),axis=0) batch_label = np.concatenate((batch_label,batch_label_tmp),axis=0) batch_smpw = np.concatenate((batch_smpw,batch_smpw_tmp),axis=0) if batch_data.shape[0]<BATCH_SIZE: is_continue_batch = True continue elif batch_data.shape[0]==BATCH_SIZE: is_continue_batch = False extra_batch_data = np.zeros((0,NUM_POINT,3)) extra_batch_label = np.zeros((0,NUM_POINT)) extra_batch_smpw = np.zeros((0,NUM_POINT)) else: is_continue_batch = False extra_batch_data = batch_data[BATCH_SIZE:,:,:] extra_batch_label = batch_label[BATCH_SIZE:,:] extra_batch_smpw = batch_smpw[BATCH_SIZE:,:] batch_data = batch_data[:BATCH_SIZE,:,:] batch_label = batch_label[:BATCH_SIZE,:] batch_smpw = batch_smpw[:BATCH_SIZE,:] aug_data = batch_data feed_dict = {ops['pointclouds_pl']: aug_data, ops['labels_pl']: batch_label, ops['smpws_pl']: batch_smpw, ops['is_training_pl']: is_training} summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']], feed_dict=feed_dict) test_writer.add_summary(summary, step) pred_val = np.argmax(pred_val, 2) # BxN correct = np.sum((pred_val == batch_label) & (batch_label>0) & (batch_smpw>0)) # evaluate only on 20 categories but not unknown total_correct += correct total_seen += np.sum((batch_label>0) & (batch_smpw>0)) loss_sum += loss_val tmp,_ = np.histogram(batch_label,range(22)) labelweights += tmp for l in range(NUM_CLASSES): total_seen_class[l] += np.sum((batch_label==l) & (batch_smpw>0)) total_correct_class[l] += np.sum((pred_val==l) & (batch_label==l) & (batch_smpw>0)) for b in range(batch_label.shape[0]): _, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(aug_data[b,batch_smpw[b,:]>0,:], np.concatenate((np.expand_dims(batch_label[b,batch_smpw[b,:]>0],1),np.expand_dims(pred_val[b,batch_smpw[b,:]>0],1)),axis=1), res=0.02) total_correct_vox += np.sum((uvlabel[:,0]==uvlabel[:,1])&(uvlabel[:,0]>0)) total_seen_vox += np.sum(uvlabel[:,0]>0) tmp,_ = np.histogram(uvlabel[:,0],range(22)) labelweights_vox += tmp for l in range(NUM_CLASSES): total_seen_class_vox[l] += np.sum(uvlabel[:,0]==l) total_correct_class_vox[l] += np.sum((uvlabel[:,0]==l) & (uvlabel[:,1]==l)) log_string('eval whole 
scene mean loss: %f' % (loss_sum / float(num_batches))) log_string('eval whole scene point accuracy vox: %f'% (total_correct_vox / float(total_seen_vox))) log_string('eval whole scene point avg class acc vox: %f' % (np.mean(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6)))) log_string('eval whole scene point accuracy: %f'% (total_correct / float(total_seen))) log_string('eval whole scene point avg class acc: %f' % (np.mean(np.array(total_correct_class[1:])/(np.array(total_seen_class[1:],dtype=np.float)+1e-6)))) labelweights = labelweights[1:].astype(np.float32)/np.sum(labelweights[1:].astype(np.float32)) labelweights_vox = labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32)) caliweights = np.array([0.388,0.357,0.038,0.033,0.017,0.02,0.016,0.025,0.002,0.002,0.002,0.007,0.006,0.022,0.004,0.0004,0.003,0.002,0.024,0.029]) caliacc = np.average(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6),weights=caliweights) log_string('eval whole scene point calibrated average acc vox: %f' % caliacc) per_class_str = 'vox based --------' for l in range(1,NUM_CLASSES): per_class_str += 'class %d weight: %f, acc: %f; ' % (l,labelweights_vox[l-1],total_correct_class_vox[l]/float(total_seen_class_vox[l])) log_string(per_class_str) EPOCH_CNT += 1 return caliacc
0c5fd39c8cb464a0b4883be15aa687882a20f94a
3,659,115
def _create_save_name(save_path: str, case_date: date, field_names: list,
                      fix: str = "") -> str:
    """Creates file name for saved images."""
    date_string = case_date.strftime("%Y%m%d")
    return f"{save_path}{date_string}_{'_'.join(field_names)}{fix}.png"
a731effa50ae291df31fcd4b282a924a057561dd
3,659,116
def list_favorite_queries():
    """List of all favorite queries.

    Returns (title, rows, headers, status)"""

    headers = ["Name", "Query"]
    rows = [(r, favoritequeries.get(r)) for r in favoritequeries.list()]

    if not rows:
        status = '\nNo favorite queries found.' + favoritequeries.usage
    else:
        status = ''
    return [('', rows, headers, status)]
e3b20d3d06a76d7f621fa830e2d22f0d3e6614ad
3,659,117
def random_portfolio_weights(weights_count) -> np.array:
    """ Random portfolio weights, of length weights_count. """
    weights = np.random.random((weights_count, 1))
    weights /= np.sum(weights)
    return weights.reshape(-1, 1)
47ba5ea84b24ede66fe4d1071fb82f721a550995
3,659,118
def matrix2list(mat):
    """Create list of lists from blender Matrix type."""
    return list(map(list, list(mat)))
9b4b598eb33e4d709e15fd826f23d06653659318
3,659,119
def convert_handle(handle):
    """
    Takes string handle such as 1: or 10:1 and creates a binary number
    accepted by the kernel Traffic Control.
    """
    if isinstance(handle, str):
        major, minor = handle.split(':')  # "major:minor"
        minor = minor if minor else '0'
        return int(major, 16) << 16 | int(minor, 16)
    return handle
ed4ef5107178bd809a421e0b66c621d9bdaceef1
3,659,120
def index(request):
    """Display start page"""
    return HttpResponseRedirect(reverse('admin:index'))
c237e46affb7217bbcfc1146d98f84fb1cc20cc6
3,659,121
import traceback from datetime import datetime async def check_data(user_input, hass, own_id=None): """Check validity of the provided date.""" ret = {} if(CONF_ICS_URL in user_input): try: cal_string = await async_load_data(hass, user_input[CONF_ICS_URL]) try: Calendar.from_ical(cal_string) except Exception: _LOGGER.error(traceback.format_exc()) ret["base"] = ERROR_ICS return ret except Exception: _LOGGER.error(traceback.format_exc()) ret["base"] = ERROR_URL return ret if(CONF_TIMEFORMAT in user_input): try: datetime.datetime.now(get_localzone()).strftime(user_input[CONF_TIMEFORMAT]) except Exception: _LOGGER.error(traceback.format_exc()) ret["base"] = ERROR_TIMEFORMAT return ret if(CONF_ID in user_input): if(user_input[CONF_ID] < 0): _LOGGER.error("ICS: ID below zero") ret["base"] = ERROR_SMALL_ID return ret if(CONF_LOOKAHEAD in user_input): if(user_input[CONF_LOOKAHEAD] < 1): _LOGGER.error("ICS: Lookahead < 1") ret["base"] = ERROR_SMALL_LOOKAHEAD return ret if(CONF_ID in user_input): if((own_id != user_input[CONF_ID]) and (hass is not None)): if(async_generate_entity_id(ENTITY_ID_FORMAT, "ics_" + str(user_input[CONF_ID]), hass=hass) != PLATFORM + ".ics_" + str(user_input[CONF_ID])): _LOGGER.error("ICS: ID not unique") ret["base"] = ERROR_ID_NOT_UNIQUE return ret if(CONF_N_SKIP in user_input): if(user_input[CONF_N_SKIP] < 0): _LOGGER.error("ICS: Skip below zero") ret["base"] = ERROR_NEGATIVE_SKIP return ret return ret
a0b9302cb1f69c98585edb0bae918675ceab32cf
3,659,122
def legendre(N, x):
    """
    Returns the value of Legendre Polynomial P_N(x) at position x[-1, 1].
    """
    P = np.zeros(N + 1)

    if N == 0:
        P[0] = 1
    elif N == 1:
        P[1] = x
    else:
        P[0] = 1
        P[1] = x
        for i in range(2, N + 1):
            P[i] = (1.0 / float(i)) * ((2 * i - 1) * x * P[i - 1] - (i - 1) * P[i - 2])

    return P[N]
0e02e19ef0a251aa4b30823d1598fc5fb8933288
3,659,127
def skip_any_whitespace(doc, idx): """Iterate through characters in ``doc`` starting from index ``idx`` until a non-whitespace character is reached. This iteration will also attempt to ignore comments. Args: doc (str): The JSPEC document. idx (int): The starting index for the iterator. Returns: str: The first non-whitespace character, starting at index ``idx`` int: The index of this character in ``doc`` Raises: JSPECDecodeError: Raised if an unterminated comment is detected. """ nextchar = doc[idx:idx + 1] if nextchar not in WHITESPACE_CHARACTERS: return nextchar, idx while True: idx = WHITESPACE_MATCH(doc, idx).end() if doc[idx:idx + 2] == '//': idx = COMMENT_MATCH(doc, idx).end() continue if doc[idx:idx + 2] != '/*': break m = MULTILINE_COMMENT_MATCH(doc, idx) if m is None: raise JSPECDecodeError("Unterminated comment", doc, idx) idx = m.end() nextchar = doc[idx:idx + 1] return nextchar, idx
18038bce945fb35222254a0fedf5d3936bb83308
3,659,128
def normalized_cross_correlation(f, g): """ Normalized cross-correlation of f and g. Normalize the subimage of f and the template g at each step before computing the weighted sum of the two. Hint: you should look up useful numpy functions online for calculating the mean and standard deviation. Args: f: numpy array of shape (Hf, Wf). g: numpy array of shape (Hg, Wg). Returns: out: numpy array of shape (Hf, Wf). """ Hf, Wf = f.shape Hg, Wg = g.shape if Hg%2 == 0: Hg = Hg-1 if Wg%2 == 0: Wg = Wg-1 g = g[:Hg,:Wg] g_mean = np.mean(g) g_std = np.std(g) filter_vector = g.reshape([1,Hg*Wg]) normalized_filter_vec = (g.reshape([1,Hg*Wg]) - g_mean)/g_std out = np.zeros((Hf, Wf)) ### YOUR CODE HERE pad_height,pad_width = int((Hg-1)/2),int((Wg-1)/2) im_padded = zero_pad(f, pad_height, pad_width) for i in range(Hf): for j in range(Wf): patch_vector = im_padded[i:i+Hg,j:j+Wg].reshape([Hg*Wg,1]) patch_mean = np.mean(patch_vector) patch_std = np.std(patch_vector) normalized_patch_vec = (patch_vector - patch_mean)/patch_std out[i,j] = np.dot(normalized_filter_vec,normalized_patch_vec) ### END YOUR CODE return out
fb6057d882b655a43a7d4a7d3c7ced00d32eeabf
3,659,129
import numpy def sphere_coordinates(sphere, inversion=False): """ Compute spherical coordinates (longitude, latitude) on a sphere. Parameters ---------- sphere: (AimsTimeSurface_3_VOID) a sphere mesh: vertices must be on a sphere with center 0. inversion: bool if True, the longitude coord is inverted (useful for right hemisphere) Return ------ (longitude, latitude): tuple, each element being a TimeTexture_FLOAT """ # a vector of vertices where each vertex is a 3D point # with coordinates in millimeters if isinstance(sphere, (aims.AimsTimeSurface_3_VOID, aims.AimsTimeSurface_2_VOID, aims.AimsTimeSurface_4_VOID)): vert = sphere.vertex() nvert = numpy.asarray(vert) else: nvert = numpy.asarray(sphere) ######################################################################### # A latitude texture # ######################################################################### radius = numpy.sqrt(numpy.square(nvert[:, 0]) + numpy.square(nvert[:, 1])) sphere_lat = numpy.arctan2(radius, nvert[:, 2]) sphere_lat = -sphere_lat * 180. / numpy.pi + 180. slat_tex = aims.TimeTexture(sphere_lat.astype(numpy.float32)) ######################################################################### # A longitude texture # ######################################################################### sphere_lon = numpy.arctan2(nvert[:, 1], nvert[:, 0]) sphere_lon *= 180. / numpy.pi sphere_lon += 180 print('inversion: ', inversion) if inversion == "True": print("there is an inversion", inversion) sphere_lon = 360 - sphere_lon slon_tex = aims.TimeTexture(sphere_lon.astype(numpy.float32)) return slon_tex, slat_tex
82f9e9c0e969904414761ed2ebe70d30194277e5
3,659,130
def get_height(img):
    """
    Returns the number of rows in the image
    """
    return len(img)
765babc9fbc1468ef5045fa925843934462a3d32
3,659,132
def wpt_ask_for_name_and_coords():
    """asks for name and coordinates of waypoint that should be created"""
    name = input("Gib den Namen des Wegpunkts ein: ")
    print("Gib die Koordinaten ein (Format: X XX°XX.XXX, X XXX°XX.XXX)")
    coordstr = input(">> ")
    return name, coordstr
d38a728c5a6ecd1fde9500175ea5895ade8c6880
3,659,133
def car_following_with_adp(distance_2_tan, radian_at_tan, distance_integral, K, estimated_dis, rec):
    """
    Control with `distance_2_tan`, `radian_at_tan` and `distance_integral`
    with `K` trained from the ADP algorithm.

    While following the car in front of it with a simple P controller and
    `distance_2_car`.
    """
    state = np.array([distance_2_tan, radian_at_tan, distance_integral])

    MID_K = 1.5
    diff = estimated_dis - 70  # try to stay 70cm away from the previous car
    pwm_mid = 60
    if diff < -40:
        return 0, 0
    elif diff >= 60:
        pwm_mid = 60
    else:
        pwm_mid = np.clip(45.0 + MID_K * diff, 30, 60)
    print('distance:', estimated_dis, 'diff:', diff, 'mid:', pwm_mid)
    rec.append([estimated_dis, pwm_mid, distance_2_tan, radian_at_tan, distance_integral])

    differential_drive = np.clip(-np.matmul(K, state), -100.0, 100.0)
    pwm_l_new = np.clip(pwm_mid - differential_drive / 2, 0, 100)
    pwm_r_new = np.clip(pwm_mid + differential_drive / 2, 0, 100)
    return pwm_l_new, pwm_r_new
7a49b257e7361451deae10d37a8d8ec811f4890d
3,659,134
def construct_full_available(cards, suits):
    """
    Construct suit availability grid - a list of available suits for each rank
    slot in each player's deck. Returns grid and array giving the total number
    of available suits for each slot.
    """
    num_players, num_in_deck = cards.shape
    # will store the number of possible cards that can fill each deck slot
    num_available = np.ones(cards.shape) * np.nan
    # will store the suits that can fill each deck slot
    available = []
    for player in range(num_players):
        avail_for_player = []  # holds sublists of available suits for this player for each rank
        for rank in np.arange(num_in_deck):  # iterate over card ranks
            # list suits available to this player at this rank (can be empty)
            a = get_available(cards, suits, player, rank)
            avail_for_player.append(a)
            num_available[player, rank] = len(a)
        available.append(avail_for_player)
    return num_available, available
0f4b2712a1346372d0782edfbc7c7b69a8e9e8e6
3,659,135
import io

# The double-underscore aliases below are assumed to be provided by the enclosing
# module; the imports here are a best guess at what they refer to.
import gc as __gc
import logging as __logging
import numpy as __np
import pymc3 as __pm
import theano.tensor as __tt


def fit_gaussians(estimated_hapcov, chromosomes=None, output_dir=None, cov_max=None, cov_min=None, level=0,
                  cov_sample=None):
    """
    Fits a 7-component Gaussian mixture model to the coverage distribution of the sample, using the appropriate
    attributes of the PloidyEstimation object. The center of the first Gaussian is initialized from a narrow region
    around the value of the estimated_hapcov attribute. The centers of the other Gaussians are initialized in a region
    around the value of estimated_hapcov multiplied by consecutive whole numbers.

    The parameters of the fitted model (center, sigma and weight) for all seven Gaussians are both saved to the
    GaussDistParams.pkl file (in output_dir, for later reuse) and set as the value of the distribution_dict attribute.

    :param cov_sample: a sample of the coverage distribution of the investigated sample, if None, it is loaded from
        the temporary files of the output_dir (default: None) (array-like)
    :param cov_max: the maximum value of the coverage for a position to be considered in the estimation
        (default: None) (int)
    :param cov_min: the minimum value of the coverage for a position to be considered in the estimation
        (default: None) (int)
    :param output_dir: the path to the output directory of the PloidyEstimator object, where temporary files are
        located. If not None, distribution parameters are saved there as GaussDistParams.pkl. (default: None) (str)
    :param chromosomes: list of chromosomes for the sample (default: None) (array-like)
    :param estimated_hapcov: the estimated value for the haploid coverage, used as prior (float)
    :param level: the level of indentation used in verbose output (default: 0) (int)

    :returns: dictionary containing the fitted parameters of the 7 Gaussians
    """

    def get_samples(coverage_distribution, estimated_haploid_cov, number_of_iterations, burn_period):
        K = 7
        halfwidth_of_uniform = 0.2

        __gc.collect()

        model = __pm.Model()
        with model:
            p = __pm.Dirichlet('p', a=__np.array([1., 1., 1., 1., 1., 1., 1.]), shape=K)
            c1 = __pm.Uniform('c1', (1 - halfwidth_of_uniform) * estimated_haploid_cov,
                              (1 + halfwidth_of_uniform) * estimated_haploid_cov)
            means = __tt.stack([c1, c1 * 2, c1 * 3, c1 * 4, c1 * 5, c1 * 6, c1 * 7])
            order_means_potential = __pm.Potential('order_means_potential',
                                                   __tt.switch(means[1] - means[0] < 0, -__np.inf, 0)
                                                   + __tt.switch(means[2] - means[1] < 0, -__np.inf, 0))
            sds = __pm.Uniform('sds', lower=0, upper=estimated_haploid_cov / 2, shape=K)
            category = __pm.Categorical('category', p=p, shape=len(coverage_distribution))
            points = __pm.Normal('obs', mu=means[category], sd=sds[category], observed=coverage_distribution)
        with model:
            step1 = __pm.Metropolis(vars=[p, sds, means])
            step2 = __pm.ElemwiseCategorical(vars=[category], values=[0, 1, 2, 3, 4, 5, 6])
            __logging.getLogger("pymc3").setLevel(__logging.WARNING)
            # pymc3's sample() takes the number of kept draws as "draws"; the original
            # keyword "draw" would not be recognized.
            tr = __pm.sample(draws=number_of_iterations - burn_period, tune=burn_period, step=[step1, step2],
                             progressbar=False, verbose=0, compute_convergence_checks=False)
            # trace = tr[burn_period:]
            # return trace
            return tr

    if cov_sample is None:
        cov_sample = io.get_coverage_distribution(chromosomes=chromosomes,
                                                  output_dir=output_dir,
                                                  cov_max=cov_max,
                                                  cov_min=cov_min)

    iterations2 = 15000
    burn_beginning2 = 10000

    # logger = __logging.getLogger("pymc3")
    # logger.propagate = False
    trace2 = get_samples(coverage_distribution=cov_sample,
                         estimated_haploid_cov=estimated_hapcov,
                         number_of_iterations=iterations2,
                         burn_period=burn_beginning2)

    std_trace = trace2.get_values('sds', chains=[0])
    p_trace = trace2.get_values('p', chains=[0])
    sigma = std_trace.mean(axis=0)
    p = p_trace.mean(axis=0)
    mu = __np.array([trace2.get_values('c1', chains=[0]).mean() * (i + 1) for i in range(7)])

    prior_dict = {'mu': mu, 'sigma': sigma, 'p': p}
    del trace2

    if output_dir:
        io.save_obj(prior_dict, output_dir + '/GaussDistParams')
    return prior_dict
660952140f6ea8685488a108e11ea1ca6f4e7fc5
3,659,137
import numpy as np
import pandas as pd
from natsort import natsorted
from sklearn.cluster import DBSCAN


def remove_outliers(cords, eps: int = 1, min_samples: int = 2):
    """
    Remove outlying cells based on UMAP embeddings with DBSCAN (density based clustering).

    Call as: sub.obs["d_cluster"] = remove_outliers(sub.obsm["X_umap"], min_samples=10)

    Args:
        cords: adata UMAP coordinates, typically adata.obsm["X_umap"]
        eps: Maximum distance between two samples for them to be considered neighbors
        min_samples: Minimum number of samples in a cluster

    Returns:
        Pandas Categorical of cluster labels
    """
    clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(cords)

    cluster = clustering.labels_.astype("U")

    return pd.Categorical(cluster, categories=natsorted(np.unique(cluster)))
0b4c581158bc3c074b60ad5d29b333418a4f52ce
3,659,138
def sum_squares(n):
    """
    Returns: sum of squares from 1 to n-1

    Example: sum_squares(5) is 1+4+9+16 = 30

    Parameter n: The number of steps
    Precondition: n is an int > 0
    """
    # Accumulator
    total = 0

    for x in range(n):
        total = total + x*x

    return total
669a5aa03a9d9a9ffe74e48571250ffa38a7d319
3,659,139
def resolve_attribute(thing, name):
    """
    A replacement resolver function for looking up symbols as members of *thing*. This is effectively the same as
    ``thing.name``. The *thing* object can be a :py:func:`~collections.namedtuple`, a custom Python class or any other
    object. Each of the members of *thing* must be of a compatible data type.

    .. warning::
        This effectively exposes all members of *thing*. If any members are sensitive, then a custom resolver should
        be used that checks *name* against a whitelist of attributes that are allowed to be accessed.

    :param thing: The object on which the *name* attribute will be accessed.
    :param str name: The symbol name that is being resolved.
    :return: The value for the corresponding attribute *name*.
    """
    if not hasattr(thing, name):
        raise errors.SymbolResolutionError(name, thing=thing)
    return getattr(thing, name)
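# Usage sketch (not part of the original source): resolving a namedtuple field.
# The Point namedtuple below is purely illustrative; only the success path is
# exercised, so the external `errors` module is never touched.
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
p = Point(x=1, y=2)
assert resolve_attribute(p, 'x') == 1   # equivalent to p.x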
76f7b4548a177168d98bb5cdf4c022bfe8e0d36e
3,659,140
import numpy as np


def moment_fluxes(indices, wts_left, wts_right, xi_left, xi_right):
    """
    Computes moment fluxes.

    inputs:
    -------
    indices:   moment indices, size [ num_moments, num_internal_coords ]
    wts_left:  weights on the left side, size [ num_nodes ]
    wts_right: weights on the right side, size [ num_nodes ]
    xi_left:   abscissas on the left side, size [ num_internal_coords, num_nodes ]
    xi_right:  abscissas on the right side, size [ num_internal_coords, num_nodes ]

    (num_nodes is the number of quadrature nodes; it depends on the inversion
    algorithm and is inferred here from the shape of xi_left.)
    """
    num_moments = len(indices)
    num_coords, num_nodes = xi_left.shape

    flux = np.zeros(num_moments)
    for i_moment in range(num_moments):
        for i_node in range(num_nodes):
            # compute local fluxes
            flux_left = local_flux(
                wts_left[i_node], xi_left[:, i_node], indices[i_moment, :]
            )
            flux_right = local_flux(
                wts_right[i_node], xi_right[:, i_node], indices[i_moment, :]
            )

            # limiter (?)
            flux_left = flux_left * max(xi_left[0, i_node], 0.0)
            flux_right = flux_right * min(xi_right[0, i_node], 0.0)

            # quadrature
            flux[i_moment] += flux_left + flux_right
    return flux
24ed54b56afe127963e6cc7f9d74448e8415edb0
3,659,141
def froc_curve_per_side(df_gt, df_pred, thresholds, verbose, cases="all"):
    """
    Compute FROC curve per side/breast. All lesions in a breast are considered TP if
    any lesion in that breast is detected.
    """
    assert cases in ["all", "cancer", "benign"]

    if not cases == "all":
        df_exclude = df_gt[~(df_gt["Class"] == cases)]
        df_gt = df_gt[df_gt["Class"] == cases]
        df_pred = df_pred[~(df_pred["StudyUID"].isin(set(df_exclude["StudyUID"])))]

    df_gt["Side"] = df_gt["View"].astype(str).str[0]
    df_pred["Side"] = df_pred["View"].astype(str).str[0]

    total_volumes = len(df_pred.drop_duplicates(subset=["StudyUID", "View"]))
    total_tps = len(df_gt.drop_duplicates(subset=["PatientID", "Side"]))

    tpr = []
    fps = []
    if verbose:
        print("{} cases FROC:".format(cases.upper()))
    for th in sorted(thresholds, reverse=True):
        df_th = df_pred[df_pred["Score"] >= th]
        df_th_unique_tp = df_th.drop_duplicates(subset=["PatientID", "Side", "TP"])
        num_tps_th = float(sum(df_th_unique_tp["TP"]))
        tpr_th = num_tps_th / total_tps
        num_fps_th = float(len(df_th[df_th["TP"] == 0]))
        fps_th = num_fps_th / total_volumes
        tpr.append(tpr_th)
        fps.append(fps_th)
        if verbose:
            print(
                "Sensitivity {0:.2f} at {1:.2f} FPs/volume (threshold: {2:.4f})".format(
                    tpr_th * 100, fps_th, th
                )
            )
    return tpr, fps
6a113856a920f775be3ce652fa09d9d79fb9be00
3,659,143
import itertools


def make_lists(*args, **kwargs):
    """
    The make_lists function attaches auxiliary things to an input key_list
    of (normally) AD objects. Each key gets exactly one auxiliary thing from
    each other list -- these lists can be as long as the key_list, or have
    only one item in (in which case they don't have to be lists at all).

    Parameters
    ----------
    args: lists of str/AD (or single str/AD)
        key_list and auxiliary things to be matched to each AD
    kwargs["force_ad"]: bool
        coerce strings into AD objects?

    Returns
    -------
    tuple of lists
        the lists made from the keys and values
    """
    log = logutils.get_logger(__name__)

    force_ad = kwargs.pop("force_ad", False)
    if kwargs:
        raise TypeError("make_lists() got unexpected keyword arguments "
                        "{}".format(kwargs.keys()))

    ret_value = [arg if isinstance(arg, (list, tuple)) else [arg]
                 for arg in args]

    # We allow only one value that can be assigned to multiple keys
    len_list = len(ret_value[0])
    if len_list > 1:
        for i in range(1, len(ret_value)):
            if len(ret_value[i]) == 1:
                ret_value[i] *= len_list

    if force_ad:
        # We only want to open as many AD objects as there are unique entries,
        # so collapse all items in lists to a set and multiple keys with the
        # same value will be assigned references to the same open AD object
        ad_map_dict = {}
        for x in set(itertools.chain(*ret_value)):
            try:
                ad_map_dict.update({x: x if isinstance(x, astrodata.AstroData) or x is None
                                    else astrodata.open(x)})
            except:
                ad_map_dict.update({x: None})
                log.warning(f"Cannot open file {x}")
        ret_value = [[ad_map_dict[x] for x in List] for List in ret_value]

    return ret_value
5bdfd32ad317238e21f631655d01bf629722c959
3,659,144
def get_free_comment_url_ajax(content_object, parent=None, ajax_type='json'):
    """
    Given an object and an optional parent, this tag gets the URL to POST to for the
    creation of new ``FreeThreadedComment`` objects.  It returns the latest created object
    in the AJAX form of the user's choosing (json or xml).
    """
    kwargs = get_contenttype_kwargs(content_object)
    kwargs.update({'ajax': ajax_type})
    if parent:
        if not isinstance(parent, FreeThreadedComment):
            # Python 3 raise syntax (the original used the Python 2 statement form)
            raise template.TemplateSyntaxError(
                "get_free_comment_url_ajax requires its parent object to be of type FreeThreadedComment")
        kwargs.update({'parent_id': getattr(parent, 'pk', getattr(parent, 'id'))})
        return reverse('tc_free_comment_parent_ajax', kwargs=kwargs)
    else:
        return reverse('tc_free_comment_ajax', kwargs=kwargs)
7d22d2f2b0e012d462d0244d8154cd9ae00ee608
3,659,145
import numpy as np


def cosine_score(vector1, vector2):
    """Calculate the cosine similarity score between two spectral vectors."""
    return np.dot(vector1, vector2) / np.sqrt(np.dot(vector1, vector1) * np.dot(vector2, vector2))
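# Usage sketch (not part of the original source): two toy spectral vectors.
# Parallel vectors give a score of 1.0.
import numpy as np

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([2.0, 4.0, 6.0])
print(cosine_score(v1, v2))   # ~1.0, since v2 is just a scaled copy of v1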
5b206abb179f1635eeda6267e8019901c480afad
3,659,147
def fixture_times() -> Problem[int]:
    """Generate a problem which tests a times function."""

    @test_case(4, 6)
    @test_case(-2, 16)
    @test_case(2, -3, aga_hidden=True, aga_output=-6)
    @problem()
    def times(x: int, y: int) -> int:
        """Compute x * y."""
        return x * y

    return times
a00286a5827ec0c4fe7cb390d0d420d11823eb15
3,659,148
import matplotlib.pyplot as plt
import numpy as np


def get_axis_bounds(ax=None):
    """Obtain bounds of axis in format compatible with ipyleaflet.

    Returns:
        bounds: np.array with lat and lon bounds.
                bounds.tolist() gives [[s, w], [n, e]]
    """
    if ax is None:
        ax = plt.gca()

    return np.array([ax.get_ylim(), ax.get_xlim()]).T
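# Usage sketch (not part of the original source), assuming matplotlib is available:
# plot lon on x and lat on y, then read the bounds in [[s, w], [n, e]] form.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([10.0, 11.0], [50.0, 51.0])
bounds = get_axis_bounds(ax)
print(bounds.tolist())   # [[south, west], [north, east]], ready for ipyleaflet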
32bc97cf6596775dbfdffea655f5346a1fd21764
3,659,149
def get_pymatgen_structure(cell: tuple) -> Structure:
    """
    Get pymatgen structure from cell.

    Args:
        cell: Cell (lattice, scaled_positions, symbols).
    """
    return Structure(lattice=cell[0], coords=cell[1], species=cell[2])
c76e0e71da83737f079d36e56b4867e551affeff
3,659,150
import numpy as np


def conditional_samples(x_3, x_prime_3, MC_method, M):
    """Generate mixed sample sets of interest distributed according to a conditional PDF.

    Parameters
    ----------
    x_3 : np.ndarray
        Array with shape (n_draws, 3).
    x_prime_3 : np.ndarray
        Array with shape (n_draws, 3).
    MC_method : string
        Specify the Monte Carlo estimator. One of ["Brute force", "DLR"],
        where "DLR" denotes the double loop reordering approach.
    M : int
        The number of conditional bins to generate if `MC_method` is "DLR".

    Returns
    -------
    x_3_mix : np.ndarray
        Mixed sample sets. Shape has the form (n_draws, 3, n_draws, 3) for
        "Brute force" and (M, 3, n_draws, 3) for "DLR".
    """
    n_draws, n_params = x_3.shape

    if MC_method == "Brute force":
        x_3_mix = np.zeros((n_draws, n_params, n_draws, n_params))
        for i in range(n_params):
            for j in range(n_draws):
                x_3_mix[j, i] = x_3
                x_3_mix[j, i, :, i] = x_prime_3[j, i]

    if MC_method == "DLR":
        conditional_bin = x_3[:M]
        x_3_mix = np.zeros((M, n_params, n_draws, n_params))

        # subdivide unconditional samples into M equally sized bins,
        # within each bin x_i being fixed.
        for i in range(n_params):
            for j in range(M):
                x_3_mix[j, i] = x_3
                x_3_mix[j, i, :, i] = conditional_bin[j, i]

    return x_3_mix
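# Usage sketch (not part of the original source): random draws with the shapes
# described in the docstring; the DLR estimator returns one block per bin.
import numpy as np

rng = np.random.default_rng(0)
x_3 = rng.normal(size=(100, 3))
x_prime_3 = rng.normal(size=(100, 3))
mixed = conditional_samples(x_3, x_prime_3, MC_method="DLR", M=10)
print(mixed.shape)   # (10, 3, 100, 3)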
e80d238f27a65271115fd3de2f574bfc3bbdb432
3,659,152
def recombine(geno_matrix, chr_index, no_loci):  # , no_samples):
    """
    Recombine at randomly generated breakpoints.
    """
    recomb = {0: 0, 1: 2, 2: 1, 3: 3}  # '0|1' <-> '1|0'
    no_samples = geno_matrix.shape[0]
    # print(no_samples)
    masked, bp_list = designate_breakpoints(chr_index, no_loci, no_samples)
    z = np.copy(geno_matrix)
    if np.asarray(bp_list).size > 0:
        # this would modify the original geno_matrix too! Work with copy!
        try:
            z[masked] = np.vectorize(recomb.get)(z[masked])
        except:
            return z
    return z
455ee154763b31e4d5baa9653caa9f9a118f248e
3,659,153
def update_record_files_async(object_version):
    """Get the bucket id and spawn a task to update record metadata."""
    # convert to string to be able to serialize it when sending to the task
    str_uuid = str(object_version.bucket_id)
    return update_record_files_by_bucket.delay(bucket_id=str_uuid)
ba0ed0af4e6a604801344aa459b6279c5a79dfae
3,659,154
import platform


def check_platform():
    """Return the platform/OS name as a string (e.g. 'Linux', 'Windows', 'Darwin')."""
    return platform.system()
73e813c55807e7d84517cb7ce51ce9db34e42c23
3,659,155
def get_field_keys(table):
    """
    Field keys for a selected table

    :param table: table name
    :return: list of dictionaries
    """
    cql = 'SHOW FIELD KEYS FROM \"{}\"'.format(table)
    response = db_man.influx_qry(cql).get_points()
    return [x for x in response]
ca7be2b79c1641d407fa52ea805e5d99bb2b5c42
3,659,156
def extract_text_from_spans(spans, join_with_space=True, remove_integer_superscripts=True):
    """
    Convert a collection of page tokens/words/spans into a single text string.
    """
    if join_with_space:
        join_char = " "
    else:
        join_char = ""
    spans_copy = spans[:]

    if remove_integer_superscripts:
        for span in spans:
            flags = span['flags']
            if flags & 2**0:  # superscript flag
                if is_int(span['text']):
                    spans_copy.remove(span)
                else:
                    span['superscript'] = True

    if len(spans_copy) == 0:
        return ""

    spans_copy.sort(key=lambda span: span['span_num'])
    spans_copy.sort(key=lambda span: span['line_num'])
    spans_copy.sort(key=lambda span: span['block_num'])

    # Force the span at the end of every line within a block to have exactly one space
    # unless the line ends with a space or ends with a non-space followed by a hyphen
    line_texts = []
    line_span_texts = [spans_copy[0]['text']]
    for span1, span2 in zip(spans_copy[:-1], spans_copy[1:]):
        if not span1['block_num'] == span2['block_num'] or not span1['line_num'] == span2['line_num']:
            line_text = join_char.join(line_span_texts).strip()
            if (len(line_text) > 0
                    and not line_text[-1] == ' '
                    and not (len(line_text) > 1
                             and line_text[-1] == "-"
                             and not line_text[-2] == ' ')):
                if not join_with_space:
                    line_text += ' '
            line_texts.append(line_text)
            line_span_texts = [span2['text']]
        else:
            line_span_texts.append(span2['text'])
    line_text = join_char.join(line_span_texts)
    line_texts.append(line_text)

    return join_char.join(line_texts).strip()
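# Usage sketch (not part of the original source): two spans on the same line of
# the same block; flags=0 means the superscript handling is never triggered, so
# the external is_int helper is not needed here.
spans = [
    {"text": "Hello", "flags": 0, "span_num": 0, "line_num": 0, "block_num": 0},
    {"text": "world", "flags": 0, "span_num": 1, "line_num": 0, "block_num": 0},
]
print(extract_text_from_spans(spans))   # "Hello world"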
ccb45164f695bdbbc53eac9c4cf6596e67c24fd0
3,659,157
def cf_resource_pool(cli_ctx, *_):
    """
    Client factory for resourcepools.
    """
    return cf_connectedvmware(cli_ctx).resource_pools
6cc838a7ad23786b5d86f945da98410506f7e758
3,659,158
# the original imported the typing alias; the concrete container class is used here
from collections import OrderedDict


def get_bcolz_col_names(cols):
    """Normalize column names to the naming rules used in bcolz tables; returns an OrderedDict."""
    trantab = str.maketrans(IN_TABLE, OUT_TABLE)  # build the translation table
    # col_names = OrderedDict(
    #     {col: get_acronym(col.translate(trantab)) for col in cols})
    col_names = OrderedDict()
    for col in cols:
        if col in (AD_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME):
            col_names[col] = col
        else:
            col_names[col] = regular_name(col, trantab)
    if len(col_names.values()) != len(set(col_names.values())):
        raise ValueError("The normalized column names contain duplicates")
    return col_names
9f52cd5adba9ef5d45ff74ef9b35825b80e2c621
3,659,160
def classify_loss(logits, target, eps):
    """Cross-entropy classification loss, with label smoothing of `eps` when eps > 0."""
    if eps > 0:
        loss = cross_entropy_with_smoothing(logits, target, eps, None)
    else:
        loss = F.cross_entropy(logits, target.view(-1))
    return loss
549d2c1cbd3275153960ffde6f029c231b9e5703
3,659,161
def flip(position, adjacent):
    """finds the furthest position on grid up to which the player has captured enemy pieces"""
    interval = (adjacent[0] - position[0], adjacent[1] - position[1])
    if adjacent[0] < 0 or adjacent[0] > (8*tile_size):
        return False
    elif adjacent[1] < 0 or adjacent[1] > (8*tile_size):
        return False

    check_piece = (adjacent[0] + interval[0], adjacent[1] + interval[1])
    if check_piece in current_piece:
        flip_back(adjacent, (interval[0] * -1, interval[1] * -1))
    else:
        return flip(adjacent, check_piece)
f6691ae4fe078668220c68c1df4706a0f5825faf
3,659,162
def zeros_from_spec(nested_spec, batch_size):
    """Create nested zero Tensors or Distributions.

    A zero tensor with shape[0]=`batch_size` is created for each TensorSpec, and
    a distribution with all the parameters as zero Tensors is created for each
    DistributionSpec.

    Args:
        nested_spec (nested TensorSpec or DistributionSpec):
        batch_size (int): batch size added as the first dimension to the shapes
            in TensorSpec
    Returns:
        nested Tensor or Distribution
    """

    def _zero_tensor(spec):
        if batch_size is None:
            shape = spec.shape
        else:
            spec_shape = tf.convert_to_tensor(value=spec.shape, dtype=tf.int32)
            shape = tf.concat(([batch_size], spec_shape), axis=0)
        dtype = spec.dtype
        return tf.zeros(shape, dtype)

    param_spec = nest_utils.to_distribution_param_spec(nested_spec)
    params = tf.nest.map_structure(_zero_tensor, param_spec)
    return nest_utils.params_to_distributions(params, nested_spec)
8c89a930a6fd81d793c95166b90f4621312e69a9
3,659,164
def type_to_str(t):
    """Return str of variable type."""
    if not hasattr(t, "broadcastable"):
        return str(t)
    s = broadcastable_to_str(t.broadcastable)
    if s == "":
        s = str(t.dtype)
    else:
        s = dtype_to_char(t.dtype) + s
    return s
a07982cbc6c8922c43620d23a3dcced24bafbef4
3,659,165
def save(self, fname="", ext="", slab="", **kwargs):
    """Saves all current database information.

    APDL Command: SAVE

    Parameters
    ----------
    fname
        File name and directory path (248 characters maximum, including the
        characters needed for the directory path).  An unspecified directory
        path defaults to the working directory; in this case, you can use all
        248 characters for the file name.

    ext
        Filename extension (eight-character maximum).

    slab
        Mode for saving the database:

        ALL - Save the model data, solution data and post data (element
              tables, etc.). This value is the default.

        MODEL - Save the model data (solid model, finite element model,
                loadings, etc.) only.

        SOLU - Save the model data and the solution data (nodal and element
               results).

    Notes
    -----
    Saves all current database information to a file (File.DB).  In
    interactive mode, an existing File.DB is first written to a backup file
    (File.DBB).  In batch mode, an existing File.DB is replaced by the current
    database information with no backup.  The command should be issued
    periodically to ensure a current file backup in case of a system "crash"
    or a "line drop."  It may also be issued before a "doubtful" command so
    that if the result is not what was intended the database may be easily
    restored to the previous state.  A save may be time consuming for large
    models.  Repeated use of this command overwrites the previous data on the
    file (but a backup file is first written during an interactive run).  When
    issued from within POST1, the nodal boundary conditions in the database
    (which were read from the results file) will overwrite the nodal boundary
    conditions existing on the database file.

    Internal nodes may be created during solution (for example, via the mixed
    u-P formulation or generalized plane strain option for current-technology
    elements, the Lagrangian multiplier method for contact elements or the
    MPC184 elements, or the quadratic or cubic option of the BEAM188 and
    PIPE288 elements).  It is sometimes necessary to save the internal nodes
    in the database for later operations, such as cutting boundary
    interpolations (CBDOF) for submodeling.  To do so, issue the SAVE command
    after the first SOLVE command.

    In general, saving after solving is always a good practice.

    This command is valid in any processor.
    """
    return self.run(f"SAVE,{fname},{ext},,{slab}", **kwargs)
ddc79dc0f54e32d6cd96e115ad9842c1689c17b1
3,659,166
def rollout(
        env,
        agent,
        max_path_length=np.inf,
        render=False,
        render_kwargs=None,
        fast_rgb=True
):
    """
    The following value for the following keys will be a 2D array, with the
    first dimension corresponding to the time dimension.
     - observations
     - actions
     - rewards
     - next_observations
     - terminals

    The next two elements will be lists of dictionaries, with the index into
    the list being the index into the time
     - agent_infos
     - env_infos
    """
    if render_kwargs is None:
        render_kwargs = {}
    observations = []
    actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    rgb_array = []
    o = env.reset()
    agent.reset()
    next_o = None
    path_length = 0

    if hasattr(env, 'sim') and 'fixed' in env.sim.model.camera_names:
        camera_name = 'fixed'
    else:
        camera_name = None

    if render:
        # import ipdb; ipdb.set_trace(context=10)
        if render_kwargs['mode'] == 'rgb_array':
            if not fast_rgb:
                rgb_array.append(env.sim.render(500, 500, camera_name=camera_name))
            else:
                rgb_array.append(np.zeros((500, 500, 3), dtype=np.uint8))
        else:
            env.render(**render_kwargs)

    # print("###############################")
    while path_length < max_path_length:
        a, agent_info = agent.get_action(o)
        # print(a)
        next_o, r, d, env_info = env.step(a)
        observations.append(o)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:
            break
        o = next_o
        if render:
            if render_kwargs['mode'] == 'rgb_array':
                if path_length % 3 == 0 or not fast_rgb:
                    rgb_array.append(env.sim.render(500, 500, camera_name=camera_name))
                else:
                    rgb_array.append(np.zeros((500, 500, 3), dtype=np.uint8))
            else:
                env.render(**render_kwargs)

    actions = np.array(actions)
    if len(actions.shape) == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    if len(observations.shape) == 1:
        observations = np.expand_dims(observations, 1)
    next_o = np.array([next_o])
    next_observations = np.vstack(
        (
            observations[1:, :],
            np.expand_dims(next_o, 0)
        )
    )
    result = dict(
        observations=observations,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
    if len(rgb_array) > 0 and rgb_array[0] is not None:
        result['rgb_array'] = np.array(rgb_array)
    return result
a90c712155648773e72d5226b0f2be4c7fe72b2a
3,659,167
def data_dir():
    """The data directory."""
    return DATA
f7696b434ebdab7ec1619f42bed124ba562de64d
3,659,169
def create_single_test(j):
    """Walk through the json cases and recursively write the test cases"""
    si = []
    for tnum, c in enumerate(j['cases']):
        if 'cases' in c:
            si.extend(create_single_test(c))
        else:
            si.extend(write_testcase(c, tnum))
    return si
4a37a95f59e90b5314ea225f58144fa112b9722e
3,659,170
def _token_text(token):
    """Helper to get the text of an antlr token w/o the <EOF>"""
    istream = token.getInputStream()
    if istream is None:
        return token.text
    n = istream.size
    if token.start >= n or token.stop >= n:
        return []
    return token.text
0821c44eea9dfc229034bebc45211f8e6336c552
3,659,171
def show_interface(enode, dev, shell=None):
    """
    Show the configured parameters and stats of an interface.

    :param enode: Engine node to communicate with.
    :type enode: topology.platforms.base.BaseNode
    :param str dev: Unix network device name. Ex 1, 2, 3..
    :rtype: dict
    :return: A combined dictionary as returned by both
     :func:`topology_lib_ip.parser._parse_ip_addr_show`
     :func:`topology_lib_ip.parser._parse_ip_stats_link_show`
    """
    assert dev

    cmd = 'ip addr list dev {ldev}'.format(ldev=dev)
    response = enode(cmd, shell=shell)
    first_half_dict = _parse_ip_addr_show(response)

    d = None
    if (first_half_dict):
        cmd = 'ip -s link list dev {ldev}'.format(ldev=dev)
        response = enode(cmd, shell=shell)
        second_half_dict = _parse_ip_stats_link_show(response)

        d = first_half_dict.copy()
        d.update(second_half_dict)
    return d
54ae542cf5df747ad45e016b8296a7ae5408635e
3,659,172
def get_params_for_category_api(category):
    """Method to get `GET` parameters for querying MediaWiki for category details.

    :param category: category name to be passed in params.
    :return: GET parameters `params`
    """
    params = CATEGORY_API_PARAMS.copy()
    params['cmtitle'] = 'Category:' + category
    return params
c97be0a2aae9b1d92e5a02d4376e0a186f669735
3,659,173
def get_dict_or_generate(dictionary, key, generator):
    """Get value from dict or generate one using a function on the key"""
    if key in dictionary:
        return dictionary[key]

    value = generator(key)
    dictionary[key] = value
    return value
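# Usage sketch (not part of the original source): memoize a cheap, purely
# illustrative key -> value computation.
cache = {}
value = get_dict_or_generate(cache, "alice", lambda name: len(name))
print(value, cache)   # 5 {'alice': 5}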
e31cd2b6661cf45e5345ce57d1e628174e6fd732
3,659,174
def createNotInConfSubGraph(graphSet, possibleSet):
    """
    Return a subgraph by removing all incoming edges to nodes in the possible set.
    """
    subGraph = {}
    for i in graphSet:
        subGraph[i] = graphSet[i] - possibleSet
    return subGraph
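# Usage sketch (not part of the original source): the graph is a dict of
# node -> set of successors; edges pointing into the possible set are dropped.
graph = {'a': {'b', 'c'}, 'b': {'c'}, 'c': set()}
print(createNotInConfSubGraph(graph, {'b', 'c'}))
# {'a': set(), 'b': set(), 'c': set()}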
d3cbee9049416d7ff865306713e9a12f26717fae
3,659,175
def _backprop_gradient_pure(dL, L):
    """
    Given the derivative of an objective fn with respect to the cholesky L,
    compute the derivative with respect to the original matrix K, defined as

        K = LL^T

    where L was obtained by Cholesky decomposition.
    """
    dL_dK = np.tril(dL).copy()
    N = L.shape[0]
    for k in range(N - 1, -1, -1):
        for j in range(k + 1, N):
            for i in range(j, N):
                dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
                dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
        for j in range(k + 1, N):
            dL_dK[j, k] /= L[k, k]
            dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
        dL_dK[k, k] /= (2 * L[k, k])
    return dL_dK
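# Usage sketch (not part of the original source): push a made-up gradient
# w.r.t. L back through the Cholesky factorization of a small SPD matrix K.
import numpy as np

K = np.array([[4.0, 2.0], [2.0, 3.0]])
L = np.linalg.cholesky(K)
dL = np.tril(np.ones_like(L))          # pretend dObjective/dL is all ones
dK = _backprop_gradient_pure(dL, L)
print(dK)                              # lower-triangular gradient w.r.t. K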
28ab304a375e20f952da341024a09477221d54c5
3,659,176
import random


def get_random_instance() -> random.Random:
    """
    Returns the Random instance in the random module level.
    """
    return random._inst
ee66055275153ce8c3eae67eade6e32e50fe1d79
3,659,177
import types


def to(cond, inclusive=True):
    """
    Stream elements until the one that fits some condition.

    Arguments:
        cond -- Either a function or some other object. In the first case, the function will be applied to each
            element; in the second case, the object will be compared (using ==) with each element.

    Keyword Arguments:
        inclusive -- Whether the element first matching the criteria is streamed (default True)

    See Also:
        :func:`dagpype.filt`
        :func:`dagpype.from_`
        :func:`dagpype.from_to`
        :func:`dagpype.skip`
        :func:`dagpype.nth`
        :func:`dagpype.slice_`
        :func:`dagpype.tail`

    Examples:

    >>> source([1, 2, 3, 4, 3, 2, 1]) | to(2) | to_list()
    [1, 2]

    >>> source([1, 2, 3, 4, 3, 2, 1]) | to(2, False) | to_list()
    [1]

    >>> source([1, 2, 3, 4, 3, 2, 1]) | to(lambda d: d % 3 == 0) | to_list()
    [1, 2, 3]
    """
    @filters
    def _dagpype_internal_fn_act(target):
        try:
            if isinstance(cond, types.FunctionType):
                while True:
                    e = (yield)
                    if cond(e):
                        break
                    target.send(e)
            else:
                while True:
                    e = (yield)
                    if e == cond:
                        break
                    target.send(e)
            if inclusive:
                target.send(e)
            target.close()
        except GeneratorExit:
            target.close()

    return _dagpype_internal_fn_act
bc7b4fec4b868e12f4e075256ada80c05dfd2c4d
3,659,178
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """

    # initialize parameters with zeros (≈ 1 line of code)
    w, b = initialize_with_zeros(X_train.shape[0])
    print(w.shape)
    print(b)

    # Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(b=b, learning_rate=learning_rate, num_iterations=num_iterations,
                                        print_cost=print_cost, w=w, X=X_train, Y=Y_train)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(b=b, w=w, X=X_test)
    Y_prediction_train = predict(b=b, w=w, X=X_train)

    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}

    return d
073f474ada5d43811564180026cb9d4b2b052cf4
3,659,179
def mixlogistic_invcdf(y, *, logits, means, logscales, mix_dim,
                       tol=1e-8, max_bisection_iters=60, init_bounds_scale=100.):
    """
    inverse cumulative distribution function of a mixture of logistics, via bisection
    """
    if _FORCE_ACCURATE_INV_CDF:
        tol = min(tol, 1e-14)
        max_bisection_iters = max(max_bisection_iters, 200)
        init_bounds_scale = max(init_bounds_scale, 100.)
    return mixlogistic_invlogcdf(y.log(), logits=logits, means=means, logscales=logscales, mix_dim=mix_dim,
                                 tol=tol, max_bisection_iters=max_bisection_iters,
                                 init_bounds_scale=init_bounds_scale)
ef25170fbaaa5eae55b22b09d2d2fb66d20d03fe
3,659,180
import cgitb


def FormatException(exc_info):
    """Gets information from exception info tuple.

    Args:
        exc_info: exception info tuple (type, value, traceback)

    Returns:
        exception description in a list - wsgi application response format.
    """
    return [cgitb.handler(exc_info)]
733c2170a08f9880f8c191c1c6a52ee1ab455b7f
3,659,181
def trackers_init(box, vid_path, image):
    """Initialize a single CSRT tracker on the given box and open the video source."""
    tracker = cv2.TrackerCSRT_create()
    tracker.init(image, box)
    return tracker, cv2.VideoCapture(vid_path)
9b32501ad68dcc698fad2b734ff140be7a137903
3,659,182
from typing import Optional


def get_image(id: Optional[int] = None,
              name: Optional[str] = None,
              slug: Optional[str] = None,
              source: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImageResult:
    """
    Get information on an image for use in other resources (e.g. creating a Droplet
    based on snapshot). This data source provides all of the image properties as
    configured on your DigitalOcean account. This is useful if the image in question
    is not managed by the provider or you need to utilize any of the image's data.

    An error is triggered if zero or more than one result is returned by the query.

    ## Example Usage

    Get the data about a snapshot:

    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean

    example1 = digitalocean.get_image(name="example-1.0.0")
    ```

    Reuse the data about a snapshot to create a Droplet:

    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean

    example_image = digitalocean.get_image(name="example-1.0.0")
    example_droplet = digitalocean.Droplet("exampleDroplet",
        image=example_image.id,
        region="nyc2",
        size="s-1vcpu-1gb")
    ```

    Get the data about an official image:

    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean

    example2 = digitalocean.get_image(slug="ubuntu-18-04-x64")
    ```

    :param int id: The id of the image
    :param str name: The name of the image.
    :param str slug: The slug of the official image.
    :param str source: Restrict the search to one of the following categories of images:
    """
    __args__ = dict()
    __args__['id'] = id
    __args__['name'] = name
    __args__['slug'] = slug
    __args__['source'] = source
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('digitalocean:index/getImage:getImage', __args__, opts=opts, typ=GetImageResult).value

    return AwaitableGetImageResult(
        created=__ret__.created,
        description=__ret__.description,
        distribution=__ret__.distribution,
        error_message=__ret__.error_message,
        id=__ret__.id,
        image=__ret__.image,
        min_disk_size=__ret__.min_disk_size,
        name=__ret__.name,
        private=__ret__.private,
        regions=__ret__.regions,
        size_gigabytes=__ret__.size_gigabytes,
        slug=__ret__.slug,
        source=__ret__.source,
        status=__ret__.status,
        tags=__ret__.tags,
        type=__ret__.type)
180e133173ddb6e99d1743326ec5dcacbc7d5901
3,659,183
def _infer_added_params(kw_params):
    """
    Infer values for proplot's "added" parameters from stylesheets.
    """
    kw_proplot = {}
    mpl_to_proplot = {
        'font.size': ('tick.labelsize',),
        'axes.titlesize': (
            'abc.size', 'suptitle.size', 'title.size',
            'leftlabel.size', 'rightlabel.size',
            'toplabel.size', 'bottomlabel.size',
        ),
        'text.color': (
            'abc.color', 'suptitle.color', 'tick.labelcolor', 'title.color',
            'leftlabel.color', 'rightlabel.color',
            'toplabel.color', 'bottomlabel.color',
        ),
    }
    for key, params in mpl_to_proplot.items():
        if key in kw_params:
            value = kw_params[key]
            for param in params:
                kw_proplot[param] = value
    return kw_proplot
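# Usage sketch (not part of the original source): one matplotlib key from a
# stylesheet expands into the corresponding proplot-only keys.
kw = {"text.color": "gray"}
print(_infer_added_params(kw)["abc.color"])   # gray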
fec171caef3562344ee86684edc944b0d08af3f3
3,659,184
def create_table_description(config: ConfigLoader):
    """
    creates the description for the pytables table used for dataloading
    """
    n_sample_values = int(config.SAMPLING_RATE * config.SAMPLE_DURATION)

    table_description = {
        COLUMN_MOUSE_ID: tables.Int16Col(),
        COLUMN_LABEL: tables.StringCol(10)
    }
    for c in config.CHANNELS:
        table_description[c] = tables.Float32Col(shape=n_sample_values)

    return table_description
bd26332586a87e66e14427adb3b0c1ddfd809ce9
3,659,185
def get_target_rank_list(daos_object):
    """Get a list of target ranks from a DAOS object.

    Note:
        The DaosObj function called is not part of the public API

    Args:
        daos_object (DaosObj): the object from which to get the list of targets

    Raises:
        DaosTestError: if there is an error obtaining the target list from the
            object

    Returns:
        list: list of targets for the specified object

    """
    try:
        daos_object.get_layout()
        return daos_object.tgt_rank_list
    except DaosApiError as error:
        raise DaosTestError(
            "Error obtaining target list for the object: {}".format(error))
9ce003a4e21ed0fbbf58b57989273939613fff95
3,659,186
from copy import copy


def find_global_best(particles_best=None):
    """
    Searches the per-particle bests for the one with the lowest total cost and
    makes it the global best.

    :param particles_best: iterable of per-particle best solutions
    :return: the best particle found
    """
    # Note: the original signature was `particle_best=[]` while the body iterated
    # over `particles_best`; the parameter is renamed here so the loop actually
    # uses the argument, and the mutable default is avoided.
    if particles_best is None:
        particles_best = []

    best_found = None
    for particle in particles_best:
        if best_found is None:
            best_found = copy(particle)
        elif particle.total_cost < best_found.total_cost:
            best_found = copy(particle)

    print('\nBest found: ', best_found)
    return best_found
15a6b0f970e385fdc83fcffe19808c61d2a14d7f
3,659,187
def rename_to_monet_latlon(ds):
    """Rename the latitude/longitude coordinates of a dataset to the MONET
    convention ("latitude"/"longitude").

    Parameters
    ----------
    ds : xarray.Dataset or xarray.DataArray
        Dataset whose coordinates should be renamed.

    Returns
    -------
    Same type as `ds`
        Dataset with coordinates renamed, or `ds` unchanged if no known
        latitude/longitude coordinate names are found.
    """
    if "lat" in ds.coords:
        return ds.rename({"lat": "latitude", "lon": "longitude"})
    elif "Latitude" in ds.coords:
        return ds.rename({"Latitude": "latitude", "Longitude": "longitude"})
    elif "Lat" in ds.coords:
        return ds.rename({"Lat": "latitude", "Lon": "longitude"})
    elif "grid_lat" in ds.coords:
        return ds.rename({"grid_lat": "latitude", "grid_lon": "longitude"})
    else:
        return ds
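# Usage sketch (not part of the original source), assuming xarray is available:
# a tiny dataset whose coordinates are named "lat"/"lon".
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"t2m": (("lat", "lon"), np.zeros((2, 2)))},
    coords={"lat": [0.0, 1.0], "lon": [10.0, 11.0]},
)
print(list(rename_to_monet_latlon(ds).coords))   # ['latitude', 'longitude']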
18647e3bbf82bae9d02db3e965c0ddfd51ddd6dd
3,659,188
def payments_reset():
    """ Removes all payments from the database """
    Payment.remove_all()
    return make_response('', status.HTTP_204_NO_CONTENT)
c5132e8a1809a2b04ba4282d3f05aafbcf996209
3,659,189