Dataset columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M).
def _parse_descriptor(desc: str, ctx: '_ParseDescriptorContext') -> 'Descriptor':
    """
    :meta private:

    Parse a descriptor given the context level we are in.
    Used recursively to parse subdescriptors

    :param desc: The descriptor string to parse
    :param ctx: The :class:`_ParseDescriptorContext` indicating the level we are in
    :return: The parsed descriptor
    :raises: ValueError: if the descriptor is malformed
    """
    func, expr = _get_func_expr(desc)
    if func == "pk":
        pubkey, expr = parse_pubkey(expr)
        if expr:
            raise ValueError("more than one pubkey in pk descriptor")
        return PKDescriptor(pubkey)
    if func == "pkh":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH or ctx == _ParseDescriptorContext.P2WSH):
            raise ValueError("Can only have pkh at top level, in sh(), or in wsh()")
        pubkey, expr = parse_pubkey(expr)
        if expr:
            raise ValueError("More than one pubkey in pkh descriptor")
        return PKHDescriptor(pubkey)
    if func == "sortedmulti" or func == "multi":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH or ctx == _ParseDescriptorContext.P2WSH):
            raise ValueError("Can only have multi/sortedmulti at top level, in sh(), or in wsh()")
        is_sorted = func == "sortedmulti"
        comma_idx = expr.index(",")
        thresh = int(expr[:comma_idx])
        expr = expr[comma_idx + 1:]
        pubkeys = []
        while expr:
            pubkey, expr = parse_pubkey(expr)
            pubkeys.append(pubkey)
        if len(pubkeys) == 0 or len(pubkeys) > 16:
            raise ValueError("Cannot have {} keys in a multisig; must have between 1 and 16 keys, inclusive".format(len(pubkeys)))
        elif thresh < 1:
            raise ValueError("Multisig threshold cannot be {}, must be at least 1".format(thresh))
        elif thresh > len(pubkeys):
            raise ValueError("Multisig threshold cannot be larger than the number of keys; threshold is {} but only {} keys specified".format(thresh, len(pubkeys)))
        if ctx == _ParseDescriptorContext.TOP and len(pubkeys) > 3:
            raise ValueError("Cannot have {} pubkeys in bare multisig: only at most 3 pubkeys".format(len(pubkeys)))
        return MultisigDescriptor(pubkeys, thresh, is_sorted)
    if func == "wpkh":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH):
            raise ValueError("Can only have wpkh() at top level or inside sh()")
        pubkey, expr = parse_pubkey(expr)
        if expr:
            raise ValueError("More than one pubkey in wpkh descriptor")
        return WPKHDescriptor(pubkey)
    if func == "sh":
        if ctx != _ParseDescriptorContext.TOP:
            raise ValueError("Can only have sh() at top level")
        subdesc = _parse_descriptor(expr, _ParseDescriptorContext.P2SH)
        return SHDescriptor(subdesc)
    if func == "wsh":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH):
            raise ValueError("Can only have wsh() at top level or inside sh()")
        subdesc = _parse_descriptor(expr, _ParseDescriptorContext.P2WSH)
        return WSHDescriptor(subdesc)
    if func == "tr":
        if ctx != _ParseDescriptorContext.TOP:
            raise ValueError("Can only have tr at top level")
        internal_key, expr = parse_pubkey(expr)
        subscripts = []
        depths = []
        if expr:
            # Path from top of the tree to what we're currently processing.
            # branches[i] == False: left branch in the i'th step from the top
            # branches[i] == True: right branch
            branches = []
            while True:
                # Process open braces
                while True:
                    try:
                        expr = _get_const(expr, "{")
                        branches.append(False)
                    except ValueError:
                        break
                if len(branches) > MAX_TAPROOT_NODES:
                    raise ValueError("tr() supports at most {} nesting levels".format(MAX_TAPROOT_NODES))
                # Process script expression
                sarg, expr = _get_expr(expr)
                subscripts.append(_parse_descriptor(sarg, _ParseDescriptorContext.P2TR))
                depths.append(len(branches))
                # Process closing braces
                while len(branches) > 0 and branches[-1]:
                    expr = _get_const(expr, "}")
                    branches.pop()
                # If we're at the end of a left branch, expect a comma
                if len(branches) > 0 and not branches[-1]:
                    expr = _get_const(expr, ",")
                    branches[-1] = True
                if len(branches) == 0:
                    break
        return TRDescriptor(internal_key, subscripts, depths)
    if ctx == _ParseDescriptorContext.P2SH:
        raise ValueError("A function is needed within P2SH")
    elif ctx == _ParseDescriptorContext.P2WSH:
        raise ValueError("A function is needed within P2WSH")
    raise ValueError("{} is not a valid descriptor function".format(func))
7c755482ba31ce656ae597eeb166de9cfaa2f649
400
def get_editable_fields(cc_content, context):
    """
    Return the set of fields that the requester can edit on the given content
    """
    # For a closed thread:
    # no edits, except 'abuse_flagged' and 'read' are allowed for thread
    # no edits, except 'abuse_flagged' is allowed for comment
    ret = {"abuse_flagged"}
    if cc_content["type"] == "thread" and cc_content["closed"]:
        ret |= {"read"}
        return ret
    if cc_content["type"] == "comment" and context["thread"]["closed"]:
        return ret

    # Shared fields
    ret |= {"voted"}
    if _is_author_or_privileged(cc_content, context):
        ret |= {"raw_body"}

    # Thread fields
    if cc_content["type"] == "thread":
        ret |= {"following", "read"}
        if _is_author_or_privileged(cc_content, context):
            ret |= {"topic_id", "type", "title"}
        if context["is_requester_privileged"] and context["discussion_division_enabled"]:
            ret |= {"group_id"}

    # Comment fields
    if (
            cc_content["type"] == "comment" and (
                context["is_requester_privileged"] or (
                    _is_author(context["thread"], context) and
                    context["thread"]["thread_type"] == "question"
                )
            )
    ):
        ret |= {"endorsed"}

    return ret
19cea27fdda79b365c25329851bb7baf8d18bcac
401
import numpy as np


def rate_of_matrix_function(A, Adot, f, fprime):
    r"""Find the rate of the tensor A

    Parameters
    ----------
    A : ndarray (3,3)
        A diagonalizable tensor
    Adot : ndarray (3,3)
        Rate of A
    f : callable
    fprime : callable
        Derivative of f

    Returns
    -------
    Ydot : ndarray (3,3)

    Notes
    -----
    For a diagonalizable tensor A (the strain) which has a quasi-arbitrary
    spectral expansion

    .. math::
        A = \sum_{i=1}^3 \lambda_i P_{i}

    and if a second tensor Y is a principal function of A, defined by

    .. math::
        Y = \sum_{i=1}^3 f(\lambda_i) P_i,

    compute the time rate \dot{Y}. Algorithm taken from Brannon's Tensor book,
    from the highlighted box near Equation (28.404) on page 550.
    """
    # Compute the eigenvalues and eigenprojections.
    eig_vals, eig_vecs = np.linalg.eig(A)
    eig_projs = [np.outer(eig_vecs[:, i], eig_vecs[:, i]) for i in [0, 1, 2]]

    # Assemble the rate of Y.
    Ydot = np.zeros((3, 3))
    for eigi, proji in zip(eig_vals, eig_projs):
        for eigj, projj in zip(eig_vals, eig_projs):
            if eigi == eigj:
                gamma = fprime(eigi)
            else:
                gamma = (f(eigi) - f(eigj)) / (eigi - eigj)
            Ydot += gamma * np.dot(proji, np.dot(Adot, projj))

    return Ydot
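A quick sanity check of the spectral-rate formula (a sketch; assumes numpy and scipy are available): for a symmetric A and f = exp, the result should agree with a central finite difference of the matrix exponential.

import numpy as np
from scipy.linalg import expm

A = np.diag([1.0, 2.0, 3.0])
Adot = np.array([[0.0, 0.1, 0.0],
                 [0.1, 0.0, 0.2],
                 [0.0, 0.2, 0.0]])
Ydot = rate_of_matrix_function(A, Adot, np.exp, np.exp)

h = 1e-6  # central difference: (exp(A + h*Adot) - exp(A - h*Adot)) / (2h)
fd = (expm(A + h * Adot) - expm(A - h * Adot)) / (2 * h)
assert np.allclose(Ydot, fd, atol=1e-4)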
404d189b2bc6cc91c30ef857a8ebef7cc0db49d9
402
def enumerate_changes(levels):
    """Assign a unique integer to each run of identical values.

    Repeated but non-consecutive values will be assigned different integers.
    """
    return levels.diff().fillna(0).abs().cumsum().astype(int)
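Usage sketch (assumes pandas): consecutive runs share an id, and a value that reappears later starts a new run.

import pandas as pd

levels = pd.Series([1, 1, 2, 2, 1, 1])
print(enumerate_changes(levels).tolist())  # [0, 0, 1, 1, 2, 2]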
4787c0e84d6bca8f6038389e5bebf74317059ed8
403
def TDataStd_ByteArray_Set(*args):
    """
    * Finds or creates an attribute with the array. If <isDelta> == False,
    DefaultDeltaOnModification is used. If attribute is already set, all input
    parameters are refused and the found attribute is returned.

    :param label:
    :type label: TDF_Label &
    :param lower:
    :type lower: int
    :param upper:
    :type upper: int
    :param isDelta: default value is Standard_False
    :type isDelta: bool
    :rtype: Handle_TDataStd_ByteArray
    """
    return _TDataStd.TDataStd_ByteArray_Set(*args)
a0f3402e1106021affb3dfe12fe93c5ae8ed2dad
404
def _get_total_elements(viewer) -> int:
    """
    We need to fetch a workflows listing to figure out how many entries we
    have in the database, since the API does not contain a method to count
    the DB entries.

    :param viewer: CWL Viewer instance URL
    :return: number of total elements in the CWL Viewer instance DB
    """
    smallest_workflow_dataset: dict = _fetch_workflows_data(viewer, 0, 1).json()
    return int(smallest_workflow_dataset['totalElements'])
a7289ed13546b68e381793e0fdd8410f986f87d4
405
def entrepreneursIncubated(dateFrom=None, dateTo=None):
    """
    Returns all entrepreneurs whose projects reached the incubation stage
    """
    queryset = Stage.objects
    output = {
        'queryset': None,
        'fields': [],
        'values': [],
        'fieldLabels': [],
    }
    queryset = queryset.filter(stage_type="IN")
    # check for duplicates
    projects = Project.objects.filter(id__in=queryset.values('project_id'))
    entrepreneurs = Entrepreneur.objects.filter(id__in=projects.values('entrepreneurs'))
    output['queryset'] = entrepreneurs
    fieldsDict = helperDictionaries.getModelReportFields('entrepreneurs')
    output['fieldDict'] = fieldsDict
    output['fields'] = [*fieldsDict.keys()]
    output['fieldLabels'] = [*fieldsDict.values()]
    return output
5b06dc8a9ca15357ecab20e615d329fcaaffc8d8
406
def get_steps(x, shape):
    """
    Convert a (vocab_size, steps * batch_size) array
    into a [(vocab_size, batch_size)] * steps list of views
    """
    steps = shape[1]
    if x is None:
        return [None for step in range(steps)]
    xs = x.reshape(shape + (-1,))
    return [xs[:, step, :] for step in range(steps)]
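Usage sketch (assumes numpy): a (vocab_size=4, steps*batch=6) array split into 3 views of shape (4, 2); note that the shape argument is (vocab_size, steps).

import numpy as np

x = np.arange(24).reshape(4, 6)
views = get_steps(x, (4, 3))
assert len(views) == 3 and views[0].shape == (4, 2)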
44133ddd1ad78b3ea05042c6c16558bb982c9206
407
def LHS(a: int, operation1: str, b: int, operation2: str, c: float):
    """
    E.g. LHS(a, 'plus', b, 'times', c) does (a + b) * c

    params:
        a: int. First number in equation
        operation1: str. Must be 'plus', 'minus', 'times', 'divide'
        b: int. Second number in equation
        operation2: str. Must be 'plus', 'minus', 'times', 'divide'
        c: float. Third number in equation

    return: int
    """
    step_1 = word_function(a, operation1, b)
    step_2 = word_function(step_1, operation2, c)
    return step_2
af023f3cd70d123492c8a6abb92d7ae2994b56ae
408
import math


def _validate(api_indicator_matype, option, parameters: dict, **kwargs):  # -> dict
    """Validates kwargs and attaches them to parameters."""
    # APO, PPO, BBANDS
    matype = int(math.fabs(kwargs["matype"])) if "matype" in kwargs else None
    if option == "matype" and matype is not None and matype in api_indicator_matype:
        parameters["matype"] = matype

    # BBANDS
    nbdevup = math.fabs(kwargs["nbdevup"]) if "nbdevup" in kwargs else None
    nbdevdn = math.fabs(kwargs["nbdevdn"]) if "nbdevdn" in kwargs else None
    if option == "nbdevup" and nbdevup is not None:
        parameters["nbdevup"] = nbdevup
    if option == "nbdevdn" and nbdevdn is not None:
        parameters["nbdevdn"] = nbdevdn

    # ULTOSC
    timeperiod1 = int(math.fabs(kwargs["timeperiod1"])) if "timeperiod1" in kwargs else None
    timeperiod2 = int(math.fabs(kwargs["timeperiod2"])) if "timeperiod2" in kwargs else None
    timeperiod3 = int(math.fabs(kwargs["timeperiod3"])) if "timeperiod3" in kwargs else None
    if option == "timeperiod1" and timeperiod1 is not None:
        parameters["timeperiod1"] = timeperiod1
    if option == "timeperiod2" and timeperiod2 is not None:
        parameters["timeperiod2"] = timeperiod2
    if option == "timeperiod3" and timeperiod3 is not None:
        parameters["timeperiod3"] = timeperiod3

    # SAR
    acceleration = math.fabs(float(kwargs["acceleration"])) if "acceleration" in kwargs else None
    maximum = math.fabs(float(kwargs["maximum"])) if "maximum" in kwargs else None
    if option == "acceleration" and acceleration is not None:
        parameters["acceleration"] = acceleration
    if option == "maximum" and maximum is not None:
        parameters["maximum"] = maximum

    # MAMA
    fastlimit = math.fabs(float(kwargs["fastlimit"])) if "fastlimit" in kwargs else None
    slowlimit = math.fabs(float(kwargs["slowlimit"])) if "slowlimit" in kwargs else None
    if option == "fastlimit" and fastlimit is not None and 0 < fastlimit < 1:
        parameters["fastlimit"] = fastlimit
    if option == "slowlimit" and slowlimit is not None and 0 < slowlimit < 1:
        parameters["slowlimit"] = slowlimit

    # MACD, APO, PPO, ADOSC
    fastperiod = int(math.fabs(kwargs["fastperiod"])) if "fastperiod" in kwargs else None
    slowperiod = int(math.fabs(kwargs["slowperiod"])) if "slowperiod" in kwargs else None
    signalperiod = int(math.fabs(kwargs["signalperiod"])) if "signalperiod" in kwargs else None
    if option == "fastperiod" and fastperiod is not None:
        parameters["fastperiod"] = fastperiod
    if option == "slowperiod" and slowperiod is not None:
        parameters["slowperiod"] = slowperiod
    if option == "signalperiod" and signalperiod is not None:
        parameters["signalperiod"] = signalperiod

    # MACDEXT
    fastmatype = int(math.fabs(kwargs["fastmatype"])) if "fastmatype" in kwargs else None
    slowmatype = int(math.fabs(kwargs["slowmatype"])) if "slowmatype" in kwargs else None
    signalmatype = int(math.fabs(kwargs["signalmatype"])) if "signalmatype" in kwargs else None
    if option == "fastmatype" and fastmatype is not None and fastmatype in api_indicator_matype:
        parameters["fastmatype"] = fastmatype
    if option == "slowmatype" and slowmatype is not None and slowmatype in api_indicator_matype:
        parameters["slowmatype"] = slowmatype
    if option == "signalmatype" and signalmatype is not None and signalmatype in api_indicator_matype:
        parameters["signalmatype"] = signalmatype

    # STOCH(F), STOCHRSI
    fastkperiod = int(math.fabs(kwargs["fastkperiod"])) if "fastkperiod" in kwargs else None
    fastdperiod = int(math.fabs(kwargs["fastdperiod"])) if "fastdperiod" in kwargs else None
    fastdmatype = int(math.fabs(kwargs["fastdmatype"])) if "fastdmatype" in kwargs else None
    if option == "fastkperiod" and fastkperiod is not None:
        parameters["fastkperiod"] = fastkperiod
    if option == "fastdperiod" and fastdperiod is not None:
        parameters["fastdperiod"] = fastdperiod
    if option == "fastdmatype" and fastdmatype is not None and fastdmatype in api_indicator_matype:
        parameters["fastdmatype"] = fastdmatype

    # STOCH(F), STOCHRSI
    slowkperiod = int(math.fabs(kwargs["slowkperiod"])) if "slowkperiod" in kwargs else None
    slowdperiod = int(math.fabs(kwargs["slowdperiod"])) if "slowdperiod" in kwargs else None
    slowkmatype = int(math.fabs(kwargs["slowkmatype"])) if "slowkmatype" in kwargs else None
    slowdmatype = int(math.fabs(kwargs["slowdmatype"])) if "slowdmatype" in kwargs else None
    if option == "slowkperiod" and slowkperiod is not None:
        parameters["slowkperiod"] = slowkperiod
    if option == "slowdperiod" and slowdperiod is not None:
        parameters["slowdperiod"] = slowdperiod
    if option == "slowkmatype" and slowkmatype is not None and slowkmatype in api_indicator_matype:
        parameters["slowkmatype"] = slowkmatype
    if option == "slowdmatype" and slowdmatype is not None and slowdmatype in api_indicator_matype:
        parameters["slowdmatype"] = slowdmatype

    return parameters
d73903514aa87f854d08e3447cca85f64eaa4b31
409
def scale_y_values(y_data, y_reference, y_max):
    """
    Scale the plot in y direction, to prevent extreme values.

    :param y_data: the y data of the plot
    :param y_reference: the maximum value of the plot series (e.g. Normal
        force), which will be scaled to y_max
    :param y_max: the maximum y value for the plot (e.g. if y_max=1, no y
        value in the plot will be greater than 1)
    """
    multipl_factor = y_max / y_reference
    for i in range(len(y_data)):
        y_data[i] = y_data[i] * multipl_factor
    return y_data, multipl_factor
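Usage sketch: scale a series so that the reference value maps to y_max.

y, factor = scale_y_values([2.0, 4.0, 8.0], y_reference=8.0, y_max=1.0)
print(y, factor)  # [0.25, 0.5, 1.0] 0.125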
b3b22b0f868ce46926a4eecfc1c5d0ac2a7c1f7e
410
def set_heating_contribution(agent, pv_power):
    """If the water tank is currently in use, compute and return the part
    of the pv_power used for heating the water"""
    pv_power_to_heating = 0
    if agent.water_tank.is_active():
        pv_power_to_heating = pv_power * agent.pv_panel.heating_contribution
    return pv_power_to_heating
ece29b7f0fbbe10907ada8fd1450919f01ab74c3
411
from tqdm import tqdm


def predict_direction(clf, tickers, **kwargs):
    """
    Use clf (an untrained classifier) to predict direction of change for
    validation data for each stock in 'tickers'. Pass additional keyword
    arguments to be used in building the stock datasets.

    Args:
        --clf: An untrained sklearn classifier
        --tickers: A list of tickers to use
        --kwargs: Additional arguments for the StockDataset class

    Returns:
        A dictionary where each key is a ticker in 'tickers' and each value
        is the accuracy for the predictions for that ticker.
    """
    results = {}
    for ticker in tqdm(tickers):
        # Build and split dataset
        ds = StockDataset(tickers=ticker, quiet=True, **kwargs)
        t_data, v_data, t_label, v_label = ds.split(label_field='Direction')
        # Clone classifier
        clf_clone = sklearn.base.clone(clf)
        # Fit classifier to data
        clf_clone.fit(t_data, t_label)
        # Predict and store results
        v_pred = clf_clone.predict(v_data)
        results[ticker] = mymetrics.direction_accuracy(v_label, v_pred)
    return results
798e25d3b652227407b50e8eec9f0289770d9d9a
412
import os
import csv


def load_from_csv():
    """Loads a list of Currency objects from CSV"""
    file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'currencies.csv')
    currencies = []
    with open(file) as csvfile:
        reader = csv.reader(csvfile)
        headers = next(reader)  # skip the header row
        for row in reader:
            currencies.append(Currency(*row))
    return currencies
cc7fa3f50f0e0cd4a3a04d013d7baae2a4f55e5f
413
def parse_primary(index):
    """Parse primary expression."""
    if token_is(index, token_kinds.open_paren):
        node, index = parse_expression(index + 1)
        index = match_token(index, token_kinds.close_paren, ParserError.GOT)
        return expr_nodes.ParenExpr(node), index
    elif token_is(index, token_kinds.number):
        return expr_nodes.Number(p.tokens[index]), index + 1
    elif (token_is(index, token_kinds.identifier)
          and not p.symbols.is_typedef(p.tokens[index])):
        return expr_nodes.Identifier(p.tokens[index]), index + 1
    elif token_is(index, token_kinds.string):
        return expr_nodes.String(p.tokens[index].content), index + 1
    elif token_is(index, token_kinds.char_string):
        chars = p.tokens[index].content
        return expr_nodes.Number(chars[0]), index + 1
    else:
        raise_error("expected expression", index, ParserError.GOT)
2413a0793062e2cfa52fb8d922c21a3af7d06a66
414
def chopper_pulses_of_mode(i):
    """How many single pulses the chopper transmits per opening,
    or in hybrid mode, how many single bunches the transmitted
    intensity corresponds to, based on the current settings of
    the chopper.

    i: 0-based integer"""
    if isnan(i) or i < 0 or i >= len(chopper.pulses):
        return nan
    return chopper.pulses[int(i)]
c10ade662b515cfdde721b9c7c8cb2aac7fa8c03
415
def _get_content_from_tag(tag):
    """Gets the content from tag till before a new section."""
    contents = []
    next_tag = tag
    while next_tag and not _is_section(next_tag):
        content = parse_content(next_tag.text())
        if content:
            contents.append(content)
        next_tag = next_tag.next
    return ' '.join(contents)
832f01b7db2a5c2cdcc3454b1253a8399464952e
416
async def get_connections(request: data_models.ConnectionsRequest):
    """Get connections *from* and *to* each entity in the request.

    Connections *to* are all the subject-predicate pairs where the entity is
    the object, and connections *from* are all the predicate-object pairs
    where the entity is the subject."""
    response = {}
    for ent in request.entities:
        ent_normalised = utils.normaliseURI(ent)
        connections_from = sparql_connector.get_sparql_results(
            sparql.get_p_o(ent_normalised, labels=request.labels, limit=request.limit)
        )["results"]["bindings"]
        connections_to = sparql_connector.get_sparql_results(
            sparql.get_s_p(ent_normalised, labels=request.labels, limit=request.limit)
        )["results"]["bindings"]

        for predicate_object_dict in connections_from:
            if (
                "collections.vam.ac.uk" in predicate_object_dict["object"]["value"]
            ) and "objectLabel" not in predicate_object_dict:
                object_label = utils.get_vam_object_title(
                    predicate_object_dict["object"]["value"]
                )
                if object_label is not None:
                    predicate_object_dict["objectLabel"] = dict()
                    predicate_object_dict["objectLabel"]["type"] = "literal"
                    predicate_object_dict["objectLabel"]["value"] = object_label

        for subject_predicate_dict in connections_to:
            if (
                "collections.vam.ac.uk" in subject_predicate_dict["subject"]["value"]
            ) and "subjectLabel" not in subject_predicate_dict:
                subject_label = utils.get_vam_object_title(
                    subject_predicate_dict["subject"]["value"]
                )
                if subject_label is not None:
                    subject_predicate_dict["subjectLabel"] = dict()
                    subject_predicate_dict["subjectLabel"]["type"] = "literal"
                    subject_predicate_dict["subjectLabel"]["value"] = subject_label

        response.update(
            {
                ent: {
                    "from": connections_from,
                    "to": connections_to,
                }
            }
        )

    return response
e25a363cbd4cbbaa7a2f36132f73fcfa2ebd1d3c
417
def sunlight_duration(hour_angle_sunrise):
    """Returns the duration of Sunlight, in minutes, with
    Hour Angle in degrees, hour_angle."""
    # The Sun is up for 2 * HA degrees of Earth rotation; at 1440 minutes
    # per 360 degrees, that is 2 * 1440 / 360 = 8 minutes per degree.
    return 8 * hour_angle_sunrise
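A worked check of the factor of 8: at an hour angle of 90 degrees the Sun is up for half the day.

print(sunlight_duration(90.0))  # 720.0 minutes = 12 hours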
b2887dd86caf25e7cac613bfa10b4de26c932c09
418
import warnings


def add_particle_bunch_gaussian(sim, q, m, sig_r, sig_z, n_emit, gamma0,
                                sig_gamma, n_physical_particles,
                                n_macroparticles, tf=0., zf=0., boost=None,
                                save_beam=None, z_injection_plane=None,
                                initialize_self_field=True):
    """
    Introduce a relativistic Gaussian particle bunch in the simulation,
    along with its space charge field.

    The bunch is initialized with a normalized emittance `n_emit`,
    in such a way that it will be focused at time `tf`, at the position `zf`.
    Thus if `tf` is not 0, the bunch will be initially out of focus.
    (This does not take space charge effects into account.)

    Parameters
    ----------
    sim : a Simulation object
        The structure that contains the simulation.
    q : float (in Coulomb)
        Charge of the particle species
    m : float (in kg)
        Mass of the particle species
    sig_r : float (in meters)
        The transverse RMS bunch size.
    sig_z : float (in meters)
        The longitudinal RMS bunch size.
    n_emit : float (in meters)
        The normalized emittance of the bunch.
    gamma0 : float
        The Lorentz factor of the electrons.
    sig_gamma : float
        The absolute energy spread of the bunch.
    n_physical_particles : float
        The number of physical particles (e.g. electrons) the bunch should
        consist of.
    n_macroparticles : int
        The number of macroparticles the bunch should consist of.
    zf : float (in meters), optional
        Position of the focus.
    tf : float (in seconds), optional
        Time at which the bunch reaches focus.
    boost : a BoostConverter object, optional
        A BoostConverter object defining the Lorentz boost of the simulation.
    save_beam : string, optional
        Saves the generated beam distribution as an .npz file "string".npz
    z_injection_plane : float (in meters) or None
        When `z_injection_plane` is not None, then particles have a ballistic
        motion for z<z_injection_plane. This is sometimes useful in
        boosted-frame simulations. `z_injection_plane` is always given in
        the lab frame.
    initialize_self_field : bool, optional
        Whether to calculate the initial space charge fields of the bunch
        and add these fields to the fields on the grid (Default: True)
    """
    # Generate Gaussian gamma distribution of the beam
    if sig_gamma > 0.:
        gamma = np.random.normal(gamma0, sig_gamma, n_macroparticles)
    else:
        # Zero energy spread beam
        gamma = np.full(n_macroparticles, gamma0)
        if sig_gamma < 0.:
            warnings.warn(
                "Negative energy spread sig_gamma detected."
                " sig_gamma will be set to zero. \n")
    # Get inverse gamma
    inv_gamma = 1. / gamma
    # Get Gaussian particle distribution in x,y,z
    x = sig_r * np.random.normal(0., 1., n_macroparticles)
    y = sig_r * np.random.normal(0., 1., n_macroparticles)
    z = zf + sig_z * np.random.normal(0., 1., n_macroparticles)
    # Define sigma of ux and uy based on normalized emittance
    sig_ur = (n_emit / sig_r)
    # Get Gaussian distribution of transverse normalized momenta ux, uy
    ux = sig_ur * np.random.normal(0., 1., n_macroparticles)
    uy = sig_ur * np.random.normal(0., 1., n_macroparticles)
    # Finally we calculate the uz of each particle
    # from the gamma and the transverse momenta ux, uy
    uz_sqr = (gamma ** 2 - 1) - ux ** 2 - uy ** 2

    # Check for unphysical particles with uz**2 < 0
    mask = uz_sqr >= 0
    N_new = np.count_nonzero(mask)
    if N_new < n_macroparticles:
        warnings.warn(
            "Particles with uz**2<0 detected."
            " %d Particles will be removed from the beam. \n"
            "This will truncate the distribution of the beam"
            " at gamma ~= 1. \n"
            "However, the charge will be kept constant. \n"
            % (n_macroparticles - N_new))
        # Remove unphysical particles with uz**2 < 0
        x = x[mask]
        y = y[mask]
        z = z[mask]
        ux = ux[mask]
        uy = uy[mask]
        inv_gamma = inv_gamma[mask]
        uz_sqr = uz_sqr[mask]
    # Calculate longitudinal momentum of the bunch
    uz = np.sqrt(uz_sqr)
    # Get weight of each particle
    w = n_physical_particles / N_new * np.ones_like(x)

    # Propagate distribution to an out-of-focus position tf.
    # (without taking space charge effects into account)
    if tf != 0.:
        x = x - ux * inv_gamma * c * tf
        y = y - uy * inv_gamma * c * tf
        z = z - uz * inv_gamma * c * tf

    # Save beam distribution to an .npz file
    if save_beam is not None:
        np.savez(save_beam, x=x, y=y, z=z, ux=ux, uy=uy, uz=uz,
                 inv_gamma=inv_gamma, w=w)

    # Add the electrons to the simulation
    ptcl_bunch = add_particle_bunch_from_arrays(
        sim, q, m, x, y, z, ux, uy, uz, w,
        boost=boost, z_injection_plane=z_injection_plane,
        initialize_self_field=initialize_self_field)
    return ptcl_bunch
3aedae4d9ff7ec6026b3153946939858db48d332
419
def sell_shares_nb(cash_now, shares_now, size, direction, price, fees,
                   fixed_fees, slippage, min_size, allow_partial,
                   raise_reject, log_record, log):
    """Sell shares."""

    # Get optimal order size
    if direction == Direction.LongOnly:
        final_size = min(shares_now, size)
    else:
        final_size = size

    # Check against minimum size
    if abs(final_size) < min_size:
        if raise_reject:
            raise RejectedOrderError("Order rejected: Final size is less than minimum allowed")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.MinSizeNotReached,
            log_record, log)

    # Check against partial fill
    if np.isfinite(size) and is_less_nb(final_size, size) and not allow_partial:  # np.inf doesn't count
        if raise_reject:
            raise RejectedOrderError("Order rejected: Final size is less than requested")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.PartialFill,
            log_record, log)

    # Get price adjusted with slippage
    adj_price = price * (1 - slippage)

    # Compute acquired cash
    acq_cash = final_size * adj_price

    # Update fees
    fees_paid = acq_cash * fees + fixed_fees

    # Get final cash by subtracting costs
    if is_less_nb(acq_cash, fees_paid):
        # Can't fill
        if raise_reject:
            raise RejectedOrderError("Order rejected: Fees cannot be covered")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.CantCoverFees,
            log_record, log)
    final_cash = acq_cash - fees_paid

    # Update current cash and shares
    new_cash = cash_now + final_cash
    new_shares = add_nb(shares_now, -final_size)

    # Return filled order
    order_result = OrderResult(
        final_size,
        adj_price,
        fees_paid,
        OrderSide.Sell,
        OrderStatus.Filled,
        -1
    )
    if log:
        fill_res_log_nb(new_cash, new_shares, order_result, log_record)
    return new_cash, new_shares, order_result
cf16446421ae19aa7f1d142ee195d5d9dfa20bdf
420
def radec2altaz(ra, dec, obstime, lat=None, lon=None, debug=False):
    """
    calculates the altitude and azimuth, given an ra, dec, time, and
    observatory location

    Parameters:
    ===========
    - ra: float
        The right ascension of the target (in degrees)
    - dec: float
        The declination of the target (in degrees)
    - obstime: astropy.time.Time object
        Contains the time of the observation. Can also contain the
        observatory location if lat and lon are not given.
    - lat: float
        The latitude of the observatory, in degrees. Not needed if given in
        the obstime object
    - lon: float
        The longitude of the observatory, in degrees. Not needed if given in
        the obstime object

    Returns:
    ========
    The altitude and azimuth of the object, both in degrees.
    """
    if lat is None:
        lat = obstime.lat.degree
    if lon is None:
        lon = obstime.lon.degree
    obstime = Time(obstime.isot, format='isot', scale='utc', location=(lon, lat))

    # Find the number of days since J2000
    j2000 = Time("2000-01-01T12:00:00.0", format='isot', scale='utc')
    dt = (obstime - j2000).value  # number of days since J2000 epoch

    # get the UT time
    tstring = obstime.isot.split("T")[-1]
    segments = tstring.split(":")
    ut = float(segments[0]) + float(segments[1]) / 60.0 + float(segments[2]) / 3600

    # Calculate Local Sidereal Time
    lst = obstime.sidereal_time('mean').deg

    # Calculate the hour angle
    HA = lst - ra
    while HA < 0.0 or HA > 360.0:
        s = -np.sign(HA)
        HA += s * 360.0

    # convert everything to radians
    dec *= np.pi / 180.0
    lat *= np.pi / 180.0
    HA *= np.pi / 180.0

    # Calculate the altitude
    alt = np.arcsin(np.sin(dec) * np.sin(lat) + np.cos(dec) * np.cos(lat) * np.cos(HA))

    # calculate the azimuth
    az = np.arccos((np.sin(dec) - np.sin(alt) * np.sin(lat)) / (np.cos(alt) * np.cos(lat)))
    if np.sin(HA) > 0:
        az = 2.0 * np.pi - az

    if debug:
        print("UT: ", ut)
        print("LST: ", lst)
        print("HA: ", HA * 180.0 / np.pi)

    return alt * 180.0 / np.pi, az * 180.0 / np.pi
06a097fd23e6462f676c028a3300172cd21ae284
421
import traceback


def handler_no_answer(f):
    """Decorator that creates message handlers that don't reply."""
    def handle_wrapper(*args, **kwds):
        answer = None
        try:
            f(*args, **kwds)
            # On success, intentionally return nothing: no reply is sent.
        except Exception:
            return MSG_STATUS_ERROR, [
                'Calling the cmd handler caused an error:\n{}'.format(traceback.format_exc())
            ], {}
    return handle_wrapper
79f1e0d30eab4d7beb500a3a75f7b7e4415e311c
422
import re


def wrapper_handle_attrs(func):
    """Convert HTML tag attributes into a dict."""
    # Decorator for Parsing.handle_attrs_tmp and Parsing.handle_attrs_tag
    def handle_attrs(self, attrs_str):
        attrs = dict()
        if attrs_str == '/':
            return attrs
        attrs_list = re.findall(self.attr_reg, attrs_str)
        for attr in attrs_list:
            attrs[attr[0]] = func(self, attr)
        return attrs
    return handle_attrs
d7396433c9721c26c8d419d4e78f2b8445f5dd70
423
def transfer_weights(model, weights=None):
    """
    Always trains from scratch; never transfers weights

    :param model:
    :param weights:
    :return:
    """
    print('ENet has found no compatible pretrained weights! Skipping weight transfer...')
    return model
2b8b5e7d3ad72deea42ffccea6a561eac3b72320
424
def collapse_json(text, indent=4):
    """Compacts a string of json data by collapsing whitespace after the
    specified indent level

    NOTE: will not produce correct results when indent level is not a
    multiple of the json indent level
    """
    initial = " " * indent
    out = []  # final json output
    sublevel = []  # accumulation list for sublevel entries
    pending = None  # holder for consecutive entries at exact indent level
    for line in text.splitlines():
        if line.startswith(initial):
            if line[indent] == " ":
                # found a line indented further than the indent level, so add
                # it to the sublevel list
                if pending:
                    # the first item in the sublevel will be the pending item
                    # that was the previous line in the json
                    sublevel.append(pending)
                    pending = None
                item = line.strip()
                sublevel.append(item)
                if item.endswith(","):
                    sublevel.append(" ")
            elif sublevel:
                # found a line at the exact indent level *and* we have sublevel
                # items. This means the sublevel items have come to an end
                sublevel.append(line.strip())
                out.append("".join(sublevel))
                sublevel = []
            else:
                # found a line at the exact indent level but no items indented
                # further, so possibly start a new sub-level
                if pending:
                    # if there is already a pending item, it means that
                    # consecutive entries in the json had the exact same
                    # indentation and that last pending item was not the start
                    # of a new sublevel.
                    out.append(pending)
                pending = line.rstrip()
        else:
            if pending:
                # it's possible that an item will be pending but not added to
                # the output yet, so make sure it's not forgotten.
                out.append(pending)
                pending = None
            if sublevel:
                out.append("".join(sublevel))
            out.append(line)
    return "\n".join(out)
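Usage sketch: collapse everything nested deeper than the first indent level of indent=4 JSON.

import json

data = {"a": [1, 2, 3], "b": {"c": 4}}
print(collapse_json(json.dumps(data, indent=4), indent=4))
# {
#     "a": [1, 2, 3],
#     "b": {"c": 4}
# }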
625868ca90aab0be50cf6d2fdb2926d395d83301
425
import ast


def get_skills_v1():
    """READING THE FIRST SKILLSET"""
    # Open in text mode: ast.literal_eval expects str, not bytes.
    with open('skills_v1.json') as f:
        for a in f:
            skills_v1 = ast.literal_eval(a)
    return skills_v1
c71b7ed4fc6579ea21a0aecceaf38be81a32964b
426
from typing import Tuple, List
from collections import Counter

import numpy as np


def create_mask(board: np.ndarray, dimensions: Tuple[int, int]) -> List[List[int]]:
    """Function to create Mask of possible valid values
    based on the initial sudoku Board."""
    mask = list(board.tolist())
    counts = Counter(board.flatten())
    del counts[0]
    counts = [number[0] for number in counts.most_common()]
    most_common_clues = counts
    for clue in range(dimensions[0], dimensions[1]):
        if clue not in most_common_clues:
            most_common_clues.append(clue)
    for i, row in enumerate(mask):
        if 0 in row:
            while 0 in row:
                zero_index = row.index(0)
                mask[i][zero_index] = []
                for number in most_common_clues:
                    # `valid` and `box_size` are assumed to be defined
                    # elsewhere in the module (validity check and box dims).
                    if valid(board, number, (i, zero_index), box_size):
                        mask[i][zero_index].append(number)
        else:
            for number in row:
                if number != 0:
                    mask[i][row.index(number)] = {number}
    return mask
a8e4c68a55c96ad7502464934226f8909dbf18cd
427
from urllib.parse import urlencode
from urllib.request import urlopen


def telegram(text: str, token: str, chat_id: int) -> str:
    """Send a telegram message"""
    webhookAddress = f"https://api.telegram.org/bot{token}/sendMessage?" + urlencode(
        {"text": text, "chat_id": chat_id})
    handler = urlopen(webhookAddress)
    return handler.read().decode('utf-8')
c80727f5e482b3e9bb48c28c3cc9e688228733fc
428
def match_term(term, dictionary, case_sensitive, lemmatize=True):
    """
    Parameters
    ----------
    term
    dictionary
    case_sensitive
    lemmatize
        Including lemmas improves performance slightly

    Returns
    -------
    """
    if (not case_sensitive and term.lower() in dictionary) or term in dictionary:
        return True
    # Note: the original had the .lower() on the case-sensitive branch;
    # it belongs on the case-insensitive one.
    if (case_sensitive and lemmatize) and term.rstrip('s') in dictionary:
        return True
    elif (not case_sensitive and lemmatize) and term.rstrip('s').lower() in dictionary:
        return True
    return False
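Usage sketch with a lowercase dictionary: case-insensitive lookup plus the naive rstrip('s') plural stripping.

vocab = {"neural network", "transformer"}
print(match_term("Transformers", vocab, case_sensitive=False))  # True
print(match_term("transformer", vocab, case_sensitive=True))   # True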
aba706a211cf68e7c8c1668200da3f9c8613b3d2
429
import requests
import json
import csv


def fill_user(user_ids, filename='user', write=True):
    """
    Input: user_ids dictionary (user ids: task values)
    Output: csv file with user id, name, email
    """
    emails = {}
    for user in user_ids:
        r = requests.get('https://pe.goodlylabs.org'
                         '/api/user/{}?api_key={}&limit=100'
                         .format(user, PYBOSSA_API_KEY), headers=headers)
        user_info = json.loads(r.text)
        emails[user] = [user_info['fullname'], user_info['email_addr']]

    if write:
        with open('{}.csv'.format(filename), 'w') as f:
            writer = csv.writer(f, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow(["id", "name", "email"])
            for i in emails:
                writer.writerow([i, emails[i][0], emails[i][1]])

    return emails
0ee2eb7104ee177a13d78e6e05b6fb787736bb06
430
def project_along_flow(dX_raw, dY_raw, dX_prio, dY_prio, e_perp):
    r"""
    Parameters
    ----------
    dX_raw : np.array, size=(m,n), dtype=float
        raw horizontal displacement with mixed signal
    dY_raw : np.array, size=(m,n), dtype=float
        raw vertical displacement with mixed signal
    dX_prio : np.array, size=(m,n), dtype=float
        reference of horizontal displacement (a-priori knowledge)
    dY_prio : np.array, size=(m,n), dtype=float
        reference of vertical displacement (a-priori knowledge)
    e_perp : np.array, size=(2,1), float
        vector in the perpendicular direction to the flightline (bearing).

    Returns
    -------
    dX_proj : np.array, size=(m,n), dtype=float
        projected horizontal displacement in the same direction as reference.
    dY_proj : np.array, size=(m,n), dtype=float
        projected vertical displacement in the same direction as reference.

    Notes
    -----
    The projection function is as follows:

    .. math::
        P = ({d_{x}}e^{\perp}_{x} - {d_{y}}e^{\perp}_{y}) /
            ({\hat{d}_{x}}e^{\perp}_{x} - {\hat{d}_{y}}e^{\perp}_{y})

    See also Equation 10 and Figure 2 in [1].

    Furthermore, two different coordinate system are used here:

    .. code-block:: text

          indexing   |            indexing    ^ y
          system 'ij'|            system 'xy' |
                     |                        |
                     |       i                |       x
             --------+-------->       --------+-------->
                     |                        |
                     |                        |
          image      | j          map         |
          based      v            based       |

    References
    ----------
    .. [1] Altena & Kääb. "Elevation change and improved velocity retrieval
       using orthorectified optical satellite data from different orbits"
       Remote Sensing vol.9(3) pp.300 2017.
    """
    # e_{\para} = bearing satellite...
    assert (dX_raw.size == dY_raw.size)  # all should be of the same size
    assert (dX_prio.size == dY_prio.size)
    assert (dX_raw.size == dX_prio.size)

    d_proj = ((dX_raw * e_perp[0]) - (dY_raw * e_perp[1])) / \
             ((dX_prio * e_perp[0]) - (dY_prio * e_perp[1]))

    dX_proj = d_proj * dX_raw
    dY_proj = d_proj * dY_raw
    return dX_proj, dY_proj
fe0565667b77954b1df07d7ea31cbb620b20f800
431
from typing import Mapping

# The stdlib `select` module has no `select(columns)` callable; the usage
# below matches SQLAlchemy's `select`.
from sqlalchemy import Table, select
from sqlalchemy.engine import Engine


def get_existing_pks(engine: Engine, table: Table) -> Mapping[int, dict]:
    """
    Creates an index of hashes of the values of the primary keys in the
    table provided.

    :param engine:
    :param table:
    :return:
    """
    with engine.connect() as conn:
        pk_cols = [table.c[col.name] for col in table.columns if col.primary_key]
        query = select(pk_cols)
        result = conn.execute(query)
        return {hash_row_els(dict(row), [col.name for col in pk_cols]): dict(row)
                for row in result}
fead502b36f67c6732ba4b0e4e678af4fd96ed53
432
import cv2


def create_transform_parameters(
    fill_mode='nearest',
    interpolation='linear',
    cval=0,
    data_format=None,
    relative_translation=True,
):
    """
    Creates a dictionary to store parameters containing information on
    method to apply transformation to an image

    # Arguments
        fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'
        interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
        cval: Fill value to use with fill_mode='constant'
        data_format: Same as for keras.preprocessing.image_transform.apply_transform
        relative_translation: If true (the default), interpret translation as a
            factor of the image size. If false, interpret it as absolute pixels.
    """
    # Apply processing to input arguments
    if data_format is None:
        data_format = 'channels_last'

    if data_format == 'channels_first':
        channel_axis = 0
    elif data_format == 'channels_last':
        channel_axis = 2
    else:
        raise ValueError("invalid data_format, expected 'channels_first' or 'channels_last', got '{}'".format(data_format))

    if fill_mode == 'constant':
        cv_border_mode = cv2.BORDER_CONSTANT
    if fill_mode == 'nearest':
        cv_border_mode = cv2.BORDER_REPLICATE
    if fill_mode == 'reflect':
        cv_border_mode = cv2.BORDER_REFLECT_101
    if fill_mode == 'wrap':
        cv_border_mode = cv2.BORDER_WRAP

    if interpolation == 'nearest':
        cv_interpolation = cv2.INTER_NEAREST
    if interpolation == 'linear':
        cv_interpolation = cv2.INTER_LINEAR
    if interpolation == 'cubic':
        cv_interpolation = cv2.INTER_CUBIC
    if interpolation == 'area':
        cv_interpolation = cv2.INTER_AREA
    if interpolation == 'lanczos4':
        cv_interpolation = cv2.INTER_LANCZOS4

    # Create attribute dict to store parameters
    _p = AttrDict(
        fill_mode=fill_mode,
        interpolation=interpolation,
        cval=cval,
        relative_translation=relative_translation,
        data_format=data_format,
        channel_axis=channel_axis,
        cv_border_mode=cv_border_mode,
        cv_interpolation=cv_interpolation
    )
    _p.immutable(True)
    return _p
2172c283b53d76881877b618043885aa596507d4
433
import numpy as np


def error_rate(model, dataset):
    """Returns error rate for Keras model on dataset."""
    d = dataset['dimension']
    scores = np.squeeze(model.predict(dataset['features'][:, :, 0:d]), axis=-1)
    diff = scores[:, 0] - scores[:, 1]
    return np.mean(diff.reshape((-1)) <= 0)
b16b234ead64737eb2d40b3aab612270ed86dc0a
434
from typing import Callable
import functools


def k8s_cr_callback(func: Callable) -> Callable:
    """
    Decorate a method as a K8s CR callback.

    Is working only for K8sCRHandler and child classes.
    """
    @functools.wraps(func)
    def decorated_func(self, *args, **kwargs):
        """Provide automatic locking of CRs in process by this method."""
        # Ensure that signature of K8sCRHandler._callback stays the same
        name = args[0]
        labels = args[1]
        operation = args[2]
        blocking = bool(operation != 'REPROCESS')
        locked = self.cr_locks[name].acquire(blocking=blocking)
        if locked:
            _LOGGER.debug(
                'CR "%s" locked by operation "%s" with label "%s"', name, operation, labels)
            try:
                return func(self, *args, **kwargs)
            finally:
                self.cr_locks[name].release()
                _LOGGER.debug(
                    'CR "%s" unlocked by operation "%s" with label "%s"', name, operation, labels)
                # Cleanup lock objects dictionary when CR was deleted
                if operation == 'DELETED':
                    self.cr_locks.pop(name, None)
        else:
            _LOGGER.debug(
                'CR "%s" in process - skipping operation "%s" this run', name, operation)

    _LOGGER.debug('Method "%s" is decorated as K8s callback method', func)
    return decorated_func
8cc6bc8911ba271ee19042b0337def25fc334868
435
def account():
    """Update the user's account"""
    return _templates.account(UserContext.user())
4624bcce3987e71edcd8d720eea22b52658c1352
436
import asyncio
from typing import Awaitable, TypeVar

TSource = TypeVar("TSource")


def run_synchronously(computation: Awaitable[TSource]) -> TSource:
    """Runs the asynchronous computation and await its result."""
    return asyncio.run(computation)
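Usage sketch: drive a coroutine to completion from synchronous code.

async def add(a: int, b: int) -> int:
    return a + b

print(run_synchronously(add(1, 2)))  # 3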
2aa14167a2de06a85862b0b0c9294c76fa8ed012
437
from datetime import datetime
from typing import Optional, cast

from pydantic import BaseModel  # noqa: E0611

# `cmd` is assumed to be the project's command-model module (e.g. an alias
# like `from <project>.protocol_engine import commands as cmd`), not the
# stdlib `cmd` module.
import cmd


def create_running_command(
    command_id: str = "command-id",
    command_key: str = "command-key",
    command_type: str = "command-type",
    created_at: datetime = datetime(year=2021, month=1, day=1),
    params: Optional[BaseModel] = None,
) -> cmd.Command:
    """Given command data, build a running command model."""
    return cast(
        cmd.Command,
        cmd.BaseCommand(
            id=command_id,
            key=command_key,
            createdAt=created_at,
            commandType=command_type,
            status=cmd.CommandStatus.RUNNING,
            params=params or BaseModel(),
        ),
    )
a9e38fd534aaf29ebe265279eb86ed90233113fb
438
def x11_linux_stop_record():
    """stop test_record action"""
    return xwindows_listener.stop_record()
3a5e4728f8d6d27083ee43c26c84ba22133e0621
439
import numpy as np


def yxy_intrinsic(mat: np.ndarray) -> np.ndarray:
    """Return yxy intrinsic Euler angle decomposition of mat (..., 4, 4)"""
    # extract components
    not_nan, r00, r01, r02, r10, r11, r12, _, r21, _ = extract_mat_components(mat)
    # pre-initialize results
    theta_y0 = np.full(not_nan.shape, np.nan)
    theta_x = np.full(not_nan.shape, np.nan)
    theta_y1 = np.full(not_nan.shape, np.nan)
    # compute Euler angles
    theta_y0[not_nan] = np.where(r11 < 1, np.where(r11 > -1, np.arctan2(-r01, -r21), 0), 0)
    theta_x[not_nan] = np.where(r11 < 1, np.where(r11 > -1, -np.arccos(r11), -np.pi), 0)
    theta_y1[not_nan] = np.where(r11 < 1,
                                 np.where(r11 > -1, np.arctan2(-r10, r12), np.arctan2(r02, r00)),
                                 np.arctan2(r02, r00))
    return np.stack((theta_y0, theta_x, theta_y1), -1)
d7fd0ab01c3c7cf27839caff53a905294e47b7ba
440
def mnemonic_and_path_to_key(*, mnemonic: str, path: str, password: str) -> int:
    """
    Return the SK at position `path`, derived from `mnemonic`.

    The password is to be compliant with BIP39 mnemonics that use passwords,
    but is not used by this CLI outside of tests.
    """
    seed = get_seed(mnemonic=mnemonic, password=password)
    sk = derive_master_SK(seed)
    for node in path_to_nodes(path):
        sk = derive_child_SK(parent_SK=sk, index=node)
    return sk
6127278c78a1e52e362c2d66c4eb065f63de0ba9
441
import inspect


def test_function_with_annotations():
    """Parse a function docstring with signature annotations."""
    def f(x: int, y: int, *, z: int) -> int:
        """
        This function has annotations.

        Parameters:
            x: X value.
            y: Y value.

        Keyword Arguments:
            z: Z value.

        Returns:
            Sum X + Y.
        """
        return x + y

    sections, errors = parse(inspect.getdoc(f), inspect.signature(f))
    assert len(sections) == 4
    assert not errors
5fe7d046659a9e1511f8f321d186cd6e8f1d8d43
442
def acceleration(bodies, i, j):
    """
    Compute the acceleration of the object bodies[i]

    bodies: all the objects
    i: index of the concerned body, which undergoes the gravitation of the other objects.
    j: index of the step
    """
    N = len(bodies)
    ax = 0
    ay = 0
    az = 0  # the acceleration
    for ip in range(N):  # each object bodies[ip] exerts a gravitational force on bodies[i]
        if ip == i:  # a body does not attract itself
            continue
        ax += fx(bodies[ip].masse, bodies[i].x[j] - bodies[ip].x[j],
                 bodies[i].y[j] - bodies[ip].y[j], bodies[i].z[j] - bodies[ip].z[j])
        ay += fy(bodies[ip].masse, bodies[i].x[j] - bodies[ip].x[j],
                 bodies[i].y[j] - bodies[ip].y[j], bodies[i].z[j] - bodies[ip].z[j])
        az += fz(bodies[ip].masse, bodies[i].x[j] - bodies[ip].x[j],
                 bodies[i].y[j] - bodies[ip].y[j], bodies[i].z[j] - bodies[ip].z[j])
    return (ax, ay, az)
e19ab098a72d43d0f28931d853866ba0999bd39d
443
import re


def formatted(s):
    """If s contains substrings of form '#'<txt>'#', '(('<txt>'))',
    "''"<txt>"''", returns list of tuples (FORMAT_x, txt).
    Otherwise, returns s.
    """
    matches = re.findall(_format_re, normalize(s))
    if len(matches) == 1 and matches[0][0] != '':
        return matches[0][0]

    def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):
        if txt_none != '':
            return FORMAT_NONE, txt_none
        elif txt_sw != '':
            return FORMAT_SW, txt_sw
        elif txt_rem != '':
            return FORMAT_REM, txt_rem
        elif txt_em != '':
            return FORMAT_EM, txt_em
        elif txt_a != '':
            return FORMAT_A, txt_a

    return [to_fmt(*m) for m in matches]
e164d743c5284de744948ab9db72f8887e380dc2
444
def deep_len(lnk):
    """ Returns the deep length of a possibly deep linked list.

    >>> deep_len(Link(1, Link(2, Link(3))))
    3
    >>> deep_len(Link(Link(1, Link(2)), Link(3, Link(4))))
    4
    >>> levels = Link(Link(Link(1, Link(2)), \
            Link(3)), Link(Link(4), Link(5)))
    >>> print(levels)
    <<<1 2> 3> <4> 5>
    >>> deep_len(levels)
    5
    """
    if not lnk:
        return 0
    if type(lnk.first) == int:
        return 1 + deep_len(lnk.rest)
    return deep_len(lnk.first) + deep_len(lnk.rest)
d8a33600085e51b181752b2dd81d5bcdae7aaff9
445
import numpy as np


def union(A, B):
    """
    Add two subspaces (A, B) together.

    Args:
        - A: a matrix whose columns span subspace A [ndarray].
        - B: a matrix whose columns span subspace B [ndarray].

    Returns:
        - union: a matrix whose columns form the orthogonal basis for
          subspace addition A+B [ndarray].
    """
    m, n = A.shape
    x, y = B.shape
    if m != x:
        raise Exception('input matrices need to be of same height')
    T = np.hstack((A, B))
    return image(T)
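The helper `image` is not shown in this snippet; below is a minimal sketch (an assumption, using numpy's SVD to take an orthonormal basis of the column space) followed by a usage example.

import numpy as np

def image(T, tol=1e-10):
    """Orthonormal basis for the column space of T."""
    u, s, _ = np.linalg.svd(T)
    rank = int(np.sum(s > tol))
    return u[:, :rank]

A = np.array([[1.0], [0.0], [0.0]])
B = np.array([[0.0], [1.0], [0.0]])
print(union(A, B).shape)  # (3, 2): the sum of the two lines is a plane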
d88f09cd4be80d06d7ae0d6e8397e46910f81a90
446
def ldns_create_nsec(*args):
    """LDNS buffer."""
    return _ldns.ldns_create_nsec(*args)
f9e8fd181f4476c745a9ac12c513e24e7939e2e3
447
def str_to_seconds(time):
    """
    Returns the number of seconds since midnight in the string time (as an int).

    The value time is a string in extended ISO 8601 format. That is, it has
    the form 'hh:mm:ss' where h, m, and s are digits. There must be exactly
    two digits each for hours, minutes, and seconds, so they are padded with
    0s when necessary. So seconds, minutes, and hours may have leading 0s if
    they are only one digit. For more information, see
    https://en.wikipedia.org/wiki/ISO_8601#Times

    This function does not support time zones, abbreviated formats, or decimals

    Example: str_to_seconds('12:35:15') returns 45315
    Example: str_to_seconds('03:02:05') returns 10925
    Example: str_to_seconds('00:00:00') returns 0

    Parameter time: The string representation of the time
    Precondition: time is a string in extended ISO 8601 format 'hh:mm:ss'
    """
    assert type(time) == str
    assert len(time) == 8
    assert iso_8601(time) == True
    result = get_hours(time) * 60 * 60 + get_minutes(time) * 60 + get_seconds(time)
    return result
3902e5743567f6d07e2c78fa76e5bc2fc0d6306f
448
# Imports assumed from the surrounding project: scipy's truncated normal and
# MindSpore's Tensor/dtype.
import numpy as np
from scipy.stats import truncnorm
from mindspore import Tensor
from mindspore.common import dtype as mstype


def conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
    """conv init"""
    fan_in = in_channel * kernel_size * kernel_size
    scale = 1.0
    scale /= max(1., fan_in)
    stddev = (scale ** 0.5) / .87962566103423978
    mu, sigma = 0, stddev
    weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(
        out_channel * in_channel * kernel_size * kernel_size)
    weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
    return Tensor(weight, dtype=mstype.float32)
925339a12e4f2e04c403ad8148145df0497da0da
449
def vgg8(**kwargs):
    """VGG 8-layer model (configuration "S")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(cfg['S'], **kwargs)
    return model
61d9f2e98c68691c1ed26631220f447eef28ba11
450
def get_char_from_ascii(key_num):
    """Function that converts an ascii code to a character

    Parameters
    ----------
    ascii_code : int
        Ascii code of character

    Returns
    -------
    char : character
        character converted from ascii
    """
    return chr(key_num)
79f6a5627805909a005d5921f4e9fe738fb09936
451
import os
import glob


def get_files_path(file_path: str) -> list:
    """Get all files path

    Args:
        file_path: root folder path

    Returns:
        list: list of string containing all files paths
    """
    all_files = []
    # Walk the given root folder (the original ignored `file_path` and used
    # a hardcoded 'data' directory).
    for root, dirs, files in os.walk(file_path):
        files = glob.glob(os.path.join(root, '*.json'))
        for f in files:
            all_files.append(f)
    return all_files
d775bbe229b1ad53c173ae5d98246b04a3050dfa
452
def start():
    """view for data entry for optimisation"""
    form = LocationForm()
    if form.validate_on_submit():
        return optimise(form.data)
    return flask.render_template("start.html", title="Start", form=form)
caba5a4d20d544ed480bda4d4e4d4377880bbd40
453
def add_lldp_filter_by_host(query, hostid):
    """Adds a lldp-specific ihost filter to a query.

    Filters results by host id if supplied value is an integer,
    otherwise attempts to filter results by host uuid.

    :param query: Initial query to add filter to.
    :param hostid: host id or uuid to filter results by.
    :return: Modified query.
    """
    if utils.is_int_like(hostid):
        return query.filter_by(host_id=hostid)
    elif utils.is_uuid_like(hostid):
        query = query.join(models.Hosts)
        return query.filter(models.Hosts.uuid == hostid)

    LOG.debug("lldp_filter_by_host: "
              "No match for supplied filter id (%s)" % str(hostid))
fd5632660897bf89e5cedb2ef5d64c06c1aab0d9
454
def c_flag(opt, test_not=False):
    """Convert a test parameter into "TRUE"/"FALSE" for the Fortran build system"""
    if test_not:
        opt = not opt
    return "TRUE" if opt else "FALSE"
cf78668ae19287822fba9946fa472187848e0084
455
def false_function():
    """Sample function to test unit testing."""
    return False
7823ac0f533c97544a8f73f73715bebb8e5b45cc
456
def broker_task_send(task_uuid, request, broker_point, reply_to=None):
    """Command to publish `primitives.Request` to customer

    Args:
        task_uuid(str): task identification
        request: Serialized request
        broker_point(gromozeka.BrokerPoint):
        reply_to(gromozeka.BrokerPoint):

    Returns:
        Command:
    """
    return Command(command=BROKER_TASK_SEND,
                   args={'task_uuid': task_uuid,
                         'request': request,
                         'broker_point': broker_point,
                         'reply_to': reply_to}).as_tuple()
52b389982676f65547f10a2cd45ac225e6486673
457
import numpy


def process_axis_labels(datadesc, blobs, offset=0):
    """Convert the raw axis label descriptions.

    Similar to LiveDataPanel._process_axis_labels, but is flexible in datadesc.
    """
    CLASSIC = {'define': 'classic'}
    labels = {}
    titles = {}
    for size, axis in zip(reversed(datadesc['shape']), AXES):
        # if the 'labels' key does not exist or does not have the right
        # axis key set default to 'classic'.
        label = datadesc.get(
            'labels', {'x': CLASSIC, 'y': CLASSIC}).get(axis, CLASSIC)
        if label['define'] == 'range':
            start = label.get('start', 0)
            size = label.get('length', 1)
            step = label.get('step', 1)
            end = start + step * size
            labels[axis] = numpy.arange(start, end, step)
        elif label['define'] == 'array':
            index = label.get('index', 0)
            labels[axis] = numpy.frombuffer(blobs[index],
                                            label.get('dtype', '<i4'))
        else:
            labels[axis] = numpy.array(range(size))
        labels[axis] += offset if axis == 'x' else 0
        titles[axis] = label.get('title')
    return labels, titles
d0f880c69160b2a620affe7b1cfe8c7dda12d807
458
import pandas as pd


def _to_ranks_by_group(dat, group, formula, exclude_cols=[]):
    """
    Convert predictors to ranks separately for each group for use in rank
    Lmer. Any columns not in the model formula or in exclude_cols will not
    be converted to ranks. Used by models.Lmer

    Args:
        dat (pd.DataFrame): dataframe of data
        group (string): string name of column to group data on
        formula (string): Lmer flavored model formula with random effects
        exclude_cols (list): optional columns that are part of the formula
            to exclude from rank conversion.

    Returns:
        pandas.core.frame.DataFrame: ranked data
    """
    # `group` must be a string naming a column of `dat`; the original used
    # `and` here, which let strings that are not columns slip through.
    if (not isinstance(group, str)) or (group not in dat.columns):
        raise TypeError(
            "group must be a valid column name in the dataframe. Currently only 1 grouping variable is supported."
        )
    if isinstance(exclude_cols, str):
        exclude_cols = [exclude_cols]
    original_col_order = list(dat.columns)
    formula = formula.replace(" ", "")
    to_rank = formula.split("~")[-1].split("(")[0].split("+")[:-1]
    # add dv to be ranked
    to_rank.append(formula.split("~")[0])
    to_rank = [c for c in to_rank if c not in exclude_cols]
    other_cols = [c for c in dat.columns if c not in to_rank]
    dat = pd.concat(
        [dat[other_cols], dat.groupby(group).apply(lambda g: g[to_rank].rank())],
        axis=1
    )
    return dat[original_col_order]
6cff465b0a1877d6594953dda75913dfb36a67ad
459
import numpy as np


def build_input_data(sentences, labels, vocabulary):
    """
    Maps sentences and labels to vectors based on a vocabulary.
    """
    # With capped vocab, need to account for word not present in
    # vocab. Using the padding word.
    # TODO -- pass padding word in as an arg
    padding_word = "<PAD/>"
    pad_idx = vocabulary[padding_word]
    x = np.array(
        [[vocabulary.get(word, pad_idx) for word in sentence]
         for sentence in sentences])
    y = np.array(labels)
    return [x, y]
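Usage sketch with a toy vocabulary; sentences are assumed pre-padded to equal length with "<PAD/>".

vocab = {"<PAD/>": 0, "hello": 1, "world": 2}
x, y = build_input_data([["hello", "world"], ["hello", "<PAD/>"]], [1, 0], vocab)
print(x.tolist(), y.tolist())  # [[1, 2], [1, 0]] [1, 0]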
6188ebbf3f4b1172f6316487af3ca468bab096b3
460
def list_scans():
    """
    :return: A JSON containing a list of:
        - Scan resource URL (eg. /scans/1)
        - Scan target
        - Scan status
    """
    data = []

    # .items() replaces the Python 2 iteritems() from the original.
    for scan_id, scan_info in SCANS.items():
        if scan_info is None:
            continue

        target_urls = scan_info.target_urls
        status = scan_info.w3af_core.status.get_simplified_status()
        errors = True if scan_info.exception is not None else False

        data.append({'id': scan_id,
                     'href': '/scans/%s' % scan_id,
                     'target_urls': target_urls,
                     'status': status,
                     'errors': errors})

    return jsonify({'items': data})
60d5eb5c33c09ac6e35ffae2c10b6aca566c6027
461
def factor_list(f, *gens, **args):
    """
    Compute a list of irreducible factors of ``f``.

    **Examples**

    >>> from sympy import factor_list
    >>> from sympy.abc import x, y

    >>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    (2, [(x + y, 1), (1 + x**2, 2)])

    """
    return _generic_factor_list(f, gens, args, method='factor')
c13e503a631d3bfc5ead05dc8de8cc5243614241
462
import pickle

import torch
import torch.distributed as dist


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)

    Args:
        data: any picklable object

    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = dist.get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
9e89ed2f299f5de8dec55d5529478177d45c21fa
463
import torch


def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=()):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Return an initialized network.
    """
    if len(gpu_ids) > 0:
        assert (torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
    init_weights(net, init_type, init_gain=init_gain)
    return net
2ddeda15b84bca0b83a7b5b516c83f991cec44c7
464
def remove_from_group(group_name, nodes=None, nodes_by_col='SUID', edges=None, edges_by_col='SUID',
                      network=None, base_url=DEFAULT_BASE_URL):
    """Remove the specified nodes and edges from the specified group.

    Args:
        group_name (str): Specifies the name used to identify the group
        nodes (list or str or int or None): List of nodes or keyword: selected, unselected or all.
            If node list: ``list`` of node names or SUIDs, comma-separated string of node names or
            SUIDs, or scalar node name or SUID). Node names should be found in the ``SUID`` column
            of the ``node table`` unless specified in ``nodes_by_col``. If list is None, default is
            currently selected nodes.
        nodes_by_col (str): name of node table column corresponding to provided nodes list.
            Default is 'SUID'.
        edges (str or list or int or None): List of edges or keyword: selected, unselected or all.
            If edge list: ``list`` of edge names or SUIDs, comma-separated string of edge names or
            SUIDs, or scalar edge name or SUID). Edge names should be found in the ``SUID`` column
            of the ``edge table`` unless specified in ``edges_by_col``. If list is None, default is
            currently selected edges.
        edges_by_col (str): name of edge table column corresponding to provided edges list.
            Default is 'SUID'.
        network (SUID or str or None): Name or SUID of a network. Default is the
            "current" network active in Cytoscape.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        dict: {}

    Raises:
        CyError: if network name or SUID doesn't exist
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> remove_from_group('Group 1', ['GDS1', 'SIP4', 'PDC1'], nodes_by_col='COMMON') # remove nodes by common name & all their edges
        {}
        >>> remove_from_group('Group 1', 'GDS1, SIP4, PDC1', nodes_by_col='COMMON') # remove nodes by common name & all their edges
        {}
        >>> remove_from_group('Group 1', [76545, 75499, 80299]) # remove nodes by SUID & all their edges
        {}
        >>> remove_from_group('Group 1', 80299) # remove node by SUID & all its edges
        {}
        >>> remove_from_group('Group 1') # remove all selected nodes and edges
        {}
        >>> remove_from_group('Group 1', nodes=[], edges=[78565, 79565]) # remove edges but not any nodes
        {}
        >>> remove_from_group('Group 1', nodes='unselected', edges='unselected') # remove all unselected nodes and edges
        {}
    """
    if isinstance(nodes, str) and nodes in {'all', 'selected', 'unselected'}:
        nodes_by_col = None
    node_list = prep_post_query_lists(nodes, nodes_by_col)

    if isinstance(edges, str) and edges in {'all', 'selected', 'unselected'}:
        edges_by_col = None
    edge_list = prep_post_query_lists(edges, edges_by_col)

    net_suid = networks.get_network_suid(network, base_url=base_url)

    res = commands.commands_post(
        f'group remove groupName="{group_name}" nodeList="{node_list}" edgeList="{edge_list}" network="SUID:{net_suid}"',
        base_url=base_url)
    return res
0f7ae3b161aa1b189be14973ddaa7a7a4fef4bbf
465
def filter_bank_2high(t, Nj, Nj_1, ac=2.0, bc=2.0): """ computes the filter bank for control points N_j, Nj_1 given the variable t :param t: data points on the real line R arranged in numpy array :param Nj: control point, Nj > Nj_1, integer :param Nj_1: control point, Nj > Nj_1, integer :param ac: between (1, 2]. Default 2.0 :param bc: bc < 2. Default 2.0 :return: (ha, hb1, hb2) low-pass filter ha and high-pass filters hb1 and hb2 at t, all in numpy array format """ # a_hat a_cR = (1 + Nj_1) / ac a_epsR = Nj_1 - a_cR a_cL = -a_cR a_epsL = a_epsR # b_hat_1 b1_cL = a_cR b1_epsL = a_epsR b1_cR = (Nj_1 + Nj) / bc b1_epsR = Nj - b1_cR # b_hat_2 b2_cL = b1_cR b2_epsL = b1_epsR b2_cR = 2 * Nj b2_epsR = 1 # supp(ha) = [0, 1 / 4] ha = hmask(t, a_cL, a_epsL, a_cR, a_epsR) # supp(hb1) = [1 / 8, 1 / 2] hb1 = hmask(t, b1_cL, b1_epsL, b1_cR, b1_epsR) # supp(hb2) = [1 / 4, 1 / 2] hb2 = hmask(t, b2_cL, b2_epsL, b2_cR, b2_epsR) return ha, hb1, hb2
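# Illustrative call (a sketch, assuming the module's `hmask` window function
# is in scope); Nj and Nj_1 are integer control points with Nj > Nj_1:
import numpy as np
t = np.linspace(0.0, 1.0, 513)
ha, hb1, hb2 = filter_bank_2high(t, Nj=8, Nj_1=4)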
a197cbd99ea4d2ce6fcf9c277cff3e634b539049
466
def to_undirected(graph, copy_node_feat=True, copy_edge_feat=False):
    """Convert a graph to an undirected graph.

    Args:

        graph (pgl.Graph): The input graph, should be in numpy format.

        copy_node_feat (bool): Whether to copy node feature in return graph. Default: True.

        copy_edge_feat (bool): Whether to copy edge feature in return graph. Default: False.

    Returns:

        g (pgl.Graph): Returns an undirected graph.

    """
    if graph.is_tensor():
        raise TypeError("The input graph should be numpy format.")

    # Keep the integer dtype of the edge array; np.zeros defaults to float64.
    inv_edges = np.zeros(graph.edges.shape, dtype=graph.edges.dtype)
    inv_edges[:, 0] = graph.edges[:, 1]
    inv_edges[:, 1] = graph.edges[:, 0]
    edges = np.vstack((graph.edges, inv_edges))
    edges = np.unique(edges, axis=0)
    g = pgl.graph.Graph(num_nodes=graph.num_nodes, edges=edges)
    if copy_node_feat:
        for k, v in graph._node_feat.items():
            g._node_feat[k] = v
    if copy_edge_feat:
        # TODO(daisiming): Support duplicate edge_feature.
        raise NotImplementedError(
            "The copy of edge feature is not implemented currently.")

    return g
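# Usage sketch mirroring the numpy-format constructor used above (pgl API
# details may vary between versions):
import numpy as np
import pgl
g = pgl.graph.Graph(num_nodes=3, edges=np.array([[0, 1], [1, 2]]))
ug = to_undirected(g)  # edge set now also contains (1, 0) and (2, 1)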
ce82bb7d4db86d209ed4549254869eb1edbcdd09
467
def as_public(): """Return requests session without authentication""" return BaseUrlSession()
d55cc3616c6910e88d99083cf4e530987c1d8d6c
468
def transform_real_2_sim(real_position): """ Transforms a position from the 'real' coordinate system to the 'sim' coordinate system. :param real_position: dictionary with 'x', 'y' and 'z' keys to floating point values :return: position in sim space as dictionary with 'x', 'y' and 'z' keys to floating point values """ real_pos = np.array([real_position["x"], real_position["y"], 1]) sim_pos_np = np.dot(REAL_2_SIM_TRANSFORM, real_pos) sim_pos = {"x": sim_pos_np[0], "y": 0.9010001, "z": sim_pos_np[1]} return sim_pos
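# Hedged sketch: REAL_2_SIM_TRANSFORM is a module-level constant; the 2x3
# affine matrix below is a stand-in (identity mapping) chosen only so the
# example runs.
import numpy as np
REAL_2_SIM_TRANSFORM = np.array([[1.0, 0.0, 0.0],
                                 [0.0, 1.0, 0.0]])
print(transform_real_2_sim({"x": 1.0, "y": 2.0, "z": 0.0}))
# {'x': 1.0, 'y': 0.9010001, 'z': 2.0}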
29b83be1f6f4e49f777e085db651e4f31d47c2e0
469
import torch def generate_tgt_mask(sz): """Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). This function is a slight modification of the version in the PyTorch repository. Parameters ---------- sz : int The length of the target sequence. """ mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) mask = ( mask.float() .masked_fill(mask == 0, float("-inf")) .masked_fill(mask == 1, float(0.0)) ) return mask
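# Worked example: a causal mask for a length-3 target sequence; row i lets
# position i attend only to positions <= i.
print(generate_tgt_mask(3))
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])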
3fce5eb1cb852ca162fda58407c2cf81c1bdc849
470
def SceneAddPipeline(builder, pipeline): """This method is deprecated. Please switch to AddPipeline.""" return AddPipeline(builder, pipeline)
f220a53ad13923b1f00d208f59e575926e5b7fa2
471
def SynthesizeUserId(email):
    """Return a synthetic user ID from an email address.

    Note that this is not the same user ID found in the production system.

    Args:
        email: An email address.

    Returns:
        A string userid derived from the email address.
    """
    # Hash the lower-cased address; encoding first and iterating a bytearray
    # keeps the original behaviour while also working on Python 3.
    user_id_digest = _MD5_FUNC(email.lower().encode('utf-8')).digest()
    user_id = '1' + ''.join(['%02d' % b for b in bytearray(user_id_digest)])[:20]
    return user_id
fb3f81e37decaa941857ac575a5fd034f92a2324
472
import os

import pandas as pd

def store_to_file(file_name, series, col_name, replace=False):
    """Add ``series`` as column ``col_name`` to a CSV file in the data directory."""
    # `config` is the surrounding project's settings module (provides DATADIR).
    path = config.DATADIR
    filepath = os.path.join(path, file_name)
    df = pd.read_csv(filepath)
    if (col_name in df) and (not replace):
        return f'{col_name} already in {file_name}. Not added.'
    df[col_name] = series
    df.to_csv(filepath, index=False)
    return f'{col_name} added to {file_name}.'
60f1f9f26ee33fe51b55a223d44ddb2e71ea8db2
473
import torch
from tqdm import trange

def compute_jacobian(fn, x0: torch.Tensor, bs: int):
    """
    Computes the Jacobian matrix of the given function at x0, using vector-Jacobian products
    """
    input_shape = x0.shape
    assert len(input_shape) == 3
    dim = x0.numel()
    eye = torch.eye(dim, dtype=x0.dtype, device=x0.device)

    # Forward pass
    x0rep = x0.detach()[None].repeat([bs] + [1] * len(input_shape))  # repeat along batch axis
    x0rep.requires_grad = True
    z0rep = fn(x0rep)
    zshape = z0rep.shape[1:]
    assert zshape.numel() == dim

    # Compute batches of rows of the Jacobian
    rows = []
    for row_start in trange(0, dim, bs, desc='jacobian', leave=False):
        # Pre-pad with extra rows to ensure that batch size stays constant
        row_end = min(row_start + bs, dim)
        num_rows = row_end - row_start
        if num_rows != bs:
            assert num_rows < bs
            pre_pad_rows = bs - num_rows
        else:
            pre_pad_rows = 0
        assert row_start - pre_pad_rows >= 0

        # vector-Jacobian product with rows of an identity matrix
        g, = torch.autograd.grad(
            z0rep, x0rep,
            grad_outputs=eye[row_start - pre_pad_rows:row_end].reshape(row_end - row_start + pre_pad_rows, *zshape),
            retain_graph=True
        )
        assert g.shape == x0rep.shape
        rows.append(g.view(g.shape[0], -1)[pre_pad_rows:, :])

    jacobian = torch.cat(rows, dim=0)
    assert jacobian.shape == (dim, dim)
    return jacobian
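# Usage sketch: Jacobian of an elementwise map on a small (C, H, W) input;
# for y = tanh(x) the 16x16 result is diagonal.
import torch
x0 = torch.randn(1, 4, 4)
J = compute_jacobian(torch.tanh, x0, bs=8)
print(J.shape)  # torch.Size([16, 16])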
c184fd03abea440e27bdd27bb1105778e7bde4b6
474
import math def pixel_distance(A, B): """ In 9th grade I sat in geometry class wondering "when then hell am I ever going to use this?"...today is that day. Return the distance between two pixels """ (col_A, row_A) = A (col_B, row_B) = B return math.sqrt(math.pow(col_B - col_A, 2) + math.pow(row_B - row_A, 2))
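# Worked example: the classic 3-4-5 right triangle.
print(pixel_distance((0, 0), (3, 4)))  # 5.0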
64853c44400428c8040ae47d1cc2cca17aed0a5f
475
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Word-level n-grams in a string

    By default, whitespace is assumed to be a word boundary.

    >>> ng.word_ngrams('This is not a test!')
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    If the sequence's length is less than or equal to n, the n-grams are
    simply the sequence itself.

    >>> ng.word_ngrams('Test!')
    [('Test!',)]

    Args:
        s: a string
        n: the n-gram length (default: 3)
        token_fn: a callable that splits the string into tokens

    Returns:
        list: tuples of word-level n-grams
    """
    tokens = token_fn(s)
    return __ngrams(tokens, n=min(len(tokens), n))
8be360785e38b8f427c509d63f5ecba3b6b2c020
476
def phosites_detail(text): """ create detail view output of phosphosites by accession. :param text: string of phos group ID :return: template """ results = browse_queries.browse_detail(text,'Phosphosite') table = browse_queries.phos_kin_query(text) # pass tables, results and style indicator to template for rendering, plus # variables for title info (related and text of acc no) return render_template('search_results.html', title="Phosphosite", style='double', results=results, table=table, related="Kinases", text=text)
5f1b67fadda3eb1dfe86e7e996e65197ff1eca3a
477
def convert_to_np_arrays(X):
    """
    Converts the input to a dense numpy array so the downstream methods
    work properly.
    """
    # Sparse matrices expose .todense(); other inputs pass through unchanged.
    try:
        X = X.todense()
    except AttributeError:
        pass
    X = np.array(X)
    if len(X.shape) > 2:
        X = reduce_shape(X)
    return X
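# Example (assumes scipy and a module-level `import numpy as np`): a sparse
# matrix is densified, everything else passes through np.array unchanged.
from scipy.sparse import csr_matrix
print(convert_to_np_arrays(csr_matrix([[1, 0], [0, 2]])))
# [[1 0]
#  [0 2]]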
68fdf6fd87df160e96acec5abb8af310886fccc2
478
from docopt import docopt def main(wf): """Run the workflow. Args: wf (workflow.Workflow): Active Workflow object. """ # Parse command-line arguments and call appropriate # command function. args = docopt(__doc__, wf.args, version=wf.version) log.debug('args=%r', args) if args.get('list'): return do_list(wf, args) if args.get('open'): return do_open(wf, args) if args.get('update'): return do_update(wf, args)
80e7199b2e70dc0ce9eb0e155b4c70420e0d4966
479
def reduce_arr(arr):
    """
    Slice away the axes along which the data never changes.

    Args:
        arr (np.ndarray): input array to reduce to its unique values

    Returns:
        red_ar (np.ndarray): array with the constant axes removed.
        data_axis (list): the axes that carry changing data.
    """
    ndim = len(arr.shape)
    data_axis = []
    slice_array = ()
    for i in range(ndim):
        mn = np.min(arr, axis=i)
        mx = np.max(arr, axis=i)
        eq = np.all(mn == mx)
        if not eq:
            data_axis.append(ndim - i - 1)
            slice_array += (slice(None),)
        else:
            slice_array += (0,)
    red_ar = arr[slice_array]
    return red_ar, data_axis
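# Worked example (assumes a module-level `import numpy as np`): axis 0 is
# constant and gets sliced away; the remaining axis carries the data.
import numpy as np
arr = np.array([[1, 2, 3], [1, 2, 3]])
red, axes = reduce_arr(arr)
print(red, axes)  # [1 2 3] [0]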
d207876b820c7d7b30f8af6c302181620b00bf25
480
import torch

def nll_lorentzian(preds, target, gamma):
    """
    Isotropic Lorentzian loss function
    :param preds: prediction values from NN of size [batch, particles, timesteps, (x,y,v_x,v_y)]
    :param target: target data of size [batch, particles, timesteps, (x,y,v_x,v_y)]
    :param gamma: The tensor for the FWHM of the distribution of size [batch, particles, timesteps, (x,y,v_x,v_y)]
    :return: value of the loss function normalised by (batch * number of particles)
    """
    gammasquared = gamma ** 2
    neg_log_p = torch.log(1 + ((preds - target) ** 2 / gammasquared))
    neg_log_p += torch.log(gamma)
    return neg_log_p.sum() / (target.size(0) * target.size(1))
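# Shape sketch: all tensors are [batch, particles, timesteps, features] and
# gamma must be strictly positive.
import torch
target = torch.randn(2, 3, 4, 4)
preds = torch.zeros_like(target)
gamma = torch.full_like(target, 0.5)
loss = nll_lorentzian(preds, target, gamma)  # scalar, normalised by 2 * 3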
24d6ea2c4b40bc0f8c27eebf0e402261d865836e
481
from pathlib import Path def get_archive(): """Ensure that the archive file exists and return its path. This is a function so the path can be made configurable in the future. Returns: :obj:`str`: The full local path to the archive file. """ filename = '/config/archive.txt' archfile = Path(filename) if not archfile.exists(): archfile.touch() return filename
78abc493d7f256ebf53ec2cfeb9ab4f1d42b5c02
482
from typing import Sequence from typing import Callable from typing import List def _filter_unique_configs( configs: Sequence[ProblemConfig], filter_fn: Callable[[ProblemConfig], bool] = lambda _: True, ) -> List[ProblemConfig]: # pytype: disable=annotation-type-mismatch """Filters a list of problem_config to their unique occurrences for testing. Args: configs: list of ProblemConfig. filter_fn: optional function to apply only to subset meeting this condition. Returns: List of unique occurrences for testing. """ observed_configs = set() new_configs = [] for problem_config in configs: if filter_fn(problem_config): if problem_config not in observed_configs: new_configs.append(problem_config) observed_configs.add(problem_config) return new_configs
98481fa9991726f3ba4253fb132f7f7e3cb2a420
483
def convert_units(size_str):
    """Convert a string with a binary prefix (e.g. '10kb') to a byte count."""
    unit = ''.join(ele for ele in size_str if not ele.isdigit()).strip().lower()
    # The unit is lower-cased above, so only lower-case keys can match;
    # unknown units fall back to plain bytes.
    return int(''.join(ele for ele in size_str if ele.isdigit())) * {
        "b": 1,
        "k": 2**10,
        "kb": 2**10,
        "m": 2**20,
        "mb": 2**20,
        "g": 2**30,
        "gb": 2**30,
        "t": 2**40,
        "tb": 2**40
    }.get(unit, 1)
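# Examples: suffix matching is case-insensitive; unknown suffixes mean bytes.
print(convert_units('10kb'))  # 10240
print(convert_units('3 MB'))  # 3145728
print(convert_units('512'))   # 512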
a9de044090bfd4311a27dbbf373361e7d88a1e06
484
def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set: """ Match the requested symbol reverse piecewise (split on ``::``) against the candidates. This allows you to under-specify the base namespace so that ``"MyClass"`` can match ``my_namespace::MyClass`` Args: candidates: set of possible matches for symbol symbol: the symbol to match against sep: the separator between identifier elements Returns: set of matches """ piecewise_list = set() for item in candidates: split_symbol = symbol.split(sep) split_item = item.split(sep) split_symbol.reverse() split_item.reverse() min_length = len(split_symbol) split_item = split_item[:min_length] if split_symbol == split_item: piecewise_list.add(item) return piecewise_list
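# Worked example: an under-specified symbol still matches the fully
# qualified candidate.
candidates = {'my_namespace::MyClass', 'other::MyClass2'}
print(match_piecewise(candidates, 'MyClass'))  # {'my_namespace::MyClass'}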
1c6d7240365ef22f753aa4195cfb5e879fc453e0
485
from distutils.version import LooseVersion

def is_kube_version_supported(kube_version, min_version=None, max_version=None):
    """Check if the k8s version is supported by the application.

    :param kube_version: the running or target k8s version
    :param min_version (optional): minimum k8s version supported by the app
    :param max_version (optional): maximum k8s version supported by the app
    :returns bool: True if k8s version is supported
    """
    if ((min_version is not None and LooseVersion(kube_version) < LooseVersion(min_version)) or
            (max_version is not None and LooseVersion(kube_version) > LooseVersion(max_version))):
        return False
    return True
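# Example checks (LooseVersion ordering, inclusive bounds):
print(is_kube_version_supported('1.21.3', min_version='1.19', max_version='1.23'))  # True
print(is_kube_version_supported('1.24.0', max_version='1.23'))                      # False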
f08a5e5eb9ac9928e2e08ddddd6d30db90e8c868
486
def chebi(name=None, identifier=None): """Build a ChEBI abundance node. :rtype: Abundance """ return Abundance(namespace='CHEBI', name=name, identifier=identifier)
bbe8cf217a545f2818d7957b01d7bbaf0a2cc6d2
487
def get_group(request):
    """returns the group matching the ``id`` request parameter"""
    group_id = request.matchdict.get('id', -1)
    group = Group.query.filter_by(id=group_id).first()
    # return an empty list rather than raising AttributeError when no group matches
    if not group:
        return []
    return [
        {
            'id': group.id,
            'name': group.name,
            'thumbnail_full_path':
                group.thumbnail.full_path if group.thumbnail else None,
            'created_by_id': group.created_by.id,
            'created_by_name': group.created_by.name,
            'users_count': len(group.users),
        }
    ]
6d53d8969ecebb882c6626b128a72f24dafa6997
488
def create_histogram(path_to_image, target_path=''):
    """
    creates a histogram of a given image and either shows or saves a plot

    Args:
        path_to_image: path to the image
        target_path: if given, saves a plot, otherwise (if empty) shows the plot

    Returns:
        the histogram plot
    """
    image = cv2.imread(path_to_image)
    depth = image.shape[2]
    for z in range(depth):
        im = image[:, :, z]
        mi = im.min()
        ma = im.max()
        if mi < 0 or ma > 255:
            raise ValueError("range error: min=" + str(mi) + " max=" + str(ma))

        # flatten the channel to a 1-D array of pixel values
        vals = im.flatten()

        # plot a min-max normalised histogram with 255 bins
        counts, bins = np.histogram(vals, 255)
        counts = (counts - min(counts)) / (max(counts) - min(counts))
        plt.hist(bins[:-1], bins, weights=counts)
        plt.xlim([0, 255])

    plt.xlabel('pixel value')
    plt.ylabel('count')

    if target_path == '':
        plt.show()
    else:
        plt.savefig(target_path + 'histo')
        plt.clf()

    return plt
cb68b5f8bd55120d4f720020b092af02b727a6ba
489
def task_6_list_all_supplier_countries(cur) -> list: """ List all supplier countries Args: cur: psycopg cursor Returns: 29 records """ cur.execute("""SELECT country FROM suppliers""") return cur.fetchall()
a3d8af1eb2948ebc01e408265d20b0055f1a0504
490
def _energy_to_length_factor(e_unit, l_unit): """ Convert the units of Planck's constant and speed of light :param e_unit: :type e_unit: str :param l_unit: :type l_unit: str :return: c,h """ dest_h_u = ug.parse_units('%s s' % e_unit) dest_c_u = ug.parse_units('%s/s' % l_unit) if dest_h_u.dimensionality != _h_unit.dimensionality: raise ValueError("e_unit should be a valid energy unit") if dest_c_u.dimensionality != _c_unit.dimensionality: raise ValueError('l_unit should be a valid length unit') h = ug.convert(sc.h, _h_unit, dest_h_u) c = ug.convert(sc.c, _c_unit, dest_c_u) return c, h
23e1dbfac7265ff1df4cf62e3609f91d5e327a35
491
def kev_to_wavelength(kev):
    """Calculate the wavelength in Angstrom from the energy in keV"""
    lamda = 12.3984 / kev  # keV to Angstrom
    return lamda
cfb3126e56bc0890dd8cf2caa50a240b380dad56
492
def _convert_rde_to_1_0_format(rde_data: dict) -> dict: """Convert defined entity to RDE 1.0. :param DefEntity rde_data: Defined entity dictionary :return: converted defined entity :rtype: dict """ new_rde = common_models.DefEntity(**rde_data) new_native_entity: AbstractNativeEntity = rde_utils.convert_runtime_rde_to_input_rde_version_format( # noqa: E501 new_rde.entity, rde_constants.RDEVersion.RDE_1_0_0) new_rde.entity = new_native_entity new_rde.entityType = common_models.EntityType.NATIVE_ENTITY_TYPE_1_0_0.value.get_id() # noqa: E501 return new_rde.to_dict()
931bc93c7326a4640892a3876885fcc19430bbe1
493
def additive_symbols(tokens, base_url): """``additive-symbols`` descriptor validation.""" results = [] for part in split_on_comma(tokens): result = pad(remove_whitespace(part), base_url) if result is None: return if results and results[-1][0] <= result[0]: return results.append(result) return tuple(results)
346eae19c5d4d936d0ad7f2cdba2191943cc7bca
494
def check_detection(frame, yx_exp, fwhm, snr_thresh, deltapix=3):
    """
    Verify if injected companion is recovered.

    Parameters
    ----------
    frame : 2d ndarray
    yx_exp : tuple(y, x)
        Expected position of the fake companion (= injected position).
    fwhm : int or float
        FWHM.
    snr_thresh : int or float
        S/N threshold.
    deltapix : int or float, optional
        Error margin in pixels, between the expected position and the
        recovered.

    """
    def verify_expcoord(vectory, vectorx, exp_yx):
        # accept the detection if any recovered (y, x) peak lies within
        # deltapix of the expected coordinates
        for coor in zip(vectory, vectorx):
            print(coor, exp_yx)
            if np.allclose(coor[0], exp_yx[0], atol=deltapix) and \
                    np.allclose(coor[1], exp_yx[1], atol=deltapix):
                return True
        return False

    table = vip.metrics.detection(frame, fwhm=fwhm, mode='lpeaks', bkg_sigma=5,
                                  matched_filter=False, mask=True,
                                  snr_thresh=snr_thresh, plot=False,
                                  debug=True, full_output=True, verbose=True)
    msg = "Injected companion not recovered"
    assert verify_expcoord(table.y, table.x, yx_exp), msg
ecb44b13aeed28b86713679d76aef90a7c686cf9
495
def _index_list(key_or_list, direction=None): """Helper to generate a list of (key, direction) pairs. Takes such a list, or a single key, or a single key and direction. """ if direction is not None: return [(key_or_list, direction)] else: if isinstance(key_or_list, string_type): return [(key_or_list, ASCENDING)] elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, " "key_or_list must be an instance of list") return key_or_list
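# Examples in the pymongo style this helper mirrors (assumes the module-level
# `string_type` alias and `ASCENDING` == 1 are in scope):
print(_index_list('age'))      # [('age', 1)]
print(_index_list('age', -1))  # [('age', -1)]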
e32ddb70a10d52e1f2595cac9cb99c0381b9a3e4
496
import os def load_only_test(cfg): """Load and process test data only Args: cfg (dict): configuration file Returns: DataLoader: test DataLoader """ # Set test path path_to_test = os.path.join(cfg["DATA_DIR"], "test/") # Load the test set test_dataset = TestLoader(path_to_test) # DatasetTransformer data_transforms = apply_preprocessing(cfg=cfg["DATASET"]["PREPROCESSING"]) test_dataset = DatasetTransformer( test_dataset, transforms.Compose(data_transforms["test"]) ) # Dataloaders test_loader = DataLoader( dataset=test_dataset, batch_size=cfg["TEST"]["BATCH_SIZE"], shuffle=False, num_workers=cfg["DATASET"]["NUM_THREADS"], ) if cfg["DATASET"]["VERBOSITY"]: print( f"The test set contains {len(test_loader.dataset)} images," f" in {len(test_loader)} batches" ) return test_loader
cc94603c97bb6477a31d436868933b97accb5c0d
497
def CalculateOSNames(os_name, os_variants): """Calculates all the names an OS can be called, according to its variants. @type os_name: string @param os_name: base name of the os @type os_variants: list or None @param os_variants: list of supported variants @rtype: list @return: list of valid names """ if os_variants: return ["%s+%s" % (os_name, v) for v in os_variants] else: return [os_name]
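# Worked example: variants expand to "name+variant"; without variants the
# base name is returned alone.
print(CalculateOSNames('debian', ['bullseye', 'bookworm']))  # ['debian+bullseye', 'debian+bookworm']
print(CalculateOSNames('debian', None))                      # ['debian']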
5689ed7da55cec929045e95344c60e7a06af711d
498
import json

import six
from io import StringIO

def generate_schema(schema_json, use_logical_types=False, custom_imports=None, avro_json_converter=None):
    """
    Generate file containing concrete classes for RecordSchemas in given avro schema json
    :param str schema_json: JSON representing avro schema
    :param bool use_logical_types: Map avro logical types to python types
    :param list[str] custom_imports: Add additional import modules
    :param str avro_json_converter: AvroJsonConverter type to use for default values
    :return Tuple[str, list[str]]: the generated module source and the list of record/enum names
    """
    if avro_json_converter is None:
        avro_json_converter = 'avrojson.AvroJsonConverter'

    if '(' not in avro_json_converter:
        avro_json_converter += f'(use_logical_types={use_logical_types}, schema_types=__SCHEMA_TYPES)'

    custom_imports = custom_imports or []
    names = schema.Names()
    make_avsc_object(json.loads(schema_json), names)

    names = [k for k in six.iteritems(names.names) if isinstance(k[1], (schema.RecordSchema, schema.EnumSchema))]
    names = sorted(names, key=lambda x: x[0])

    main_out = StringIO()
    writer = TabbedWriter(main_out)

    write_preamble(writer, use_logical_types, custom_imports)
    write_schema_preamble(writer)
    write_get_schema(writer)
    write_populate_schemas(writer)

    current_namespace = tuple()

    for name, field_schema in names:  # type: str, schema.Schema
        name = clean_fullname(name)
        namespace = tuple(name.split('.')[:-1])

        if namespace != current_namespace:
            current_namespace = namespace

        if isinstance(field_schema, schema.RecordSchema):
            logger.debug(f'Writing schema: {clean_fullname(field_schema.fullname)}')
            write_schema_record(field_schema, writer, use_logical_types)
        elif isinstance(field_schema, schema.EnumSchema):
            logger.debug(f'Writing enum: {field_schema.fullname}')
            write_enum(field_schema, writer)

    writer.set_tab(0)
    writer.write('\n__SCHEMA_TYPES = {')
    writer.tab()

    for name, field_schema in names:
        n = clean_fullname(field_schema.name)
        writer.write(f"\n'{n}': {n}Class,")

    writer.untab()
    writer.write('\n}\n\n')

    writer.write(f'_json_converter = {avro_json_converter}\n\n')

    value = main_out.getvalue()
    main_out.close()
    return value, [clean_fullname(name[0]) for name in names]
a339233427ef0e63a3b47216cee1b3ba03e4fc33
499