Columns:
  content: string, lengths 35 to 762k
  sha1: string, length 40
  id: int64, 0 to 3.66M
def geopad(lon, lat, data, /, nlon=1, nlat=0):
    """
    Return array padded circularly along longitude and over the poles
    for finite difference methods.
    """
    # Pad over longitude seams
    if nlon > 0:
        pad = ((nlon, nlon),) + (data.ndim - 1) * ((0, 0),)
        data = np.pad(data, pad, mode='wrap')
        lon = np.pad(lon, nlon, mode='wrap')  # should be vector

    # Pad over poles
    if nlat > 0:
        if (data.shape[0] % 2) == 1:
            raise ValueError(
                'Data must have even number of longitudes '
                'if you wish to pad over the poles.'
            )
        append = np.roll(  # descending in lat
            np.flip(data, axis=1), data.shape[0] // 2, axis=0
        )
        data = np.concatenate(
            (
                append[:, -nlat:, ...],  # -87.5, -88.5, -89.5 (crossover)
                data,  # -89.5, -88.5, -87.5, ..., 87.5, 88.5, 89.5 (crossover)
                append[:, :nlat, ...],  # 89.5, 88.5, 87.5
            ),
            axis=1,
        )
        lat = np.pad(lat, nlat, mode='symmetric')
        # mirror across each pole so the padded vector stays monotonically
        # ascending (the original applied 180 - lat on both sides, which
        # breaks monotonicity below the south pole)
        lat[:nlat] = -180 - lat[:nlat]
        lat[-nlat:] = 180 - lat[-nlat:]
    return lon, lat, data
8916dde690673b1d278ffab39ee3350f346a4182
3,658,607
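A minimal usage sketch of geopad above, on an invented 4x3 lon-lat grid (the coordinate values and numpy import are illustrative, not from the source):

import numpy as np

lon = np.array([0.0, 90.0, 180.0, 270.0])   # 4 longitudes (even count, required for pole padding)
lat = np.array([-60.0, 0.0, 60.0])          # 3 latitudes
data = np.random.rand(4, 3)                 # shape (nlon, nlat)

plon, plat, pdata = geopad(lon, lat, data, nlon=1, nlat=1)
print(pdata.shape)  # (6, 5): one wrapped column per longitude side, one mirrored row per pole
print(plat)         # [-120. -60. 0. 60. 120.]: monotonically ascending across both poles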
def SL_EAKF(N, loc_rad, taper='GC', ordr='rand', infl=1.0, rot=False, **kwargs):
    """
    Serial, covariance-localized EAKF.

    Ref: Karspeck, Alicia R., and Jeffrey L. Anderson. (2007):
    "Experimental implementation of an ensemble adjustment filter..."

    Used without localization, this should be equivalent (full ensemble
    equality) to the EnKF 'Serial'.
    """
    def assimilator(stats, twin, xx, yy):
        f, h, chrono, X0 = twin.f, twin.h, twin.t, twin.X0
        N1 = N - 1
        R = h.noise
        Rm12 = h.noise.C.sym_sqrt_inv

        E = X0.sample(N)
        stats.assess(0, E=E)

        for k, kObs, t, dt in progbar(chrono.forecast_range):
            E = f(E, t - dt, dt)
            E = add_noise(E, dt, f.noise, kwargs)

            if kObs is not None:
                stats.assess(k, kObs, 'f', E=E)
                y = yy[kObs]
                inds = serial_inds(ordr, y, R, anom(E)[0])
                locf_at = h.loc_f(loc_rad, 'y2x', t, taper)
                for i, j in enumerate(inds):
                    hE = h(E, t)
                    hx = mean(hE, 0)
                    Y = hE - hx
                    mu = mean(E, 0)
                    A = E - mu
                    # Update j-th component of observed ensemble
                    Yj = Rm12[j, :] @ Y.T
                    dyj = Rm12[j, :] @ (y - hx)
                    skk = Yj @ Yj  # N1 * prior var
                    if skk < 1e-9:
                        continue  # guard moved before the divisions by skk
                    su = 1 / (1 / skk + 1 / N1)        # N1 * KG
                    alpha = (N1 / (N1 + skk)) ** 0.5   # update contraction factor
                    dy2 = su * dyj / N1  # mean update
                    Y2 = alpha * Yj      # anomaly update
                    # Update state (regress update from observation space)
                    # Localized
                    local, coeffs = locf_at(j)
                    if len(local) == 0:
                        continue
                    Regression = (A[:, local] * coeffs).T @ Yj / np.sum(Yj ** 2)
                    mu[local] += Regression * dy2
                    A[:, local] += np.outer(Y2 - Yj, Regression)
                    # Without localization:
                    # Regression = A.T @ Yj / np.sum(Yj**2)
                    # mu += Regression * dy2
                    # A += np.outer(Y2 - Yj, Regression)
                    E = mu + A
                E = post_process(E, infl, rot)
            stats.assess(k, kObs, E=E)
    return assimilator
e7ca69f71cf83a4389086d14791902eb5a661b9e
3,658,608
def CalculateNMaxNCharge(mol):
    """
    #################################################################
    Most positive charge on N atoms -->QNmax

    Usage:
        result=CalculateNMaxNCharge(mol)

        Input: mol is a molecule object.

        Output: result is a numeric value.
    #################################################################
    """
    return _CalculateElementMaxNCharge(mol, AtomicNum=7)
ae63c3f2c6faa8b0d9f7d6ae3b320a9c3b1002d6
3,658,609
def cnn_5l4(image, **kwargs):
    """
    :param image: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keyword parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    activ = tf.nn.relu
    layer_1 = activ(conv(image, 'c1', n_filters=222, filter_size=4, stride=1,
                         pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=222, filter_size=2, stride=1,
                         pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_3 = activ(conv(layer_2, 'c3', n_filters=222, filter_size=2, stride=1,
                         pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_4 = activ(conv(layer_3, 'c4', n_filters=222, filter_size=2, stride=1,
                         pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_5 = activ(conv(layer_4, 'c5', n_filters=222, filter_size=2, stride=1,
                         pad='SAME', init_scale=np.sqrt(2), **kwargs))
    layer_lin = conv_to_fc(layer_5)
    return layer_lin
af059b9a2899c1adcc9f11f4742ffaac8a971dba
3,658,610
def read_dns_data(dns_fn):
    """
    Read data in from a DNS file

    :param str dns_fn: The filename of the DNS
    """
    begin_data = False
    dns_data = {}
    with open(dns_fn, 'r') as fed:
        for line in fed.readlines():
            if begin_data:
                if "t = " in line:
                    tc = float(line[3:])
                    dns_data.update({tc: {'N': np.empty((0, 3)),
                                          'MP': np.empty((0, 3))}})
                else:
                    data = [s.replace(',', '') for s in line.split()]
                    typ = data[0]
                    pos = np.array([float(data[i]) for i in range(2, 5)])
                    dns_data[tc][typ] = np.vstack([dns_data[tc][typ], pos])
            if line.strip() == "BEGIN DATA":
                begin_data = True
    return dns_data
2c73289c6284b47901a8f7c91bce6df75849c822
3,658,611
def arithmetic_mean(iterable):
    """Zero-length-safe arithmetic mean."""
    values = np.asarray(iterable)
    if not values.size:
        return 0
    return values.mean()
3972885d92654d842a163d64c47b585ad6865c98
3,658,612
def play_process(url):
    """Create and return a process that reads audio from url and sends it to the analog output."""
    return FfmpegProcess(f'ffmpeg -i {url} -f alsa default')
2246f9385e48dda9398752ecd9fa70914d17c55f
3,658,613
from typing import Iterable


def iterable_to_wikitext(items: Iterable[object], *, prefix: str = "\n* ") -> str:
    """
    Convert iterable to wikitext.

    Pages are converted to links. All other objects use their string
    representation.

    :param items: Items to iterate
    :param prefix: Prefix for each item when there is more than one item
    """
    items = list(items)  # materialize so generators are not exhausted by len()
    if not items:
        return ""
    if len(items) == 1:
        prefix = ""
    text = ""
    for item in items:
        if isinstance(item, BasePage):
            item = item.title(as_link=True, textlink=True)
        text += f"{prefix}{item}"
    return text
775bed839d890ab40aeace76a82f881e076cafa2
3,658,614
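Illustrative calls with plain strings, assuming the surrounding module provides BasePage (e.g. pywikibot.page.BasePage); a page instance would be rendered as a link instead:

print(repr(iterable_to_wikitext([])))                   # ''
print(repr(iterable_to_wikitext(["only item"])))        # 'only item' (prefix suppressed)
print(repr(iterable_to_wikitext(["first", "second"])))  # '\n* first\n* second'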
def plot_timeSeries(df, col_name, divide=None, xlabel="Days", line=True,
                    title="Time series values", figsize=(9, 9)):
    """
    Plot a column of the given time series DataFrame.

    Parameters
    ----------
    df: pd.DataFrame
        DataFrame indexed by days (i.e. the index is a pd.DatetimeIndex).
    col_name: str
        Indicates the specified column to plot.
    divide: str
        Indicates if and how to divide the plotted values.
        It can either be None, "year", "month" or "season".
        (The meteorological seasons are considered, and not the astronomical ones).
        That division is simply made graphically using different colors.
    xlabel: str
        Label to put on the x axis.
    line: bool
        Indicates whether to connect the points with a line.
    title: str
        Title of the plot.
    figsize: tuple
        Dimensions of the plot.

    Returns
    ----------
    matplotlib.axes.Axes
        The matplotlib Axes where the plot has been made.
    """
    fig, ax = plt.subplots(figsize=figsize)

    if not divide:
        ax.plot(df.index, df[col_name], 'o:' if line else 'o')
    else:
        groups = group_days_by(df.index, criterion=divide)
        color = None
        for group in groups:
            if divide == "season":
                colors = {"Winter": "blue", "Spring": "green",
                          "Summer": "yellow", "Fall": "red"}
                color = colors[group[0]]
            elif divide == "month":
                colors = {"January": "b", "February": "g", "March": "r",
                          "April": "c", "May": "m", "June": "y", "July": "k",
                          "August": "peru", "September": "crimson",
                          "October": "orange", "November": "darkgreen",
                          "December": "olivedrab"}
                color = colors[group[0]]
            ax.plot(group[1], df.loc[group[1], col_name], 'o:' if line else 'o',
                    color=color, label=group[0])

    ax.set_xlabel(xlabel)
    ax.set_ylabel(col_name)
    ax.set_title(title)
    ax.grid()
    if divide:
        ax.legend()
    return ax
279f74422ae6b186128347cc971a094c13f22c4b
3,658,615
def is_bv(a):
    """Return `True` if `a` is a Z3 bit-vector expression.

    >>> b = BitVec('b', 32)
    >>> is_bv(b)
    True
    >>> is_bv(b + 10)
    True
    >>> is_bv(Int('x'))
    False
    """
    return isinstance(a, BitVecRef)
7c1cd1d3d679cdceb12955e61f54861b248ff4a2
3,658,617
def bgsub_1D(raw_data, energy_axis, edge, **kwargs):
    """
    Full background subtraction function for the 1D case - optional LBA,
    log fitting, LCPL, and exponential fitting.

    For more information on the non-linear fitting function, see
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html

    Inputs:
    raw_data - 1D spectrum
    energy_axis - corresponding energy axis
    edge - edge parameters defined by KEM convention
    **kwargs:
        fit - choose the type of background fit, default == 'pl' == power law.
              Can also use 'exp' == exponential, 'lin' == linear, 'lcpl' == LCPL.
        log - Boolean; if True, log-transform data and fit using QR
              factorization. Default == False.
        nstd - Standard deviation spread of r error from non-linear power law
               fitting. Default == 100.
        ftol - default 1e-8. Relative error desired in the sum of squares.
        gtol - default 1e-8. Orthogonality desired between the function vector
               and the columns of the Jacobian.
        xtol - default 1e-8. Relative error desired in the approximate solution.
        maxfev - default 50000. Only change if you are consistently catching
                 runtime errors and loosening gtol/ftol is not producing a
                 good enough fit.
        method - default 'trf'; see
                 https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
                 for a description of the methods.

    Note: noisier data may need stricter tolerances on ftol/gtol. Anecdotally,
    a stricter gtol (as low as 1e-8) has a larger effect on the quality of the
    background subtraction.

    Outputs:
    bg_1D - background spectrum
    """
    fit_start_ch = eVtoCh(edge[0], energy_axis)
    fit_end_ch = eVtoCh(edge[1], energy_axis)
    zdim = len(raw_data)
    ewin = energy_axis[fit_start_ch:fit_end_ch]
    esub = energy_axis[fit_start_ch:]
    bg_1D = np.zeros_like(raw_data)
    fy = np.zeros((1, zdim))
    fy[0, :] = raw_data

    ## Either fast fitting -> log fitting, or slow fitting -> non-linear fitting
    log = kwargs.get('log', False)

    ## Fitting parameters for non-linear curve fitting if non-log based fitting
    ftol = kwargs.get('ftol', 1e-8)
    gtol = kwargs.get('gtol', 1e-8)
    xtol = kwargs.get('xtol', 1e-8)
    maxfev = kwargs.get('maxfev', 50000)
    method = kwargs.get('method', 'trf')

    ## Determine if fitting is power law or exponential
    if 'fit' in kwargs.keys():
        fit = kwargs['fit']
        if fit == 'exp':
            fitfunc = exponential
            bounds = ([0, 0], [np.inf, np.inf])
        elif fit == 'pl':
            fitfunc = powerlaw
        elif fit == 'lcpl':
            fitfunc = lcpowerlaw
        elif fit == 'lin':
            fitfunc = linear
        else:
            print("Unrecognized fitting function; please use 'pl' for power law, "
                  "'exp' for exponential, 'lin' for linear or 'lcpl' for LCPL.")
    else:
        fitfunc = powerlaw

    ## If fast fitting linear background, find fit using QR factorization
    if fitfunc == linear:
        Blin = fy[:, fit_start_ch:fit_end_ch]
        Alin = np.zeros((len(ewin), 2))
        Alin[:, 0] = np.ones(len(ewin))
        Alin[:, 1] = ewin
        Xlin = qrnorm(Alin, Blin.T)
        Elin = np.zeros((len(esub), 2))
        Elin[:, 0] = np.ones(len(esub))
        Elin[:, 1] = esub
        bgndLINline = np.dot(Xlin.T, Elin.T)
        bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndLINline
    ## If fast log fitting and power law, find fit using QR factorization
    elif log and fitfunc == powerlaw:
        Blog = fy[:, fit_start_ch:fit_end_ch]
        Alog = np.zeros((len(ewin), 2))
        Alog[:, 0] = np.ones(len(ewin))
        Alog[:, 1] = np.log(ewin)
        Xlog = qrnorm(Alog, np.log(abs(Blog.T)))
        Elog = np.zeros((len(esub), 2))
        Elog[:, 0] = np.ones(len(esub))
        Elog[:, 1] = np.log(esub)
        bgndPLline = np.exp(np.dot(Xlog.T, Elog.T))
        bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndPLline
    ## If fast log fitting and exponential, find fit using QR factorization
    elif log and fitfunc == exponential:
        Bexp = fy[:, fit_start_ch:fit_end_ch]
        Aexp = np.zeros((len(ewin), 2))
        Aexp[:, 0] = np.ones(len(ewin))
        Aexp[:, 1] = ewin
        Xexp = qrnorm(Aexp, np.log(abs(Bexp.T)))
        Eexp = np.zeros((len(esub), 2))
        Eexp[:, 0] = np.ones(len(esub))
        Eexp[:, 1] = esub
        bgndEXPline = np.exp(np.dot(Xexp.T, Eexp.T))
        bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndEXPline
    ## Power law non-linear curve fitting using scipy.optimize.curve_fit
    elif not log and fitfunc == powerlaw:
        popt_pl, pcov_pl = curve_fit(powerlaw, ewin,
                                     raw_data[fit_start_ch:fit_end_ch],
                                     maxfev=maxfev, method=method, verbose=0,
                                     ftol=ftol, gtol=gtol, xtol=xtol)
        c, r = popt_pl
        bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - powerlaw(energy_axis[fit_start_ch:], c, r)
    ## Exponential non-linear curve fitting using scipy.optimize.curve_fit
    elif not log and fitfunc == exponential:
        popt_exp, pcov_exp = curve_fit(exponential, ewin,
                                       raw_data[fit_start_ch:fit_end_ch],
                                       maxfev=maxfev, method=method, verbose=0,
                                       p0=[0, 0], ftol=ftol, gtol=gtol, xtol=xtol)
        a, b = popt_exp
        bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - exponential(energy_axis[fit_start_ch:], a, b)
    ## LCPL non-linear curve fitting using scipy.optimize.curve_fit
    elif fitfunc == lcpowerlaw:
        nstd = kwargs.get('nstd', 100)
        popt_pl, pcov_pl = curve_fit(powerlaw, ewin,
                                     raw_data[fit_start_ch:fit_end_ch],
                                     maxfev=maxfev, method=method, verbose=0,
                                     ftol=ftol, gtol=gtol, xtol=xtol)
        c, r = popt_pl
        perr = np.sqrt(np.diag(pcov_pl))
        rstd = perr[1]
        popt_lcpl, pcov_lcpl = curve_fit(lcpowerlaw, ewin,
                                         raw_data[fit_start_ch:fit_end_ch],
                                         maxfev=maxfev, method=method, verbose=0,
                                         p0=[c / 2, r - nstd * rstd, c / 2, r + nstd * rstd],
                                         ftol=ftol, gtol=gtol, xtol=xtol)
        c1, r1, c2, r2 = popt_lcpl
        bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - lcpowerlaw(energy_axis[fit_start_ch:], c1, r1, c2, r2)
    return bg_1D
a3f273e55f49811ce9af4ee5c23d4078fe83535a
3,658,618
import random


def about_garble():
    """
    about_garble
    Returns one of several strings for the about page
    """
    garble = [
        "leverage agile frameworks to provide a robust synopsis for high level overviews.",
        "iterate approaches to corporate strategy and foster collaborative thinking to further the overall value proposition.",
        "organically grow the holistic world view of disruptive innovation via workplace change management and empowerment.",
        "bring to the table win-win survival strategies to ensure proactive and progressive competitive domination.",
        "ensure the end of the day advancement, a new normal that has evolved from epistemic management approaches and is on the runway towards a streamlined cloud solution.",
        "provide user generated content in real-time will have multiple touchpoints for offshoring.",
    ]
    return random.choice(garble)
c391891f97a7bc6df5287173aa160713cdfff675
3,658,619
def parse_term_5_elems(expr_list, idx):
    """
    Try to parse a terminal node from five elements of {expr_list}, starting
    from {idx}.
    Return the new expression list on success, None on error.
    """
    # The only five-element terminal is pk_h
    if expr_list[idx:idx + 2] != [OP_DUP, OP_HASH160]:
        return
    if not isinstance(expr_list[idx + 2], bytes):
        return
    if len(expr_list[idx + 2]) != 20:
        return
    if expr_list[idx + 3:idx + 5] != [OP_EQUAL, OP_VERIFY]:
        return

    node = Node().construct_pk_h(expr_list[idx + 2])
    expr_list[idx:idx + 5] = [node]
    return expr_list
8c0c365483c44a767b3e254f957af175125da2d6
3,658,620
def display_clusters():
    """ Method to display the clusters """
    offset = int(request.args.get('offset', '0'))
    limit = int(request.args.get('limit', '50'))
    clusters_id_sorted = sorted(clusters, key=lambda x: -len(clusters[x]))
    batches = chunks(range(len(clusters_id_sorted)), size=limit)
    return render_template('clusters.html',
                           offset=offset,
                           limit=limit,
                           batches=batches,
                           ordered_list=clusters_id_sorted[offset:offset + limit + 1],
                           idx_to_path=idx_to_path,
                           clusters=clusters)
e3d578cff54e66ee4b096bcf1e7181a3bac1c845
3,658,621
def densify_sampled_item_predictions(tf_sample_predictions_serial, tf_n_sampled_items, tf_n_users):
    """
    Turns the serial predictions of the sampled items into a dense matrix of
    shape [n_users, n_sampled_items]

    :param tf_sample_predictions_serial:
    :param tf_n_sampled_items:
    :param tf_n_users:
    :return:
    """
    densified_shape = tf.cast(tf.stack([tf_n_users, tf_n_sampled_items]), tf.int32)
    densified_predictions = tf.reshape(tf_sample_predictions_serial, shape=densified_shape)
    return densified_predictions
e1dbe0e74c791e1d9b7613fbe52b034a60376497
3,658,622
def get_market_book(symbols=None, **kwargs):
    """
    Top-level function to obtain Book data for a symbol or list of symbols

    Parameters
    ----------
    symbols: str or list, default None
        A symbol or list of symbols
    kwargs:
        Additional Request Parameters (see base class)
    """
    return Book(symbols, **kwargs).fetch()
8b1bc8ed07a611cef490f616996aae05ce445ff1
3,658,623
def ndarange(*args, shape: tuple = None, **kwargs):
    """Generate arange arrays of arbitrary dimensions."""
    arr = np.array([np.arange(*args[i], **kwargs) for i in range(len(args))])
    return arr.reshape(shape) if shape is not None else arr.T
42a5070e653386a71a9be7949f5e9341bfbc50c9
3,658,624
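A short sketch of how ndarange composes its output: each positional argument is a tuple of np.arange() arguments, and the ranges are assumed to have equal lengths so the default transpose lines up.

pts = ndarange((0, 3), (10, 13))
print(pts)
# [[ 0 10]
#  [ 1 11]
#  [ 2 12]]
print(ndarange((0, 6), shape=(2, 3)))
# [[0 1 2]
#  [3 4 5]]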
def runningSum(self, nums):
    """
    :type nums: List[int]
    :rtype: List[int]

    5% faster, 100% less memory
    """
    runningSum = [0] * len(nums)
    for i in range(len(nums)):
        for j in range(i + 1):
            runningSum[i] += nums[j]
    return runningSum
393849c4aa1d23b15717748066e21abceaf6d5d9
3,658,625
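The nested loop above is O(n^2); a linear-time prefix sum, sketched here with the standard library's itertools.accumulate, produces the same result:

from itertools import accumulate

def running_sum_linear(nums):
    """O(n) prefix-sum equivalent of the quadratic version above."""
    return list(accumulate(nums))

print(running_sum_linear([1, 2, 3, 4]))  # [1, 3, 6, 10]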
def edit_recovery(request, recovery_id):
    """This view is used to edit/update existing tag recoveries."""
    clip_codes = sorted(list(CLIP_CODE_CHOICES), key=lambda x: x[0])
    tag_types = sorted(list(TAG_TYPE_CHOICES), key=lambda x: x[0])
    tag_origin = sorted(list(TAG_ORIGIN_CHOICES), key=lambda x: x[0])
    tag_colours = sorted(list(TAG_COLOUR_CHOICES), key=lambda x: x[0])
    tag_position = sorted(list(TAG_POSITION_CHOICES), key=lambda x: x[0])

    recovery = get_object_or_404(Recovery, id=recovery_id)
    report = recovery.report

    form = RecoveryForm(
        report_id=report.id, instance=recovery, data=request.POST or None
    )

    if request.method == "POST":
        if form.is_valid():
            recovery = form.save(report)
            return redirect("tfat:recovery_detail", recovery_id=recovery.id)

    return render(
        request,
        "tfat/recovery_form.html",
        {
            "form": form,
            "action": "edit",
            "clip_codes": clip_codes,
            "tag_types": tag_types,
            "tag_origin": tag_origin,
            "tag_colours": tag_colours,
            "tag_position": tag_position,
        },
    )
f9da1a4377efd436e93cf2be0af2c2e09cc3e31d
3,658,628
def e(string, *args):
    """Function which formats error messages."""
    return string.format(*[pformat(arg) for arg in args])
8734d01544211fde3f8ee24f0f91dc06763d4a1f
3,658,629
def membership_ending_task(user):
    """
    :return: Next task that will end the membership of the user
    """
    task = (UserTask.q
            .filter_by(user_id=user.id,
                       status=TaskStatus.OPEN,
                       type=TaskType.USER_MOVE_OUT)
            # Casting jsonb -> bool directly is only supported since PG v11
            .filter(UserTask.parameters_json['end_membership']
                    .cast(String).cast(Boolean) == True)
            .order_by(UserTask.due.asc())).first()
    return task
2043c87eaabbf3360f1bec331a03e1c7db8bc783
3,658,630
import warnings


def hmsstr_to_rad(hmsstr):
    """Convert HH:MM:SS.SS sexagesimal string to radians.
    """
    hmsstr = np.atleast_1d(hmsstr)
    hours = np.zeros(hmsstr.size)

    for i, s in enumerate(hmsstr):
        # parse string using regular expressions
        match = hms_re.match(s)
        if match is None:
            warnings.warn("Input is not a valid sexagesimal string: %s" % s)
            hours[i] = np.nan
            continue
        d = match.groupdict(0)  # default value is 0

        # Check sign of hms string
        if d['sign'] == '-':
            sign = -1
        else:
            sign = 1

        hour = float(d['hour']) + \
            float(d['min']) / 60.0 + \
            float(d['sec']) / 3600.0
        hours[i] = sign * hour

    return hour_to_rad(hours)
e57266c43e3b0f8893f9c71cfbea609cf7c93709
3,658,631
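hms_re is referenced but not defined in this snippet; a hypothetical pattern compatible with the groupdict keys used above ('sign', 'hour', 'min', 'sec') could look like this (an assumption, not the module's actual definition):

import re

# Hypothetical definition; the real module supplies its own hms_re.
hms_re = re.compile(
    r'^\s*(?P<sign>[-+])?(?P<hour>\d{1,2}):(?P<min>\d{2}):(?P<sec>\d{2}(?:\.\d*)?)\s*$'
)

m = hms_re.match('-12:34:56.78')
print(m.groupdict(0))  # {'sign': '-', 'hour': '12', 'min': '34', 'sec': '56.78'}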
def find_optimum_transformations(init_trans, s_pts, t_pts, template_spacing,
                                 e_func, temp_tree, errfunc):
    """
    Vary the initial transformation by a translation of up to three times the
    grid spacing and compute the transformation with the smallest least
    squares error.

    Parameters:
    -----------
    init_trans : 4-D transformation matrix
        Initial guess of the transformation matrix from the subject brain to
        the template brain.
    s_pts : Vertex coordinates in the subject brain.
    t_pts : Vertex coordinates in the template brain.
    template_spacing : float
        Grid spacing of the vertices in the template brain.
    e_func : str
        Error function to use. Either 'balltree' or 'euclidean'.
    temp_tree : BallTree(t_pts) if e_func is 'balltree'.
    errfunc : The error function for the computation of the least squares error.

    Returns:
    --------
    poss_trans : list of 4-D transformation matrices
        List of one transformation matrix for each variation of the initial
        transformation with the smallest least squares error.
    """
    # template spacing in meters
    tsm = template_spacing / 1e3
    # Try different initial translations in space to avoid local minima
    # No label should require a translation by more than 3 times the grid spacing (tsm)
    auto_match_iters = np.array([[0., 0., 0.],
                                 [0., 0., tsm], [0., 0., tsm * 2], [0., 0., tsm * 3],
                                 [tsm, 0., 0.], [tsm * 2, 0., 0.], [tsm * 3, 0., 0.],
                                 [0., tsm, 0.], [0., tsm * 2, 0.], [0., tsm * 3, 0.],
                                 [0., 0., -tsm], [0., 0., -tsm * 2], [0., 0., -tsm * 3],
                                 [-tsm, 0., 0.], [-tsm * 2, 0., 0.], [-tsm * 3, 0., 0.],
                                 [0., -tsm, 0.], [0., -tsm * 2, 0.], [0., -tsm * 3, 0.]])

    # possible translation matrices
    poss_trans = []
    for p, ami in enumerate(auto_match_iters):
        # vary the initial translation value by adding ami
        tx, ty, tz = (init_trans[0, 3] + ami[0],
                      init_trans[1, 3] + ami[1],
                      init_trans[2, 3] + ami[2])
        sx, sy, sz = init_trans[0, 0], init_trans[1, 1], init_trans[2, 2]
        rx, ry, rz = 0, 0, 0

        # starting point for finding the transformation matrix trans which
        # minimizes the error between np.dot(s_pts, trans) and t_pts
        x0 = np.array([tx, ty, tz, rx, ry, rz])

        def error(x):
            tx_, ty_, tz_, rx_, ry_, rz_ = x
            trans0 = np.zeros([4, 4])
            trans0[:3, :3] = rotation3d(rx_, ry_, rz_) * [sx, sy, sz]
            trans0[0, 3] = tx_
            trans0[1, 3] = ty_
            trans0[2, 3] = tz_
            # rotate and scale
            estim = np.dot(s_pts, trans0[:3, :3].T)
            # translate
            estim += trans0[:3, 3]
            if e_func == 'balltree':
                err = errfunc(estim[:, :3], temp_tree)
            else:  # e_func == 'euclidean'
                err = errfunc(estim[:, :3], t_pts)
            return err

        est, _, info, msg, _ = leastsq(error, x0, full_output=True)
        est = np.concatenate((est, (init_trans[0, 0],
                                    init_trans[1, 1],
                                    init_trans[2, 2])))
        trans = _trans_from_est(est)
        poss_trans.append(trans)

    return poss_trans
bbc4786827c22158eee33ff9a5e4aaa2939b9705
3,658,632
def execute_transaction(query):
    """Execute Transaction"""
    return Neo4jHelper.run_single_query(query)
51e8e58bb4cad30b9ae9c7b7d7901ee212c9d26a
3,658,633
from scipy.linalg import null_space

from angle_set import create_theta, get_n_linear, perturbe_points


def generate_linear_constraints(points, verbose=False):
    """Given point coordinates, generate angle constraints."""
    N, d = points.shape
    num_samples = get_n_linear(N) * 2
    if verbose:
        print('N={}, generating {}'.format(N, num_samples))

    M = int(N * (N - 1) * (N - 2) / 2)
    thetas = np.empty((num_samples, M + 1))
    for i in range(num_samples):
        points_pert = perturbe_points(points, magnitude=0.0001)
        theta, __ = create_theta(points_pert)
        thetas[i, :-1] = theta
        thetas[i, -1] = -1

    CT = null_space(thetas)
    A = CT[:-1, :].T
    b = CT[-1, :]
    return A, b
b98354cd6b57d7a33c6e8a43da80b358e358138c
3,658,634
def add_node_to_parent(node, parent_node):
    """
    Add given object under the given parent preserving its local transformations

    :param node: str
    :param parent_node: str
    """
    return maya.cmds.parent(node, parent_node, add=True, s=True)
1f264b7e30c6ebc2285faa987ffc6142ec62d87f
3,658,635
def coerce(from_, to, **to_kwargs):
    """
    A preprocessing decorator that coerces inputs of a given type by passing
    them to a callable.

    Parameters
    ----------
    from_ : type or tuple of types
        Input types on which to call ``to``.
    to : function
        Coercion function to call on inputs.
    **to_kwargs
        Additional keywords to forward to every call to ``to``.

    Examples
    --------
    >>> @preprocess(x=coerce(float, int), y=coerce(float, int))
    ... def floordiff(x, y):
    ...     return x - y
    ...
    >>> floordiff(3.2, 2.5)
    1

    >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
    ... def add_binary_strings(x, y):
    ...     return bin(x + y)[2:]
    ...
    >>> add_binary_strings('101', '001')
    '110'
    """
    def preprocessor(func, argname, arg):
        if isinstance(arg, from_):
            return to(arg, **to_kwargs)
        return arg
    return preprocessor
61ccce8b9ffbec3e76aa9e78face469add28437e
3,658,636
def Binary(value):
    """construct an object capable of holding a binary (long) string value."""
    return value
2a33d858b23ac2d72e17ea8ede294c5311cb74be
3,658,638
def _get_domain_session(token, domain_name=None):
    """ Return v3 session for token """
    domain_name = domain_name or 'default'
    auth = v3.Token(auth_url=get_auth_url(),
                    domain_id=domain_name,
                    token=token)
    return session.Session(auth=auth, user_agent=USER_AGENT, verify=verify_https())
1ad7dcd8a9b6ea12e1a73886581c86252602a438
3,658,639
import six


def fix_troposphere_references(template):
    """Traverse the troposphere ``template`` looking for missing references.
    Fix them by adding a new parameter for those references."""

    def _fix_references(value):
        if isinstance(value, troposphere.Ref):
            name = value.data['Ref']
            if name not in (list(template.parameters.keys()) +
                            list(template.resources.keys())) \
                    and not name.startswith('AWS::'):
                template.add_parameter(
                    troposphere.Parameter(
                        name,
                        Type=getattr(value, '_type', 'String'),
                    )
                )
        elif isinstance(value, troposphere.Join):
            for v in value.data['Fn::Join'][1]:
                _fix_references(v)
        elif isinstance(value, troposphere.BaseAWSObject):
            for _, v in six.iteritems(value.properties):
                _fix_references(v)

    for _, resource in six.iteritems(template.resources):
        for _, value in six.iteritems(resource.properties):
            _fix_references(value)

    return template
9570e10262d7293a79b76f78508e57289d9b1e2d
3,658,641
import configparser


def parse_config_to_dict(cfg_file, section):
    """
    Reads config file and returns a dict of parameters.

    Args:
        cfg_file: <String> path to the configuration ini-file
        section: <String> section of the configuration file to read

    Returns:
        cfg: <dict> configuration parameters of 'section' as a dict
    """
    cfg = configparser.ConfigParser()
    cfg.read(cfg_file)
    if cfg.has_section(section):
        return dict(cfg.items(section))
    else:
        print("Section '%s' not found in file %s!" % (section, cfg_file))
        return None
021e3594f3130e502934379c0f5c1ecea228017b
3,658,642
def cnn_net(data, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2, win_size=3):
    """
    Conv net
    """
    # embedding layer
    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])

    # convolution layer
    conv_3 = fluid.nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim,
        filter_size=win_size,
        act="tanh",
        pool_type="max")

    # fully connected layer
    fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)

    # softmax layer
    prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act="softmax")

    return prediction, fc_1
47127d5124f48b2be187d15291c2f2bc63f072d7
3,658,643
def FormatRow(Cn, Row, COLSP):
    """Format a row of values into fixed-width columns sized by the
    column-name lengths in Cn plus COLSP spacing."""
    fRow = ""
    for i, c in enumerate(Row):
        sc = str(c)
        lcn = len(Cn[i])
        sc = sc[0:min(len(sc), lcn + COLSP - 2)]
        fRow += sc + " " * (COLSP + lcn - len(sc))
    return fRow
53d43fc897d1db5ed3c47d6046d90548939b1298
3,658,645
def handle_release(pin, evt):
    """ Clears the last tone/light when a button is released. """
    if pin > 4:
        return False
    pin -= 1
    explorerhat.light[pin].off()
    tone.power_off()
f4833bb289c9dfc45cd572ad754bd270c758ed09
3,658,646
from typing import List


def makeRoute(start: str, end: str) -> List[str]:
    """Find the shortest route between two systems.

    :param str start: string name of the starting system. Must exist in bbData.builtInSystemObjs
    :param str end: string name of the target system. Must exist in bbData.builtInSystemObjs
    :return: list of string system names where the first element is start, the
             last element is end, and all intermediary systems are adjacent
    :rtype: list[str]
    """
    return bbAStar(start, end, bbData.builtInSystemObjs)
6045b07ff5ceceacea4ad43ae2d52a67a0f46ec9
3,658,647
def norm_error(series):
    """Normalize time series.
    """
    new_series = deepcopy(series)
    new_series[:, 0] = series[:, 0] - np.mean(series[:, 0])
    return 2 * new_series / max(abs(new_series[:, 0]))
a7af6be8b8ddc800609c3385a96f5a80dfd02853
3,658,649
def f1d(x):
    """Non-linear function for simulation"""
    return 1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x)
75e3bd8a90fe41dfded9b6063868b6766351a8b0
3,658,650
def get_field_map(src, flds):
    """
    Returns a field map for an arcpy data item from a list or dictionary.
    Useful for operations such as renaming columns or merging feature classes.

    Parameters:
    -----------
    src: str, arcpy data item or arcpy.mp layer or table
        Source data item containing the desired fields.
    flds: dict <str: str>
        Mapping between old (keys) and new field names (values).

    Returns:
    --------
    arcpy.FieldMappings
    """
    mappings = arcpy.FieldMappings()
    if isinstance(flds, list):
        flds = {n: n for n in flds}
    for old_name, new_name in flds.items():
        fm = arcpy.FieldMap()
        fm.addInputField(src, old_name)
        out_f = fm.outputField
        out_f.name = new_name
        out_f.aliasName = new_name
        fm.outputField = out_f
        fm.outputField.name = new_name
        mappings.addFieldMap(fm)
    return mappings
18e6bbae491659b7819aa3584eb40242dea93f11
3,658,651
def b32qlc_decode(value):
    """
    Decodes a value in qlc encoding to bytes using base32 algorithm
    with a custom alphabet: '13456789abcdefghijkmnopqrstuwxyz'

    :param value: the value to decode
    :type: bytes

    :return: decoded value
    :rtype: bytes

    >>> b32qlc_decode(b'fxop4ya=')
    b'okay'
    """
    return b32decode(value.translate(QLC_DECODE_TRANS))
8b5bbb0f1900a3b89486c81561fd4c253604287e
3,658,652
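QLC_DECODE_TRANS is not shown in the snippet; a plausible construction, assuming it maps the custom alphabet back to the RFC 4648 base32 alphabet before b32decode, would be:

from base64 import b32decode

# Assumed: translate the custom alphabet to standard base32 ('=' padding passes through).
QLC_ALPHABET = b'13456789abcdefghijkmnopqrstuwxyz'
B32_ALPHABET = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
QLC_DECODE_TRANS = bytes.maketrans(QLC_ALPHABET, B32_ALPHABET)

print(b32decode(b'fxop4ya='.translate(QLC_DECODE_TRANS)))  # b'okay'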
def createPreProcessingLayers():
    """
    Creates a model with the initial pre-processing layers.
    """
    model = Sequential()
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((50, 20), (0, 0))))
    return model
1e087ae4bdd1a942845f4f7554e1b27436c6783e
3,658,653
def get_random_atoms(a=2.0, sc_size=2, numbers=[6, 8], set_seed: int = None):
    """Create a random structure."""
    if set_seed:
        np.random.seed(set_seed)

    cell = np.eye(3) * a
    positions = np.array([[0, 0, 0], [a / 2, a / 2, a / 2]])
    unit_cell = Atoms(cell=cell, positions=positions, numbers=numbers, pbc=True)
    multiplier = np.identity(3) * sc_size
    atoms = make_supercell(unit_cell, multiplier)
    atoms.positions += (2 * np.random.rand(len(atoms), 3) - 1) * 0.1
    flare_atoms = FLARE_Atoms.from_ase_atoms(atoms)

    return flare_atoms
710592af7db3e24529b68b84e112641b5da63a98
3,658,654
def vgg16_bn(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
    return model
34f8e4965555ed4cb046c8ab4e5cde799d887040
3,658,656
import numpy
from numpy import median  # `median` is used unqualified below


def tau(x, cval):
    """Robust estimators of location and scale, with breakdown points of 50%.

    Also referred to as: Tau measure of location by Yohai and Zamar

    Source: Yohai and Zamar JASA, vol 83 (1988), pp 406-413 and
    Maronna and Zamar Technometrics, vol 44 (2002), pp. 307-317"""
    med = median(x)
    mad = median(numpy.abs(x - med))
    zscore = 0.675  # Z-score of the 75th percentile of the normal distribution
    s = zscore * mad

    wnom = 0
    wden = 0
    for i in range(len(x)):
        y = (x[i] - med) / s
        temp = (1 - (y / cval) ** 2) ** 2
        # biweight: points beyond cval scaled deviations get zero weight
        # (the original tested abs(temp) <= cval, which compares the weight
        # itself rather than the scaled deviation)
        if abs(y) <= cval:
            wnom += temp * x[i]
            wden += temp
    return wnom / wden
6f75ee23f50e94d1ee2754949f5c102d63ac4cab
3,658,657
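An illustrative call to tau on data with one gross outlier; cval=4.5 is an assumed tuning constant, not taken from the source:

data = numpy.array([9.8, 9.9, 10.0, 10.1, 10.2, 50.0])
print(tau(data, cval=4.5))  # close to 10.0; the outlier receives zero weight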
def shn_gis_location_represent(id, showlink=True):
    """ Represent a location given its id """
    table = db.gis_location
    try:
        location = db(table.id == id).select(table.id,
                                             table.name,
                                             table.level,
                                             table.parent,
                                             table.lat,
                                             table.lon,
                                             cache=(cache.ram, 60),
                                             limitby=(0, 1)).first()
        return shn_gis_location_represent_row(location, showlink)
    except:
        try:
            # "Invalid" => data consistency wrong
            represent = location.id
        except:
            represent = NONE
    return represent
758dfb8e32178e864f838a790eadf598f65ae6ec
3,658,658
def de_pearson_dataframe(df, genes, pair_by='type', gtex=True, tcga=True):
    """
    PearsonR scores of gene differential expression between tumor and normal types.

    1. Calculate log2FC of genes for TCGA tumor samples with matching TCGA normal types
    2. Compare log2fc to tumor type compared to all other normal types
    3. Calculate PearsonR and save

    :param pd.DataFrame df: Exp/TPM dataframe containing "type"/"tissue/tumor/label" metadata columns
    :param list genes: Genes to use in differential expression calculation
    :param str pair_by: How to pair tumors/normals. Either by "type" or "tissue"
    :param bool gtex: If True, includes GTEx in normal set
    :param bool tcga: If True, includes TCGA in normal set

    :return: PearsonR dataframe
    :rtype: pd.DataFrame
    """
    # Subset by Tumor/Normal
    tumor = df[df.label == 'tcga-tumor']
    tcga_n = df[df.label == 'tcga-normal']

    # Determine normal comparison group based on options
    if gtex and tcga:
        normal = df[df.tumor == 'no']
    elif gtex:
        normal = df[df.label == 'gtex']
    else:
        normal = tcga_n

    # Identify tumor types with paired tcga-normal
    tum_types = [x for x in sorted(tumor[pair_by].unique())
                 if x in sorted(df[df.label == 'tcga-normal'][pair_by].unique())]
    norm_types = []

    # For all paired tumor_types, calculate l2fc, then PearsonR of l2fc to all normal tumor types
    pearson_l2fc = defaultdict(list)
    for tum_type in tum_types:
        # First calculate TCGA tumor/normal prior for comparison
        t_med = tumor[tumor[pair_by] == tum_type][genes].median()
        n_med = tcga_n[tcga_n[pair_by] == tum_type][genes].median()
        prior_l2fc = log2fc(t_med, n_med)

        # For every normal type, calculate pearsonR correlation
        for (norm_type, label), _ in normal.groupby(pair_by).label.value_counts().iteritems():
            if tum_type == norm_type:
                l2fc = prior_l2fc
            else:
                n_med = normal[normal[pair_by] == norm_type][genes].median()
                l2fc = log2fc(t_med, n_med)
            # Calculate PearsonR of l2fc and comparison tissue/type
            pearson_r = round(pearsonr(prior_l2fc, l2fc)[0], 2)
            pearson_l2fc[tum_type[:20]].append(pearson_r)
            norm_label = '{}_{}'.format(label, norm_type[:20])
            if norm_label not in norm_types:
                norm_types.append(norm_label)

    return pd.DataFrame(pearson_l2fc, index=norm_types)
29423402b24acc67a278cbdee03916add4228d7d
3,658,659
def load_YUV_as_dic_tensor(path_img):
    """
    Construct a dic with 3 entries ('y', 'u', 'v'), each of them is a tensor
    and is loaded from path_img + key + '.png'.

    ! Return a dictionary of 3D tensors (i.e. without a dummy batch index)
    """
    dic_res = {}
    key = ['y', 'u', 'v']

    for k in key:
        img = Image.open(path_img + '_' + k + '.png')
        # check if image mode is correct: it should be a one
        # channel uint8 image (i.e. mode L)
        if img.mode != 'L':
            img = img.convert('L')
        dic_res[k] = to_tensor(img)

    return dic_res
b0fe081b36c70ba8a185f151b13c5f046ef26ad6
3,658,660
def tensor_log10(t1, out_format, dtype=None):
    """
    Takes the log base 10 of each input in the tensor. Note that this is
    applied to all elements in the tensor, not just non-zeros.

    Warnings
    ---------
    The log10 of 0 is undefined and is performed on every element in the
    tensor regardless of sparsity.

    Parameters
    ------------
    t1: tensor, array_like
        input tensor or array_like object

    out_format: format, mode_format, optional
        * If a :class:`format` is specified, the result tensor is stored in
          the format out_format.
        * If a :class:`mode_format` is specified, the result tensor has all
          of its dimensions stored in the :class:`mode_format` passed in.

    dtype: Datatype
        The datatype of the output tensor.

    Examples
    ----------
    >>> import pytaco as pt
    >>> pt.tensor_log10([10, 100], out_format=pt.compressed, dtype=pt.float32).to_array()
    array([1., 2.], dtype=float32)

    Returns
    --------
    log10: tensor
        The element wise log10 of the input tensor.
    """
    t1 = as_tensor(t1, copy=False)
    cast_val = _cm.max_type(_cm.float32, t1.dtype)
    f = lambda x: _cm.log10(_cm.cast(x, cast_val))
    return _compute_unary_elt_eise_op(f, t1, out_format, dtype)
ff5c1a2f4cee9bc287ac81d3d3e524c1292fa2a7
3,658,661
def get_file_phenomena_i(index):
    """
    Return file phenomena depending on the value of index.
    """
    if index <= 99:
        return [phen[0]]
    elif index >= 100 and index <= 199:
        return [phen[1]]
    elif index >= 200 and index <= 299:
        return [phen[2]]
    elif index >= 300 and index <= 399:
        return [phen[3]]
    elif index >= 400 and index <= 499:
        return phen[0:2]
    elif index >= 500 and index <= 599:
        return phen[0:3]
    elif index >= 600 and index <= 699:
        tmp_l = phen[0:2]
        tmp_l.append(phen[3])
        return tmp_l
18beac08b59aec18b33f6472866a50decd01db30
3,658,662
def resource_cache_map(resource_id, flush=True):
    """cache resource info"""
    if flush:
        map_resources(resource_ids=[resource_id, ])
    if resource_id not in CDNRESOURCE:
        raise InvalidArgument('Resource does not exist')
    return CDNRESOURCE[resource_id]
5e67546db9008e805b80c1ed7545d3787444c402
3,658,663
def _preprocess_html(table_html):
    """Parses HTML with bs4 and fixes some glitches."""
    table_html = table_html.replace("<br />", "<br /> ")
    table = bs4.BeautifulSoup(table_html, "html5lib")
    table = table.find("table")
    # Delete hidden style annotations.
    for tag in table.find_all(attrs={"style": "display:none"}):
        tag.decompose()
    # Make sure "rowspan" is not set to an illegal value.
    for tag in table.find_all("td"):
        for attr in list(tag.attrs):
            if attr == "rowspan":
                tag.attrs[attr] = ""
    return table
1062c5cdbb058ea36b1c877d7787aebbde87c642
3,658,664
def parse_campus_hours(data_json, eatery_model):
    """Parses a Cornell Dining json dictionary.

    Returns
    1) a list of tuples of CampusEateryHour objects for a corresponding
       CampusEatery object and their unparsed menu
    2) an array of the items an eatery serves.

    Args:
        data_json (dict): a valid dictionary from the Cornell Dining json
        eatery_model (CampusEatery): the CampusEatery object to which to link the hours.
    """
    eatery_hours_and_menus = []
    dining_items = []
    for eatery in data_json["data"]["eateries"]:
        eatery_slug = eatery.get("slug", "")
        if eatery_model.slug == eatery_slug:
            dining_items = get_trillium_menu() if eatery_slug == TRILLIUM_SLUG else parse_dining_items(eatery)
            hours_list = eatery["operatingHours"]
            for hours in hours_list:
                new_date = hours.get("date", "")
                hours_events = hours["events"]
                if hours_events:
                    for event in hours_events:
                        start, end = format_time(event.get("start", ""), event.get("end", ""), new_date)
                        eatery_hour = CampusEateryHour(
                            eatery_id=eatery_model.id,
                            date=new_date,
                            event_description=event.get("descr", ""),
                            event_summary=event.get("calSummary", ""),
                            end_time=end,
                            start_time=start,
                        )
                        eatery_hours_and_menus.append((eatery_hour, event.get("menu", [])))
                else:
                    eatery_hour = CampusEateryHour(
                        eatery_id=eatery_model.id,
                        date=new_date,
                        event_description=None,
                        event_summary=None,
                        end_time=None,
                        start_time=None,
                    )
                    eatery_hours_and_menus.append((eatery_hour, []))
    return eatery_hours_and_menus, dining_items
95e7bbc898f4516b9812d3f68749651a32f3535f
3,658,665
from typing import Dict
from typing import Tuple


def _change_relationships(edge: Dict) -> Tuple[bool, bool]:
    """Validate relationship."""
    if 'increases' in edge[1]['relation'] or edge[1]['relation'] == 'positive_correlation':
        return True, True
    elif 'decreases' in edge[1]['relation'] or edge[1]['relation'] == 'negative_correlation':
        return True, False
    return False, False
b826eb1eb7bd1e7eed7fd8577b5c04d827a75e56
3,658,666
def extract_behaviour_sync(sync, chmap=None, display=False, tmax=np.inf):
    """
    Extract wheel positions and times from sync fronts dictionary

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync
        trace for all 16 chans
    :param chmap: dictionary containing channel index. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses
        display. Defaults to False.
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
                                        'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    trials = Bunch({
        'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]

    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
        plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax, ax=ax,
                             label='goCue_times', color='b', linewidth=width)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax, ax=ax,
                             label='start_trial', color='m', linewidth=width)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax, ax=ax,
                             label='error tone', color='r', linewidth=width)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax, ax=ax,
                             label='valveOpen_times', color='g', linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax, ax=ax,
                             label='stimFreeze_times', color='y', linewidth=width)
        plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax, ax=ax,
                             label='stim off', color='c', linewidth=width)
        plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax, ax=ax,
                             label='stimOn_times', color='tab:orange', linewidth=width)
        c = _get_sync_fronts(sync, chmap['left_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['right_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['body_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])

    return trials
b02ec14a5714f1387acb12f1ec2d5bbbc1684f67
3,658,667
def is_attr_defined(attrs, dic):
    """
    Check if the sequence of attributes is defined in dictionary 'dic'.

    Valid 'attrs' sequence syntax:

    <attr>                Return True if single attribute is defined.
    <attr1>,<attr2>,...   Return True if one or more attributes are defined.
    <attr1>+<attr2>+...   Return True if all the attributes are defined.
    """
    if OR in attrs:
        for a in attrs.split(OR):
            if dic.get(a.strip()) is not None:
                return True
        else:
            return False
    elif AND in attrs:
        for a in attrs.split(AND):
            if dic.get(a.strip()) is None:
                return False
        else:
            return True
    else:
        return dic.get(attrs.strip()) is not None
542388846fabc79e126203d80a63db6901a71897
3,658,669
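Example calls, assuming OR and AND are the separator constants ',' and '+' implied by the docstring's syntax description:

OR, AND = ',', '+'

dic = {'a': 1, 'b': None, 'c': 3}
print(is_attr_defined('a', dic))    # True
print(is_attr_defined('a,b', dic))  # True: at least one attribute is defined
print(is_attr_defined('a+b', dic))  # False: 'b' is None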
def c_str_repr(str_):
    """Returns representation of string in C (without quotes)"""
    # explicit escapes: the original emitted a literal backslash followed by
    # the raw character, which is wrong for '\r' and '\n' in C source
    escapes = {'"': '\\"', '\\': '\\\\', '\r': '\\r', '\n': '\\n'}

    def byte_to_repr(char_):
        """Converts byte to C code string representation"""
        char_val = ord(char_)
        if char_ in escapes:
            return escapes[char_]
        elif (ord(' ') <= char_val <= ord('^') or char_val == ord('_') or
              ord('a') <= char_val <= ord('~')):
            return chr(char_val)
        else:
            return '\\x%02x' % char_val

    return '"%s"' % ''.join(byte_to_repr(x) for x in str_)
e7cce729a00a7d2a35addf95eb097a3caa06bedd
3,658,670
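A quick sanity check of c_str_repr's escaping:

print(c_str_repr('say "hi"\n'))  # "say \"hi\"\n"
print(c_str_repr('\x01\x02'))    # "\x01\x02"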
def getActiveTeamAndID():
    """Returns the Team ID and CyTeam for the active player."""
    return getActiveTeamID(), getActiveTeam()
edf58aee8d9126ddc25afd94becf641330e13ca2
3,658,672
from typing import Union
from typing import BinaryIO
from typing import Tuple
from typing import Optional


def is_nitf(
        file_name: Union[str, BinaryIO],
        return_version=False) -> Union[bool, Tuple[bool, Optional[str]]]:
    """
    Test whether the given input is a NITF 2.0 or 2.1 file.

    Parameters
    ----------
    file_name : str|BinaryIO
    return_version : bool

    Returns
    -------
    is_nitf_file: bool
        Is the file a NITF file, based solely on checking initial bytes.
    nitf_version: None|str
        Only returned if `return_version=True`. Will be `None` in the event
        that `is_nitf_file=False`.
    """
    header = _fetch_initial_bytes(file_name, 9)
    if header is None:
        if return_version:
            return False, None
        else:
            return False

    ihead = header[:4]
    vers = header[4:]
    if ihead == b'NITF':
        try:
            vers = vers.decode('utf-8')
            return (True, vers) if return_version else True
        except ValueError:
            pass
    return (False, None) if return_version else False
6e28baa09d6b8e173db00671e1ed08023630110b
3,658,673
def get_xlsx_filename() -> str:
    """
    Get the name of the excel file.

    Example filename: kesasetelihakemukset_2021-01-01_23-59-59.xlsx
    """
    local_datetime_now_as_str = timezone.localtime(timezone.now()).strftime(
        "%Y-%m-%d_%H-%M-%S"
    )
    filename = f"kesasetelihakemukset_{local_datetime_now_as_str}.xlsx"
    return filename
fb8715f30bd91f39d9836bf59504ad85c205bdf3
3,658,675
from pathlib import Path


def get_content_directory() -> Path:
    """
    Get the path of the markdown `content` directory.
    """
    return get_base_directory() / "content"
2b6f7a9c676e8128fafd43b26cf62aa736aa957c
3,658,676
import math


def mag_inc(x, y, z):
    """
    Given *x* (north intensity), *y* (east intensity), and *z*
    (vertical intensity) all in [nT], return the magnetic inclination
    angle [deg].
    """
    h = math.sqrt(x**2 + y**2)
    return math.degrees(math.atan2(z, h))
f4036358625dd9d032936afc373e53bef7c1e6e1
3,658,677
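An illustrative call with rough mid-latitude field components in nT (numbers invented for the example):

print(mag_inc(20000.0, 0.0, 45000.0))  # ~66.0 degrees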
import torch


def rgb_to_rgba(image, alpha_val):
    """
    Convert an image from RGB to RGBA.
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")

    if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")

    if not isinstance(alpha_val, (float, torch.Tensor)):
        raise TypeError(f"alpha_val type is not a float or torch.Tensor. Got {type(alpha_val)}")

    # add one channel
    r, g, b = torch.chunk(image, image.shape[-3], dim=-3)

    if isinstance(alpha_val, float):
        a = torch.full_like(r, fill_value=float(alpha_val))
    else:
        # alpha_val is already a tensor; previously this branch left `a` unbound
        a = alpha_val

    return torch.cat([r, g, b, a], dim=-3)
5bab73c37ff81c431ed88ce7d39743cce6c15c56
3,658,678
def get(identifier):
    """get the activation function"""
    if identifier is None:
        return linear
    if callable(identifier):
        return identifier
    if isinstance(identifier, str):
        activations = {
            "relu": relu,
            "sigmoid": sigmoid,
            "tanh": tanh,
            "linear": linear,
        }
        return activations[identifier]
    raise ValueError(f"Could not interpret activation identifier: {identifier!r}")
005789e8cdadff97875f002b9776d8d8bdb22d56
3,658,680
def df_add_column_codelines(self, key):
    """Generate code lines to add new column to DF"""
    func_lines = df_set_column_index_codelines(self)  # provide res_index = ...

    results = []
    for i, col in enumerate(self.columns):
        col_loc = self.column_loc[col]
        type_id, col_id = col_loc.type_id, col_loc.col_id
        res_data = f'res_data_{i}'
        func_lines += [
            f' data_{i} = self._data[{type_id}][{col_id}]',
            f' {res_data} = pandas.Series(data_{i}, index=res_index, name="{col}")',
        ]
        results.append((col, res_data))

    res_data = 'new_res_data'
    literal_key = key.literal_value
    func_lines += [f' {res_data} = pandas.Series(value, index=res_index, name="{literal_key}")']
    results.append((literal_key, res_data))

    data = ', '.join(f'"{col}": {data}' for col, data in results)
    func_lines += [f' return pandas.DataFrame({{{data}}}, index=res_index)']

    return func_lines
742241d973bb46da2a75b40bf9a76c91ba759d98
3,658,681
import torch


def resize_bilinear_nd(t, target_shape):
    """Bilinear resizes a tensor t to have shape target_shape.

    This function bilinearly resizes a n-dimensional tensor by iteratively
    applying torch.nn.Upsample (which can only resize 2 dimensions).
    For bilinear interpolation, the order in which it is applied does not
    matter.

    Args:
      t: tensor to be resized
      target_shape: the desired shape of the new tensor.

    Returns:
      The resized tensor
    """
    shape = list(t.shape)
    target_shape = list(target_shape)
    assert len(shape) == len(target_shape)

    # We progressively move through the shape, resizing dimensions...
    d = 0
    while d < len(shape):
        # If we don't need to deal with the next dimension, step over it
        if shape[d] == target_shape[d]:
            d += 1
            continue

        # Otherwise, we'll resize the next two dimensions...
        # If d+2 doesn't need to be resized, this will just be a null op for it
        new_shape = shape[:]
        new_shape[d:d + 2] = target_shape[d:d + 2]

        # The helper collapse_shape() makes our shapes 4-dimensional with
        # the two dimensions we want to deal with on the outside.
        shape_ = collapse_shape(shape, d, d + 2)
        new_shape_ = collapse_shape(new_shape, d, d + 2)

        # We can then reshape and use torch.nn.Upsample() on the
        # outer two dimensions.
        t_ = t.view(shape_)
        # transpose [0, 1, 2, 3] to [0, 3, 1, 2]
        t_ = torch.transpose(t_, 1, 3)
        t_ = torch.transpose(t_, 2, 3)

        upsample = torch.nn.Upsample(size=new_shape_[1:3], mode='bilinear',
                                     align_corners=True)
        t_ = upsample(t_)

        t_ = torch.transpose(t_, 2, 3)
        t_ = torch.transpose(t_, 1, 3)

        # And then reshape back to our uncollapsed version, having finished
        # resizing two more dimensions in our shape.
        t = t_.reshape(new_shape)
        shape = new_shape
        d += 2

    return t
005266983cca744437826673ff8dd379afb699e2
3,658,682
def _parse_disambiguate(disambiguatestatsfilename):
    """Parse disambiguation stats from given file.
    """
    disambig_stats = [-1, -1, -1]
    with open(disambiguatestatsfilename, "r") as in_handle:
        header = in_handle.readline().strip().split("\t")
        if header == ['sample', 'unique species A pairs',
                      'unique species B pairs', 'ambiguous pairs']:
            disambig_stats_tmp = in_handle.readline().strip().split("\t")[1:]
            if len(disambig_stats_tmp) == 3:
                disambig_stats = [int(x) for x in disambig_stats_tmp]
    return disambig_stats
bb05ec857181f032ae9c0916b4364b772ff7c412
3,658,683
def clean_vigenere(text):
    """Convert text to a form compatible with the preconditions imposed by Vigenere cipher."""
    return ''.join(ch for ch in text.upper() if ch.isupper())
d7c3fc656ede6d07d6e9bac84a051581364c63a0
3,658,684
def select_artist(df_by_artists, df_rate):
    """This method selects artists which perform the same genre as the artists that were given

    :param df_by_artists:
    :param df_rate:
    """
    # save the indices of artists whose genres include any of the genres in the genre profile
    list_of_id = []
    for index, row in df_by_artists.iterrows():
        for genre in row["genres"]:
            if genre in df_rate.index:
                list_of_id.append(index)
    # find the unique indices
    list_of_id = list(set(list_of_id))
    # select the artists and genres columns of the artists including any of the genres in the genre profile
    df_select_columns = df_by_artists.iloc[list_of_id, [col(df_by_artists, "artists"), col(df_by_artists, "genres")]]
    df_select = df_select_columns.copy()
    # create the artist-genre-matrix of new artists
    for index, row in df_select_columns.iterrows():
        for genre in row['genres']:
            # artist includes genre: 1
            df_select.at[index, genre] = 1
    # artist does not include genre: 0
    df_select = df_select.fillna(0)[df_rate.index]
    return df_select
85c09b62553a3257b4f325dd28d26335c9fcb033
3,658,685
import uuid


def generate_uuid(class_name: str, identifier: str) -> str:
    """
    Generate a uuid based on an identifier

    :param identifier: characters used to generate the uuid
    :type identifier: str, required
    :param class_name: classname of the object to create a uuid for
    :type class_name: str, required
    """
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, class_name + identifier))
10e85effbce04dec62cc55ee709247afa0fb0da7
3,658,686
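Because uuid5 is deterministic, the same class name and identifier always map to the same UUID:

a = generate_uuid("Person", "alice")
b = generate_uuid("Person", "alice")
print(a == b)  # True: uuid5 hashes the same name to the same UUID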
def fetch(model, key):
    """Fetch by ID."""
    return db.session.query(model).get(key)
4c3008bec5ed5eac593f2ad8ba2816f121362677
3,658,687
from typing import Optional


def construct_filename(prefix: str, suffix: Optional[str] = '.csv') -> str:
    """Construct a filename containing the current date.

    Examples
    --------
    .. code:: python

        >>> filename = construct_filename('my_file', '.txt')
        >>> print(filename)
        'my_file_31_May_2019.txt'

    Parameters
    ----------
    prefix : :class:`str`
        A prefix for the to-be returned filename.
        The current date will be appended to this prefix.

    suffix : :class:`str`, optional
        An optional suffix of the to-be returned filename.
        No suffix will be attached if ``None``.

    Returns
    -------
    :class:`str`
        A filename consisting of **prefix**, the current date and **suffix**.
    """
    today = date.today()
    suffix = suffix or ''
    return prefix + today.strftime('_%d_%b_%Y') + suffix
8269947952d4c8d81cc2855a5776c3677c6a5c57
3,658,688
def make_friedman_model(point1, point2):
    """
    Makes a vtk line source from two set points

    :param point1: one end of the line
    :param point2: other end of the line

    :returns: The line
    """
    line = vtkLineSource()
    line.SetPoint1(point1)
    line.SetPoint2(point2)
    return line
f33046307c7c0c2bfeadfbdb4e0815bc5d42d73f
3,658,689
import re


def breadcrumbs_pcoa_plot(pcl_fname, output_plot_fname, **opts):
    """Use breadcrumbs `scriptPcoa.py` script to produce principal
    coordinate plots of pcl files.

    :param pcl_fname: String; file name of the pcl-formatted taxonomic
    profile to visualize via `scriptPcoa.py`.

    :param output_plot_fname: String; file name of the resulting image file.

    :keyword **opts: Any additional keyword arguments are passed to
    `scriptPcoa.py` as command line flags. By default, it passes `meta=None`,
    `id=None` and `noShape=None`, which are converted into `--meta`, `--id`,
    and `--noShape`, respectively.

    External dependencies
      - Breadcrumbs: https://bitbucket.org/biobakery/breadcrumbs
    """
    pcoa_cmd = "scriptPcoa.py "
    default_opts = {
        "meta": True,
        "id": True,
        "noShape": True,
        "outputFile": output_plot_fname
    }
    default_opts.update(opts)

    def sample_id(fname):
        id_ = str()
        with open(fname) as f:
            for line in f:
                if line.startswith("#"):
                    id_ = line.split('\t')[0]
                    continue
                else:
                    return id_ or line.split('\t')[0]

    def last_meta_name(fname):
        prev_line = str()
        with open(fname) as f:
            for line in f:
                if re.search(r'[Bb]acteria|[Aa]rchaea.*\s+\d', line):
                    return prev_line.split('\t')[0]
                prev_line = line
        return prev_line.split('\t')[0]

    def run(pcoa_cmd=pcoa_cmd):
        if default_opts['meta'] is True or not default_opts['meta']:
            default_opts['meta'] = last_meta_name(pcl_fname)
        if default_opts['id'] is True or not default_opts['id']:
            default_opts['id'] = sample_id(pcl_fname)
        pcoa_cmd += dict_to_cmd_opts(default_opts)
        pcoa_cmd += " " + pcl_fname + " "
        return CmdAction(pcoa_cmd, verbose=True).execute()

    targets = [output_plot_fname]
    if 'CoordinatesMatrix' in default_opts:
        targets.append(default_opts['CoordinatesMatrix'])

    yield {
        "name": "breadcrumbs_pcoa_plot: " + output_plot_fname,
        "actions": [run],
        "file_dep": [pcl_fname],
        "targets": targets
    }
06fc9511b21ec3c0111ba91cea8c08852eb2bcaf
3,658,690
def _parse_xml(buff):
    """\
    Parses XML and returns the root element.
    """
    buff.seek(0)
    return etree.parse(buff).getroot()
fa3876f93c0a71b9e4bf6d95dfadbf0714e7c17c
3,658,691
def After(interval):
    """
    After waits for the duration to elapse and then sends the current time
    on the returned channel. It is equivalent to Timer(interval).c
    """
    return Timer(interval).c
1011151471f839b3e9f7edad369699d76d9f7601
3,658,692
def f_score(r: float, p: float, b: int = 1):
    """
    Calculate f-measure from recall and precision.

    Args:
        r: recall score
        p: precision score
        b: weight of recall relative to precision in the harmonic mean

    Returns:
        val: value of f-measure
    """
    try:
        val = (1 + b ** 2) * (p * r) / (b ** 2 * p + r)
    except ZeroDivisionError:
        val = 0
    return val
d12af20e30fd80cb31b2cc119d5ea79ce2507c4b
3,658,693
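Worked examples of the F-beta formula above (note that b > 1 weights recall more heavily):

print(f_score(r=0.5, p=1.0))       # 0.666..., the balanced F1
print(f_score(r=0.5, p=1.0, b=2))  # 0.555..., low recall pulls the score down harder
print(f_score(r=0.0, p=0.0))       # 0, the zero division is guarded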
def show_inventory():
    """Show the user what is in stock."""
    context = {
        'inventory': [
            # Could contain any items
            {'name': 'apple', 'price': 1.00},
            {'name': 'banana', 'price': 1.20},
            {'name': 'carrot', 'price': 2.00},
        ]
    }
    return render_template('show_inventory.html', **context)
be2b67abb1ebd60bacfad117dab166a08d6915b1
3,658,695
import re

import numpy as np


def rebuild_schema(doc, r, df):
    """Rebuild the schema for a resource based on a dataframe"""

    # Re-get the resource in the doc, since it may be different.
    try:
        r = doc.resource(r.name)
    except AttributeError:
        # Maybe r is actually a resource name
        r = doc.resource(r)

    def alt_col_name(name, i):
        if not name:
            return 'col{}'.format(i)
        return re.sub(r'_+', '_', re.sub(r'[^\w_]', '_', str(name)).lower()).rstrip('_')

    df_types = {
        np.dtype('O'): 'text',
        np.dtype('int64'): 'integer',
        np.dtype('float64'): 'number'
    }

    try:
        df_index_frame = df.index.to_frame()
    except AttributeError:
        df_index_frame = None

    def get_col_dtype(c):
        c = str(c)
        try:
            return df_types[df[c].dtype]
        except KeyError:
            # Maybe it is in the index?
            pass
        try:
            return df_types[df_index_frame[c].dtype]
        except TypeError:
            # Maybe not a multi-index
            pass
        if c == 'id' or c == df.index.name:
            return df_types[df.index.dtype]
        return 'unknown'

    columns = []
    schema_term = r.schema_term[0]

    if schema_term:
        old_cols = {c['name'].value: c.properties for c in schema_term.children}
        for c in schema_term.children:
            schema_term.remove_child(c)
        schema_term.children = []
    else:
        old_cols = {}
        schema_term = doc['Schema'].new_term('Table', r.schema_name)

    index_names = [n if n else "id" for n in df.index.names]

    for i, col in enumerate(index_names + list(df.columns)):
        acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else ''
        d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn}

        if col in old_cols.keys():
            lookup_name = col
        elif acn in old_cols.keys():
            lookup_name = acn
        else:
            lookup_name = None

        if lookup_name and lookup_name in old_cols:
            # carry over extra properties from the old column definition
            # (the original wrapped this in a redundant loop over
            # schema_term.properties that repeated the same assignments)
            old_col = old_cols.get(lookup_name)
            for k, v in old_col.items():
                if k != 'name' and v:
                    d[k] = v

        columns.append(d)

    for c in columns:
        name = c.pop('name')
        datatype = c.pop('datatype')
        altname = c.pop('altname')
        schema_term.new_child('Column', name, datatype=datatype, altname=altname, **c)
ed212e5cff26dcfece99e3361df9d61823c2bfde
3,658,697
def compute_similarity(image, reference):
    """Compute a similarity index for an image compared to a reference image.

    The similarity index is based on the general algorithm used in the
    AmphiIndex algorithm.

        - identify slice of image that is a factor of 256 in size
        - rebin image slice down to a (256,256) image
        - rebin same slice from reference down to a (256,256) image
        - sum the differences of the rebinned slices
        - divide the absolute value of the summed difference by the rebinned
          image slice sum

    .. note:: This index will typically return values < 0.1 for similar images,
              and values > 1 for dis-similar images.

    Parameters
    ----------
    image : ndarray
        Image (as ndarray) to measure

    reference : ndarray
        Image which serves as the 'truth' or comparison image.

    Returns
    -------
    similarity_index : float
        Value of similarity index for `image`

    """
    # Ensure NaNs are replaced with 0
    image = np.nan_to_num(image[:], nan=0)
    reference = np.nan_to_num(reference[:], nan=0)

    imgshape = (min(image.shape[0], reference.shape[0]),
                min(image.shape[1], reference.shape[1]))
    minsize = min(imgshape[0], imgshape[1])

    # determine largest slice that is a power of 2 in size
    window_bit = maxBit(minsize)
    window = 2**window_bit

    # Define how big the rebinned image should be for computing the sim index
    # Ensure a minimum rebinned size of 64x64
    sim_bit = (window_bit - 2) if (window_bit - 2) > 6 else window_bit
    sim_size = 2**sim_bit

    # rebin image and reference
    img = rebin(image[:window, :window], (sim_size, sim_size))
    ref = rebin(reference[:window, :window], (sim_size, sim_size))

    # Compute index
    diffs = np.abs((img - ref).sum())
    sim_indx = diffs / img.sum()
    return sim_indx
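# Hypothetical usage sketch; assumes the rebin() and maxBit() helpers used by
# compute_similarity are in scope. The printed values follow the docstring's
# rule of thumb (< 0.1 similar, > 1 dissimilar) and vary with the random draws.
import numpy as np

ref = np.random.rand(512, 512)
similar = ref + np.random.normal(scale=0.001, size=ref.shape)
print(compute_similarity(similar, ref))                   # small value: similar images
print(compute_similarity(np.random.rand(512, 512), ref))  # large value: dissimilar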
0b49009bfdd0697999e61825390a8f883ae8dd79
3,658,698
def _create_npu_quantization(
    scale,
    zero_point,
):
    """This is a helper function to capture a list of arguments
    to create a Vela NpuQuantization object.
    """
    # Scale could be an ndarray if per-channel quantization is available
    if not isinstance(scale, tvm.tir.expr.Load):
        if isinstance(scale.value, float):
            scale = np.single(scale.value)
        else:
            assert isinstance(scale.value.value, float)
            scale = np.single(scale.value.value)
    q_params = vapi.NpuQuantization(scale_f32=scale, zero_point=zero_point.value)
    return q_params
71f7e20a760940e6d46301ccd9130265de140b29
3,658,699
import mistune  # the Markdown(renderer=...) call below matches the mistune v1 API


def article_markdown(text):
    """ Render the given text as Markdown using the article renderer. """
    renderer = ArticleRenderer()
    markdown = mistune.Markdown(renderer=renderer)
    return markdown(text)
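# Hypothetical usage; assumes ArticleRenderer (a mistune renderer subclass
# defined elsewhere in this project) is importable.
html = article_markdown("# Title\n\nSome **bold** text and `inline code`.")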
32d1edc0d5155c62b0dc0ff18dc9a44f1ec85d7a
3,658,700
def _gen_key(user_id, key_name): """ Tuck this into UserManager """ try: manager = users.UserManager.instance() private_key, fingerprint = manager.generate_key_pair(user_id, key_name) except Exception as ex: return {'exception': ex} return {'private_key': private_key, 'fingerprint': fingerprint}
f5babf523bded37ba624295a7435e2709488d47a
3,658,703
def svhn_loader(size=None, root="./shvn", set="train", batch_size=32, mean=0.5, std=0.5,
                transform="default", download=True, target_transform=None, **loader_args):
    """
    Build a DataLoader over the SVHN dataset.

    :param size: int or tuple; spatial size to resize images to (None keeps the native size)
    :param root: directory where the SVHN data is stored or downloaded to
    :param set: one of 'train', 'test' or 'extra'
    :param batch_size: number of samples per batch
    :param mean: float or tuple; per-channel mean for normalization (None skips normalization)
    :param std: float or tuple; per-channel std for normalization (None skips normalization)
    :param transform: "default" builds a Resize/ToTensor/Normalize pipeline; otherwise used as-is
    :param download: whether to download the dataset if it is not present under `root`
    :param target_transform: optional transform applied to the labels
    :param loader_args: extra keyword arguments forwarded to the DataLoader
    :return: a DataLoader over the requested SVHN split (shuffled only for 'train')
    """
    valid_sets = ('train', 'test', 'extra')
    if set not in valid_sets:
        raise ValueError("set {} is invalid, valid sets include {}".format(set, valid_sets))

    if size is not None:
        if not isinstance(size, tuple):
            size = (size, size)

    if transform == "default":
        t = []
        if size is not None:
            t.append(transformations.Resize(size))
        t.append(transformations.ToTensor())
        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            t.append(transformations.Normalize(mean=mean, std=std))
        trans = transformations.Compose(t)
    else:
        trans = transform

    data = SVHN(root, split=set, transform=trans, download=download,
                target_transform=target_transform)

    shuffle_mode = True if set == "train" else False
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle_mode, **loader_args)
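# Minimal usage sketch, assuming torchvision's SVHN class and a network
# connection for the download. SVHN digits are 3-channel 32x32 crops.
train_loader = svhn_loader(size=32, set="train", batch_size=64)
images, labels = next(iter(train_loader))
print(images.shape)  # expected: torch.Size([64, 3, 32, 32])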
f40cd95338f4e745cbbb849ac8a9999f98245cf0
3,658,704
import pkg_resources def get_supervisees(): """Pull the supervisor specifications out of the entry point.""" eps = list(pkg_resources.iter_entry_points(ENTRY_POINT_GROUP)) return dict((ep.name, ep.load()) for ep in eps)
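# For context, a hypothetical plugin would register itself under the group named
# by ENTRY_POINT_GROUP in its setup.py; "myapp.supervisees" below is a placeholder
# for that group name.
from setuptools import setup

setup(
    name="my-plugin",
    entry_points={
        "myapp.supervisees": [
            "worker = my_plugin.supervisors:worker_spec",
        ],
    },
)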
6a812bb8422382c6e481bab8b27651786984ea66
3,658,705
async def index(request): """ This is the view handler for the "/" url. **Note: returning html without a template engine like jinja2 is ugly, no way around that.** :param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request :return: aiohttp.web.Response object """ # {% if database.is_none and example.is_message_board %} # app.router allows us to generate urls based on their names, # see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources message_url = request.app.router['messages'].url_for() ctx = dict( title=request.app['name'], styles_css_url=request.app['static_root_url'] + '/styles.css', content="""\ <p>Success! you've setup a basic aiohttp app.</p> <p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p> <b> <a href="{message_url}">View and add messages</a> </b>""".format(message_url=message_url) ) # {% else %} ctx = dict( title=request.app['name'], styles_css_url=request.app['static_root_url'] + '/styles.css', content="<p>Success! you've setup a basic aiohttp app.</p>", ) # {% endif %} # with the base web.Response type we have to manually set the content type, otherwise text/plain will be used. return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html')
f90ba225055bf77b39942da7fc1b1b2f5b4a7286
3,658,706
def adtg(s, t, p): """ Calculates adiabatic temperature gradient as per UNESCO 1983 routines. Parameters ---------- s(p) : array_like salinity [psu (PSS-78)] t(p) : array_like temperature [℃ (ITS-90)] p : array_like pressure [db] Returns ------- adtg : array_like adiabatic temperature gradient [℃ db :sup:`-1`] Examples -------- >>> # Data from UNESCO 1983 p45. >>> import seawater as sw >>> from seawater.library import T90conv >>> t = T90conv([[ 0, 0, 0, 0, 0, 0], ... [10, 10, 10, 10, 10, 10], ... [20, 20, 20, 20, 20, 20], ... [30, 30, 30, 30, 30, 30], ... [40, 40, 40, 40, 40, 40]]) >>> s = [[25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35]] >>> p = [0, 5000, 10000, 0, 5000, 10000] >>> sw.adtg(s, t, p) array([[ 1.68710000e-05, 1.04700000e-04, 1.69426000e-04, 3.58030000e-05, 1.17956500e-04, 1.77007000e-04], [ 1.00194580e-04, 1.60959050e-04, 2.06874170e-04, 1.14887280e-04, 1.71364200e-04, 2.12991770e-04], [ 1.73819840e-04, 2.13534000e-04, 2.44483760e-04, 1.84273240e-04, 2.21087800e-04, 2.49137960e-04], [ 2.41720460e-04, 2.64764100e-04, 2.82959590e-04, 2.47934560e-04, 2.69466550e-04, 2.86150390e-04], [ 3.07870120e-04, 3.16988600e-04, 3.23006480e-04, 3.09844920e-04, 3.18839700e-04, 3.24733880e-04]]) References ---------- .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for computation of fundamental properties of seawater. UNESCO Tech. Pap. in Mar. Sci., No. 44, 53 pp. http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic temperature gradient and potential temperature of sea water. Deep-Sea Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6 """ s, t, p = map(np.asanyarray, (s, t, p)) T68 = T68conv(t) a = [3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10] b = [1.8932e-6, -4.2393e-8] c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14] d = [-1.1351e-10, 2.7759e-12] e = [-4.6206e-13, 1.8676e-14, -2.1687e-16] return (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68 + (b[0] + b[1] * T68) * (s - 35) + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) + (d[0] + d[1] * T68) * (s - 35)) * p + (e[0] + (e[1] + e[2] * T68) * T68) * p * p)
8d195810ad52215135db4ef8f9825a914b01522c
3,658,707
import re def calculate_ion_mz(seq: str, ion: str = 'M', charge: int = 0 ) -> float: """ given a peptide sequence and ion type, count the number of atoms, accounting for ion type and whether cysteines are measured by IAA - ion type M: full peptide parent ion (with H2O) b: b ion (no addition) y: y ion (with H2O) :param seq: str amino acid sequence with modifications defined by [] :param ion: str ion type (default: M to return peptide mass) :param charge: int numerical charge (default: 0 to return peptide mass) :return: float accurate mass """ assert type(charge) == int, "Charge must be integer." mass = 0 # First, strip all mass shifts and add them to the starting mass try: mods = [float(mod[1:-1]) for mod in re.findall('\\[.*?]', seq)] except ValueError: raise ValueError('Modification contains string characters.') # 2021-11-22 exclude label mass from peptide mass calculation mass += sum(m for m in mods if m not in params.label_mass) # 2021-05-18 strip all N-terminal n from Comet seq = re.sub('^n', '', seq) # Strip all modifications stripped = re.sub('\\[.*?]', '', seq) res_atoms = _count_residue_atoms(stripped, iaa=params.iaa, # add iodoacetamide to cysteine ) # dictionary for complementary atoms to add to ion types comp_atom_dict = { 'M': [0, 2, 1, 0, 0], 'b': [0, 0, 0, 0, 0], 'y': [0, 2, 1, 0, 0], 'b_': [0, -2, -1, 0, 0], 'y_': [0, 0, 0, 0, 0], } comp_atoms = comp_atom_dict[ion] ion_atoms = [res_atoms[i] + comp_atoms[i] for i, v in enumerate(res_atoms)] mass += _calc_atom_mass(ion_atoms) # Return peptide mass if charge is 0 if charge > 0: mz = (mass + constants.PROTON_MASS * charge) / charge return mz if charge < 0: raise ValueError('Negative charges are not supported.') return mass
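# Hypothetical calls, assuming the module-level params/constants objects and the
# _count_residue_atoms/_calc_atom_mass helpers are configured as in the package.
neutral = calculate_ion_mz('PEPTIDE')                # neutral monoisotopic mass
y2 = calculate_ion_mz('PEPTIDE', ion='y', charge=2)  # doubly charged y-ion m/z
modded = calculate_ion_mz('PEPC[57.02146]TIDE')      # bracketed fixed-mass modification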
e032ad439414314511b008d98dadb23b84012798
3,658,708
def hhc_to_int(s): """Parse a number expressed in sortable hhc as an integer (or long). >>> hhc_to_int('-') 0 >>> hhc_to_int('.') 1 >>> hhc_to_int('~') 65 >>> hhc_to_int('.-') 66 >>> hhc_to_int('..') 67 >>> hhc_to_int('.XW') 6700 >>> hhc_to_int('----..') 67 >>> print(hhc_to_int('fDpEShMz-qput')) 302231454903657293676544 Negative numbers are supported. >>> hhc_to_int(',zST') -6700 """ if s == '' or s is None or s[:2] == ',,': raise ValueError("invalid literal for hhc_to_int: {}".format(s)) if s[0] == NEGATIVE_PREFIX: return -hhc2_to_int(s[1:], alphabet=HHC_ALPHABET[::-1]) return hhc2_to_int(s, HHC_ALPHABET)
25f6e8097f1fbf0f6ceed08fc8ac0195fb88acb4
3,658,710
def initializeSens(P, B, idxs):
    """
    This function initializes the sensitivities using the bicriteria algorithm
    to be the distance between each point and its closest flat from the set of
    flats B, divided by the sum of distances between the points of P and B.

    :param P: A numpy matrix of points, one per row (the last column is excluded
              from the distance computation).
    :param B: A set of flats where each flat is represented by an orthogonal matrix
              and a translation vector.
    :param idxs: A numpy array which represents the clustering which B imposes on P.
    :return: A numpy array holding the additive sensitivity term of each point in P.
    """
    centers_idxs = np.unique(idxs)  # the clusters imposed by B
    sensitivity_additive_term = np.zeros((P.shape[0], ))

    for center_idx in centers_idxs:  # go over each cluster of points from P
        cluster_per_center = np.where(idxs == center_idx)[0]  # all points in this cluster

        # compute the distance of each point in the cluster to its respective flat
        cost_per_point_in_cluster = Utils.computeDistanceToSubspace(P[cluster_per_center, :-1],
                                                                    B[center_idx][0],
                                                                    B[center_idx][1])

        # set the sensitivity to the distance of each point from its respective flat
        # divided by the total distance between the cluster points and that flat
        sensitivity_additive_term[cluster_per_center] = 2 ** Utils.J * \
            np.nan_to_num(cost_per_point_in_cluster / np.sum(cost_per_point_in_cluster))

    return sensitivity_additive_term
6726c892311d1590adea62babde6023d4b7d67a3
3,658,711
import numpy as np
from scipy.spatial import KDTree  # the docstring below references scipy.spatial.KDTree


def fast_knn(data, k=3, eps=0, p=2, distance_upper_bound=np.inf, leafsize=10,
             idw=util_idw.shepards):
    """ Impute using a variant of the nearest neighbours approach

    Basic idea: Impute array with a basic mean impute and then use the resulting
    complete array to construct a KDTree. Use this KDTree to compute nearest
    neighbours. After finding `k` nearest neighbours, take the weighted average
    of them. Basically, find the nearest row in terms of distance

    This approach is much, much faster than the other implementation
    (fit+transform for each subset) which is almost prohibitively expensive.

    Parameters
    ----------
    data: numpy.ndarray
        2D matrix to impute.

    k: int, optional
        Parameter used for method querying the KDTree class object. Number of
        neighbours used in the KNN query. Refer to the docs for
        [`scipy.spatial.KDTree.query`]
        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).

    eps: nonnegative float, optional
        Parameter used for method querying the KDTree class object. From the
        SciPy docs: "Return approximate nearest neighbors; the kth returned
        value is guaranteed to be no further than (1+eps) times the distance to
        the real kth nearest neighbor". Refer to the docs for
        [`scipy.spatial.KDTree.query`]
        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).

    p : float, 1<=p<=infinity, optional
        Parameter used for method querying the KDTree class object. Straight
        from the SciPy docs: "Which Minkowski p-norm to use. 1 is the
        sum-of-absolute-values Manhattan distance, 2 is the usual Euclidean
        distance, infinity is the maximum-coordinate-difference distance".
        Refer to the docs for [`scipy.spatial.KDTree.query`]
        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).

    distance_upper_bound : nonnegative float, optional
        Parameter used for method querying the KDTree class object. Straight
        from the SciPy docs: "Return only neighbors within this distance. This
        is used to prune tree searches, so if you are doing a series of
        nearest-neighbor queries, it may help to supply the distance to the
        nearest neighbor of the most recent point." Refer to the docs for
        [`scipy.spatial.KDTree.query`]
        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).

    leafsize: int, optional
        Parameter used for construction of the `KDTree` class object. Straight
        from the SciPy docs: "The number of points at which the algorithm
        switches over to brute-force. Has to be positive". Refer to the docs for
        [`scipy.spatial.KDTree`](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.KDTree.html)
        for more information.

    idw: fn, optional
        Function that takes one argument, a list of distances, and returns
        weighted percentages. You can define a custom one or bootstrap from
        functions defined in `impy.util.inverse_distance_weighting` using
        functools.partial, for example:
        `functools.partial(impy.util.inverse_distance_weighting.shepards, power=1)`

    Returns
    -------
    numpy.ndarray
        Imputed data.

    Examples
    --------

        >>> data = np.arange(25).reshape((5, 5)).astype(float)
        >>> data[0][2] = np.nan
        >>> data
        array([[ 0.,  1., nan,  3.,  4.],
               [ 5.,  6.,  7.,  8.,  9.],
               [10., 11., 12., 13., 14.],
               [15., 16., 17., 18., 19.],
               [20., 21., 22., 23., 24.]])
        >>> fast_knn(data, k=1)  # Weighted average (by distance) of nearest 1 neighbour
        array([[ 0.,  1.,  7.,  3.,  4.],
               [ 5.,  6.,  7.,  8.,  9.],
               [10., 11., 12., 13., 14.],
               [15., 16., 17., 18., 19.],
               [20., 21., 22., 23., 24.]])
        >>> fast_knn(data, k=2)  # Weighted average of nearest 2 neighbours
        array([[ 0.        ,  1.        , 10.08608891,  3.        ,  4.        ],
               [ 5.        ,  6.        ,  7.        ,  8.        ,  9.        ],
               [10.        , 11.        , 12.        , 13.        , 14.        ],
               [15.        , 16.        , 17.        , 18.        , 19.        ],
               [20.        , 21.        , 22.        , 23.        , 24.        ]])
        >>> fast_knn(data, k=3)
        array([[ 0.        ,  1.        , 13.40249283,  3.        ,  4.        ],
               [ 5.        ,  6.        ,  7.        ,  8.        ,  9.        ],
               [10.        , 11.        , 12.        , 13.        , 14.        ],
               [15.        , 16.        , 17.        , 18.        , 19.        ],
               [20.        , 21.        , 22.        , 23.        , 24.        ]])
        >>> fast_knn(data, k=5)  # There are at most only 4 neighbours. Raises error
        Traceback (most recent call last):
            ...
        IndexError: index 5 is out of bounds for axis 0 with size 5

    """
    null_xy = find_null(data)
    data_c = mean(data)
    kdtree = KDTree(data_c, leafsize=leafsize)

    for x_i, y_i in null_xy:
        distances, indices = kdtree.query(data_c[x_i], k=k+1, eps=eps,
                                          p=p, distance_upper_bound=distance_upper_bound)
        # Will always return itself in the first index. Delete it.
        distances, indices = distances[1:], indices[1:]
        # Add small constant to distances to avoid division by 0
        distances += 1e-3
        weights = idw(distances)
        # Assign missing value the weighted average of `k` nearest neighbours
        data[x_i][y_i] = np.dot(weights, [data_c[ind][y_i] for ind in indices])

    return data
976e51c66878643965b099f764595629b379d440
3,658,712
def role_generator(role): """Closure function returning a role function.""" return lambda *args, **kwargs: role.run(*args, **kwargs)
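# Minimal sketch with a hypothetical Role object exposing a run() method; the
# closure simply forwards *args/**kwargs to it.
class GreeterRole:
    def run(self, name, punctuation="!"):
        return "Hello, {}{}".format(name, punctuation)

greet = role_generator(GreeterRole())
print(greet("Ada"))                    # Hello, Ada!
print(greet("Ada", punctuation="?"))   # Hello, Ada?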
35dd1a54cb53a6435633c39608413c2d0b9fe841
3,658,713
def pick_slices(img, num_slices_per_view):
    """
    Picks the slices to display in each dimension,
        skipping any empty slices (without any segmentation at all).

    """
    slices = list()
    for view in range(len(img.shape)):
        dim_size = img.shape[view]
        non_empty_slices = np.array(
            [sl for sl in range(dim_size) if np.count_nonzero(get_axis(img, view, sl)) > 0])
        num_non_empty = len(non_empty_slices)

        # skip ~5% of the non-empty slices at each tail (clipped below at 0)
        skip_count = max(0, np.around(num_non_empty * 0.05).astype('int16'))
        # only when possible
        if skip_count > 0 and (num_non_empty - 2 * skip_count >= num_slices_per_view):
            non_empty_slices = non_empty_slices[skip_count: -skip_count]
            num_non_empty = len(non_empty_slices)

        # sampling non-empty slices only
        sampled_indices = np.linspace(0, num_non_empty,
                                      num=min(num_non_empty, num_slices_per_view),
                                      endpoint=False)
        slices_in_dim = non_empty_slices[np.around(sampled_indices).astype('int64')]

        # ensure you do not overshoot the valid slice range
        # (the original `sn >= 0 or sn <= num_non_empty` check was always true)
        slices_in_dim = [sn for sn in slices_in_dim if 0 <= sn < dim_size]

        slices.append(slices_in_dim)

    return slices
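# Hypothetical usage on a synthetic 3D segmentation; assumes the get_axis()
# helper called inside pick_slices is importable alongside it.
import numpy as np

seg = np.zeros((64, 64, 64), dtype=np.uint8)
seg[16:48, 16:48, 16:48] = 1   # a cube of "segmented" voxels
for view, view_slices in enumerate(pick_slices(seg, num_slices_per_view=5)):
    print("view {}: slices {}".format(view, list(view_slices)))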
ed80e4bd53e6a72c6ad7cad899d875ac320b33b7
3,658,714
import json


def cate2(request):
    """ Return the names of the Cate2 categories under the requested Cate1 from the DB. """
    cate1 = Cate1.objects.get(cate1_name=request.GET.get('cate1'))
    cate2 = list(map(lambda cate2: cate2['cate2_name'],
                     Cate2.objects.filter(cate1=cate1).values('cate2_name')))
    json_data = json.dumps({'cate2': cate2})
    return HttpResponse(json_data, content_type="application/json")
ebeca48bb9d6550fb34d68c453ef1fb47225fb4a
3,658,715
def dayChange():
    """ Day Change

    Calculates and stores in a dictionary the total current change in position
    value since yesterday, which is (current_price - lastday_price) * qty.

    :return: dict mapping each position's symbol to its dollar change since the
             previous close
    """
    daychange = dict()
    for position in portfolio:
        # Strings are returned from API; convert to floating point type
        current = float(position.current_price)
        last = float(position.lastday_price)
        quant = float(position.qty)
        daychange[position.symbol] = (current - last) * quant
    return daychange
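# Sketch of populating the module-level `portfolio` global via the
# alpaca-trade-api client; the key id, secret, and URL are placeholders.
import alpaca_trade_api as tradeapi

api = tradeapi.REST("KEY_ID", "SECRET_KEY", base_url="https://paper-api.alpaca.markets")
portfolio = api.list_positions()
print(dayChange())  # e.g. {'AAPL': 12.5, 'MSFT': -3.2}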
f33979f25ffe44a0de8ec0abc1c02284e8fe5427
3,658,717
def look_for_section(line): """Look for one of the sections in a line of text.""" for key in SECTIONS: if line.startswith(key): return key return None
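# Minimal sketch; SECTIONS is assumed to be an iterable of prefix strings
# defined elsewhere in this module.
SECTIONS = ("EDUCATION", "EXPERIENCE", "SKILLS")
assert look_for_section("EXPERIENCE - Acme Corp") == "EXPERIENCE"
assert look_for_section("some unrelated line") is None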
d44ad97312528c4fea856e705be8e6820695fd9a
3,658,718
def SetStrucIdx(sid, index): """ Change structure index @param sid: structure type ID @param index: new index of the structure @return: != 0 - ok @note: See GetFirstStrucIdx() for the explanation of structure indices and IDs. """ s = idaapi.get_struc(sid) if not s: return 0 return idaapi.set_struc_idx(s, index)
8b246d6e2fb155bdc789536f75e20971a54ddfc3
3,658,719
import json def extract_user_dict_from_tweet( tweet: Tweet ): """Takes the other_data field from a tweet object and extracts the data for the user from it. It returns a dictionary rather than a User model object because we might want to try looking up whether the user exists before creating a new user object. :type tweet Tweet :returns dict """ if tweet.other_data and len( tweet.other_data ) > 0: # extract the json into a dict j = json.loads( tweet.other_data ) # extract the user json from the created dict return json.loads( j[ 'user' ] )
533d8795c652e5c7f1299f3dcc04fc30de644222
3,658,720
def in_scope(repository_data): """Return whether the given repository is in scope for the configuration. Keyword arguments: repository_data -- data for the repository """ if "scope" in repository_data["configuration"] and repository_data["configuration"]["scope"] == "all": return True # Determine if user has sufficient permissions in the repository to approve the workflow run return not repository_data["object"].archived and ( repository_data["permissions"] == "write" or repository_data["permissions"] == "admin" )
0e521f805f69a1c6f306700680d42fbe76595c3a
3,658,721
from typing import Union
from typing import Tuple
from typing import Optional
from typing import List
from typing import Any


def run_image_container_checks(
    image_container: Union[AICSImage, Reader],
    set_scene: str,
    expected_scenes: Tuple[str, ...],
    expected_current_scene: str,
    expected_shape: Tuple[int, ...],
    expected_dtype: np.dtype,
    expected_dims_order: str,
    expected_channel_names: Optional[List[str]],
    expected_physical_pixel_sizes: Tuple[
        Optional[float], Optional[float], Optional[float]
    ],
    expected_metadata_type: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]],
) -> Union[AICSImage, Reader]:
    """
    A general suite of tests to run against image containers (Reader and AICSImage).
    """

    # Check serdes
    check_can_serialize_image_container(image_container)

    # Set scene
    image_container.set_scene(set_scene)

    # Check scene info
    assert image_container.scenes == expected_scenes
    assert image_container.current_scene == expected_current_scene

    # Check basics
    assert image_container.shape == expected_shape
    assert image_container.dtype == expected_dtype
    assert image_container.dims.order == expected_dims_order
    assert image_container.dims.shape == expected_shape
    assert image_container.channel_names == expected_channel_names
    assert image_container.physical_pixel_sizes == expected_physical_pixel_sizes
    assert isinstance(image_container.metadata, expected_metadata_type)

    # Read different chunks
    zyx_chunk_from_delayed = image_container.get_image_dask_data("ZYX").compute()
    cyx_chunk_from_delayed = image_container.get_image_dask_data("CYX").compute()

    # Check image still not fully in memory
    assert image_container._xarray_data is None

    # Read in mem then pull chunks
    zyx_chunk_from_mem = image_container.get_image_data("ZYX")
    cyx_chunk_from_mem = image_container.get_image_data("CYX")

    # Compare chunk reads
    np.testing.assert_array_equal(
        zyx_chunk_from_delayed,
        zyx_chunk_from_mem,
    )
    np.testing.assert_array_equal(
        cyx_chunk_from_delayed,
        cyx_chunk_from_mem,
    )

    # Check that the shape and dtype are expected after reading in full
    assert image_container.data.shape == expected_shape
    assert image_container.data.dtype == expected_dtype

    # Check serdes
    check_can_serialize_image_container(image_container)

    return image_container
a502650fb227aa4425a2501f112603b681b41fbf
3,658,722
from typing import List, Type


def collect_validation_helper(package_names: List[str]) -> Type[ValidationHelper]:
    """Finds subclasses of the validate.ValidationHelper from a list of
    package names.

    Args:
        package_names: A list of Python package names as strings.

    Returns:
        A validator class that is a subclass of validate.ValidationHelper.
    """
    validation_cls = find_subclasses(package_names, ValidationHelper)
    return validation_cls[0]
bb38b734641a025a9d7fd26d46ebfb1476879c82
3,658,723
def heartbeat(request):
    """Test that Elasticsearch is operational.

    :param request: current request object
    :type request: :class:`~pyramid:pyramid.request.Request`
    :returns: ``True`` if everything is ok, ``False`` otherwise.
    :rtype: bool
    """
    indexer = request.registry.indexer
    try:
        return indexer.client.ping()
    except Exception as e:
        logger.exception(e)
        return False
6acf0b21b6fcc64f70ca75cb6795df0b5109f273
3,658,724