content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import numpy as np from scipy.interpolate import splprep, splev def spline(xyz, s=3, k=2, nest=-1): """ Generate B-splines as documented in http://www.scipy.org/Cookbook/Interpolation The scipy.interpolate package wraps the netlib FITPACK routines (Dierckx) for calculating smoothing splines for various kinds of data and geometries. Although the data is evenly spaced in this example, it need not be so to use this routine. Parameters --------------- xyz : array, shape (N,3) array representing x,y,z of N points in 3d space s : float, optional A smoothing condition. The amount of smoothness is determined by satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x) is the smoothed interpolation of (x,y). The user can use s to control the tradeoff between closeness and smoothness of fit. Larger s means more smoothing while smaller values of s indicate less smoothing. Recommended values of s depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good s value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is the number of datapoints in x, y, and w. k : int, optional Degree of the spline. Cubic splines are recommended. Even values of k should be avoided especially with a small s-value. nest : None or int, optional An over-estimate of the total number of knots of the spline to help in determining the storage space. None results in value m+2*k. -1 results in m+k+1. Always large enough is nest=m+k+1. Default is -1. Returns ---------- xyzn : array, shape (M,3) array representing x,y,z of the M points of the smoothed, resampled spline Examples ---------- >>> import numpy as np >>> t=np.linspace(0,1.75*2*np.pi,100)# make ascending spiral in 3-space >>> x = np.sin(t) >>> y = np.cos(t) >>> z = t >>> x+= np.random.normal(scale=0.1, size=x.shape) # add noise >>> y+= np.random.normal(scale=0.1, size=y.shape) >>> z+= np.random.normal(scale=0.1, size=z.shape) >>> xyz=np.vstack((x,y,z)).T >>> xyzn=spline(xyz,3,2,-1) >>> len(xyzn) > len(xyz) True See also ---------- scipy.interpolate.splprep scipy.interpolate.splev """ # find the knot points tckp, u = splprep([xyz[:, 0], xyz[:, 1], xyz[:, 2]], s=s, k=k, nest=nest) # evaluate spline, including interpolated points xnew, ynew, znew = splev(np.linspace(0, 1, 400), tckp) return np.vstack((xnew, ynew, znew)).T
97500c7a63bc076abd770c43fd3f6d23c30baa03
3,658,374
import time def load_supercomputers(log_file, train_ratio=0.5, windows_size=20, step_size=0, e_type='bert', mode="balance", no_word_piece=0): """ Load BGL, Thunderbird, and Spirit unstructured log into train and test data Parameters ---------- log_file: str, the file path of raw log (extension: .log). train_ratio: float, the ratio of training data for train/test split. windows_size: int, the window size for sliding window step_size: int, the step size for sliding window. if step_size is equal to window_size then fixed window is applied. e_type: str, embedding type (choose from BERT, XLM, and GPT2). mode: str, split train/testing in balance or not no_word_piece: bool, use split word into wordpiece or not. Returns ------- (x_tr, y_tr): the training data (x_te, y_te): the testing data """ print("Loading", log_file) with open(log_file, mode="r", encoding='utf8') as f: logs = f.readlines() logs = [x.strip() for x in logs] E = {} e_type = e_type.lower() if e_type == "bert": encoder = bert_encoder elif e_type == "xlm": encoder = xlm_encoder else: if e_type == "gpt2": encoder = gpt2_encoder else: raise ValueError('Embedding type {0} is not in BERT, XLM, and GPT2'.format(e_type.upper())) print("Loaded", len(logs), "lines!") x_tr, y_tr = [], [] i = 0 failure_count = 0 n_train = int(len(logs) * train_ratio) c = 0 t0 = time.time() while i < n_train - windows_size: c += 1 if c % 1000 == 0: print("\rLoading {0:.2f}% - {1} unique logs".format(i * 100 / n_train, len(E.keys())), end="") if logs[i][0] != "-": failure_count += 1 seq = [] label = 0 for j in range(i, i + windows_size): if logs[j][0] != "-": label = 1 content = logs[j] # remove label from log messages content = content[content.find(' ') + 1:] content = clean(content.lower()) if content not in E.keys(): try: E[content] = encoder(content, no_word_piece) except Exception as _: print(content) emb = E[content] seq.append(emb) x_tr.append(seq.copy()) y_tr.append(label) i = i + step_size print("\nlast train index:", i) x_te = [] y_te = [] # for i in range(n_train, len(logs) - windows_size, step_size): if i % 1000 == 0: print("Loading {:.2f}".format(i * 100 / n_train)) if logs[i][0] != "-": failure_count += 1 seq = [] label = 0 for j in range(i, i + windows_size): if logs[j][0] != "-": label = 1 content = logs[j] # remove label from log messages content = content[content.find(' ') + 1:] content = clean(content.lower()) if content not in E.keys(): E[content] = encoder(content, no_word_piece) emb = E[content] seq.append(emb) x_te.append(seq.copy()) y_te.append(label) (x_tr, y_tr) = shuffle(x_tr, y_tr) print("Total failure logs: {0}".format(failure_count)) if mode == 'balance': x_tr, y_tr = balancing(x_tr, y_tr) num_train = len(x_tr) num_test = len(x_te) num_total = num_train + num_test num_train_pos = sum(y_tr) num_test_pos = sum(y_te) num_pos = num_train_pos + num_test_pos print('Total: {} instances, {} anomaly, {} normal' \ .format(num_total, num_pos, num_total - num_pos)) print('Train: {} instances, {} anomaly, {} normal' \ .format(num_train, num_train_pos, num_train - num_train_pos)) print('Test: {} instances, {} anomaly, {} normal\n' \ .format(num_test, num_test_pos, num_test - num_test_pos)) return (x_tr, y_tr), (x_te, y_te)
2282b8cbd975160e57ff62106a7e0bad3f337e5a
3,658,375
def is_running(service: Service) -> bool: """Is the given pyodine daemon currently running? :raises ValueError: Unknown `service`. """ try: return bool(TASKS[service]) and not TASKS[service].done() except KeyError: raise ValueError("Unknown service type.")
160c7c8da0635c9c11ebdaf711b794fc0a09adff
3,658,376
def PropertyWrapper(prop): """Wrapper for db.Property to make it look like a Django model Property""" if isinstance(prop, db.Reference): prop.rel = Relation(prop.reference_class) else: prop.rel = None prop.serialize = True return prop
9f93a37dffd433fd87ffa4bfdb65680a9ad1d02d
3,658,377
def drowLine(cord,orient,size): """ The function provides the coordinates of the line. Arguments: starting x or y coordinate of the line, orientation (string. "vert" or "hor") and length of the line Return: list of two points (start and end of the line) """ global cv2 if orient == "vert": x1 = cord x2 = cord y1 = 0 y2 = size elif orient == "hor": x1 = 0 x2 = size y1 = cord y2 = cord else: print("not hor not vert") return 0 return [(x1, y1), (x2, y2)]
bc688cfe33dcf42ddac6770bbdf91ccc19c1b427
3,658,378
def bluetoothRead(): """ Returns the bluetooth address of the robot (if it has been previously stored) arguments: none returns: string - the bluetooth address of the robot, if it has been previously stored; None otherwise """ global EEPROM_BLUETOOTH_ADDRESS bt = EEPROMread(EEPROM_BLUETOOTH_ADDRESS, 17) if bluetoothValidate(bt): return bt else: return None
c4e08d438b91b3651f27b374c0b38069ddd1eaaf
3,658,379
def is_step_done(client, step_name): """Query the trail status using the client and return True if step_name has completed. Arguments: client -- A TrailClient or similar object. step_name -- The 'name' tag of the step to check for completion. Returns: True -- if the step has succeeded. False -- otherwise. """ # To understand the structure of the result returned by the API calls, please see the documentation of the # TrailClient class. statuses = client.status(fields=[StatusField.STATE], name=step_name) # In this case, the status call returns a list of step statuses. # Since we have exactly one step with each name and we are querying the status of steps with the given name, # there will be only one element in the result list. Hence we refer to the zeroth element of results. if statuses and statuses[0][StatusField.STATE] == Step.SUCCESS: return True return False
a5373d7e00f0c8526f573356b5d71a2ac08aa516
3,658,380
def on_chat_send(message): """Broadcast chat message to a watch room""" # Check if params are correct if 'roomId' not in message: return {'status_code': 400}, request.sid room_token = message['roomId'] # Check if room exists if not db.hexists('rooms', room_token): return {'status_code': 404}, request.sid # Check that the user is in the room if room_token not in rooms(sid=request.sid): return {'status_code': 403}, request.sid # Add current server timestamp to the state message = add_current_time_to_state(message) # Send message to everybody in the room emit('message_update', message, room=room_token) # Response return {'status_code': 200}, 200
01c7f15602653848c9310e90c0a353648fafbb52
3,658,381
from typing import Union def arima(size: int = 100, phi: Union[float, ndarray] = 0, theta: Union[float, ndarray] = 0, d: int = 0, var: float = 0.01, random_state: int = None) -> ndarray: # inherit from arima_with_seasonality """Simulate a realization from an ARIMA characteristic. Acts like `tswge::gen.arima.wge()` Parameters ---------- size: scalar int Number of samples to generate. phi: scalar float or list-like AR process order theta: scalar float or list-like MA process order d: scalar int ARIMA process difference order var: scalar float, optional Noise variance level. random_state: scalar int, optional Seed the random number generator. Returns ------- signal: np.ndarray Simulated ARIMA. """ return arima_with_seasonality(size = size, phi = phi, theta = theta, d = d, s = 0, var = var, random_state = random_state)
24c3ac8af295d25facf0e65a4fc0925b22db9444
3,658,382
def gt_dosage(gt): """Convert unphased genotype to dosage""" x = gt.split(b'/') return int(x[0])+int(x[1])
819fc9beb834f57e44bcb0ac3e1d3c664c7efd42
3,658,383
from typing import Optional from typing import Dict from typing import Any def create_key_pair_in_ssm( ec2: EC2Client, ssm: SSMClient, keypair_name: str, parameter_name: str, kms_key_id: Optional[str] = None, ) -> Optional[KeyPairInfo]: """Create keypair in SSM.""" keypair = create_key_pair(ec2, keypair_name) try: kms_key_label = "default" kms_args: Dict[str, Any] = {} if kms_key_id: kms_key_label = kms_key_id kms_args = {"KeyId": kms_key_id} LOGGER.info( 'storing generated key in SSM parameter "%s" using KMS key "%s"', parameter_name, kms_key_label, ) ssm.put_parameter( Name=parameter_name, Description='SSH private key for KeyPair "{}" ' "(generated by Runway)".format(keypair_name), Value=keypair["KeyMaterial"], Type="SecureString", Overwrite=False, **kms_args, ) except ClientError: # Erase the key pair if we failed to store it in SSM, since the # private key will be lost anyway LOGGER.exception( "failed to store generated key in SSM; deleting " "created key pair as private key will be lost" ) ec2.delete_key_pair(KeyName=keypair_name, DryRun=False) return None return { "status": "created", "key_name": keypair.get("KeyName", ""), "fingerprint": keypair.get("KeyFingerprint", ""), }
40cca5fd938aa6709a4d844c912b294c6aaba552
3,658,384
def sumofsq(im, axis=0): """Compute square root of sum of squares. Args: im: Raw image. axis: Channel axis. Returns: Square root of sum of squares of input image. """ out = np.sqrt(np.sum(im.real * im.real + im.imag * im.imag, axis=axis)) return out
6aa791d3c6a2e8e6fff0dbe0a364350d48fb4794
3,658,385
def biquad_bp2nd(fm, q, fs, q_warp_method="cos"): """Calc coeff for bandpass 2nd order. input: fm...mid frequency in Hz q...bandpass quality fs...sampling frequency in Hz q_warp_method..."sin", "cos", "tan" output: B...numerator coefficients Laplace transfer function A...denominator coefficients Laplace transfer function b...numerator coefficients z-transfer function a...denominator coefficients z-transfer function """ wm = 2*np.pi*fm B = np.array([0, 1 / (q*wm), 0]) A = np.array([1 / wm**2, 1 / (q*wm), 1]) wmpre = f_prewarping(fm, fs) qpre = q_prewarping(q, fm, fs, q_warp_method) Bp = 0., 1 / (qpre*wmpre), 0. Ap = 1 / wmpre**2, 1 / (qpre*wmpre), 1. b, a = bilinear_biquad(Bp, Ap, fs) return B, A, b, a
c7330f9bd4a1941359a54ea6e6d7e8fe7801f55e
3,658,388
def pullAllData(): """ Pulls all available data from the database Sends all analyzed data back in a json with fileNames and list of list of all "spots" intensities and backgrounds. Args: db.d4Images (Mongo db collection): Mongo DB collection with processed data Returns: payload (jsonify(dict)): data dictionary with filename, spots, and background info statusCode (int): HTTP status code """ pullFileNames = [] pullSpotData = [] pullBgData = [] for eachEntry in db.d4Images.find(): pullFileNames.append(eachEntry["filename"]) pullSpotData.append(eachEntry["spots"]) pullBgData.append(eachEntry["background"]) payload = {"filename": pullFileNames, "spots": pullSpotData, "background": pullBgData} statusCode = 200 return jsonify(payload), statusCode
97674c981af48f37e90667c00947673f1df34c66
3,658,389
def f2(): """ >>> # +--------------+-----------+-----------+------------+-----------+--------------+ >>> # | Chromosome | Start | End | Name | Score | Strand | >>> # | (category) | (int32) | (int32) | (object) | (int64) | (category) | >>> # |--------------+-----------+-----------+------------+-----------+--------------| >>> # | chr1 | 1 | 2 | a | 0 | + | >>> # | chr1 | 6 | 7 | b | 0 | - | >>> # +--------------+-----------+-----------+------------+-----------+--------------+ >>> # Stranded PyRanges object has 2 rows and 6 columns from 1 chromosomes. >>> # For printing, the PyRanges was sorted on Chromosome and Strand. """ full_path = get_example_path("f2.bed") return pr.read_bed(full_path)
159c5167bacbeed38578a8b574b31fa2f57f9467
3,658,390
def latin(n, d): """ Build latin hypercube. Parameters ---------- n : int Number of points. d : int Size of space. Returns ------- lh : ndarray Array of points uniformly placed in d-dimensional unit cube. """ # spread function def spread(points): return sum(1./np.linalg.norm(np.subtract(points[i], points[j])) for i in range(n) for j in range(n) if i > j) # starting with diagonal shape lh = [[i/(n-1.)]*d for i in range(n)] # minimizing spread function by shuffling minspread = spread(lh) for i in range(1000): point1 = np.random.randint(n) point2 = np.random.randint(n) dim = np.random.randint(d) newlh = np.copy(lh) newlh[point1, dim], newlh[point2, dim] = newlh[point2, dim], newlh[point1, dim] newspread = spread(newlh) if newspread < minspread: lh = np.copy(newlh) minspread = newspread return lh
416d8c8086eeeaf6e8ea0bf14c300750025455be
3,658,391
def _get_valid_dtype(series_type, logical_type): """Return the dtype that is considered valid for a series with the given logical_type""" backup_dtype = logical_type.backup_dtype if ks and series_type == ks.Series and backup_dtype: valid_dtype = backup_dtype else: valid_dtype = logical_type.primary_dtype return valid_dtype
7b4bcd724d2d7a4029a794456882a8f59fc29006
3,658,392
def geometric_mean_longitude(t='now'): """ Returns the geometric mean longitude (in degrees). Parameters ---------- t : {parse_time_types} A time (usually the start time) specified as a parse_time-compatible time string, number, or a datetime object. """ T = julian_centuries(t) result = 279.696680 + 36000.76892 * T + 0.0003025 * T**2 result = result * u.deg return Longitude(result)
c47f106392f507d7750f86cba6a7c16ba3270b11
3,658,393
def get_or_create(model, **kwargs): """Get or create a database model instance.""" instance = model.query.filter_by(**kwargs).first() if instance: return instance else: instance = model(**kwargs) db.session.add(instance) return instance
6af359ebda80b81a0d02762d576ff407f0c186c4
3,658,396
def test_class_id_cube_strategy_elliptic_paraboloid(experiment_enviroment, renormalize, thread_flag): """ """ tm, dataset, experiment, dictionary = experiment_enviroment class_id_params = { "class_ids" + MAIN_MODALITY: list(np.arange(0, 1.0, 0.25)), "class_ids" + NGRAM_MODALITY: list(np.arange(0, 2.05, 0.25)), } def retrieve_elliptic_paraboloid_score(topic_model): """ """ model = topic_model._model return -((model.class_ids[MAIN_MODALITY]-0.6-model.class_ids[NGRAM_MODALITY]) ** 2 + (model.class_ids[MAIN_MODALITY]-0.6+model.class_ids[NGRAM_MODALITY]/2) ** 2) cube = CubeCreator( num_iter=1, parameters=class_id_params, reg_search="grid", strategy=GreedyStrategy(renormalize), tracked_score_function=retrieve_elliptic_paraboloid_score, separate_thread=thread_flag ) dummies = cube(tm, dataset) tmodels_lvl2 = [dummy.restore() for dummy in dummies] if not renormalize: assert len(tmodels_lvl2) == sum(len(m) for m in class_id_params.values()) else: assert len(tmodels_lvl2) == 10 if renormalize: CLASS_IDS_FOR_CHECKING = [(1.0, 0.0), (1.0, 0.0), (0.8, 0.2), (0.667, 0.333), (0.571, 0.429), (0.5, 0.5), (0.444, 0.556), (0.4, 0.6), (0.364, 0.636), (0.333, 0.667)] for i, one_model in enumerate(tmodels_lvl2): assert np.round(one_model.class_ids[MAIN_MODALITY], 3) == CLASS_IDS_FOR_CHECKING[i][0] assert np.round(one_model.class_ids[NGRAM_MODALITY], 3) == CLASS_IDS_FOR_CHECKING[i][1] else: one_model = tmodels_lvl2[len(class_id_params["class_ids" + MAIN_MODALITY])] assert np.round(one_model.class_ids[MAIN_MODALITY], 3) == 0.5 assert np.round(one_model.class_ids[NGRAM_MODALITY], 3) == 0 assert cube.strategy.best_score >= -0.09
fc5a17e5bf6b158ce242b4289938dec4d2d2e32b
3,658,397
from typing import Dict from typing import List def apply_filters(filters: Dict, colnames: List, row: List) -> List: """ Process data based on filter chains :param filters: :param colnames: :param row: :return: """ if filters: new_row = [] for col, data in zip(colnames, row): if col in filters: params = filters[col][:] for f in params: current_filter = f[:] # copy so that pop does not break next iteration filter_name = current_filter.pop(0) if filter_name not in FILTERS: raise FilterError(f"Error: Invalid filter name: {filter_name}") func, num_params = FILTERS[filter_name][:2] if len(current_filter) != num_params: raise FilterError( f"Error: Incorrect number of params for {filter_name}. Expected {num_params}, got {len(current_filter)})") data = func(data, *current_filter) new_row.append(data) return new_row return row
e52e8b2773dc4e794076b8a480e5eaaab50de06e
3,658,398
def kaiming(shape, dtype, partition_info=None): """Kaiming initialization as described in https://arxiv.org/pdf/1502.01852.pdf""" return tf.random.truncated_normal(shape) * tf.sqrt(2 / float(shape[0]))
153213279909bf01e9782e0e56d270632c502b27
3,658,399
def trunc_artist(df: pd.DataFrame, artist: str, keep: float = 0.5, random_state: int = None): """ Keeps only the requested portion of songs by the artist (this method is not in use anymore) """ data = df.copy() df_artist = data[data.artist == artist] data = data[data.artist != artist] orig_length = len(df_artist) try: df_artist = df_artist.sample(int(len(df_artist) * keep), random_state=random_state) except ValueError: pass new_length = len(df_artist) print("Truncating data for {artist}, original length = {orig}, new length = {new}".format(artist=artist, orig=orig_length, new=new_length)) data = data.append(df_artist) return data.reset_index(drop=True)
7157e223bdf87d0463820565e40eade3e1725ae5
3,658,400
async def test_postprocess_results(original, expected): """Test Application._postprocess_results.""" callback1_called = False callback2_called = False app = Application("testing") @app.result_postprocessor async def callback1(app, message): nonlocal callback1_called callback1_called = True return message + 1 @app.result_postprocessor async def callback2(app, message): nonlocal callback2_called callback2_called = True # Nothing is returned out of Application._postprocess_results so # the assertion needs to happen inside a callback. assert message == expected await app._postprocess_results([original]) assert callback1_called assert callback2_called
9c2a6bdfcb281d62959135be01693baaaf266780
3,658,401
def task_migrate(): """Apply Django database migrations""" return { 'actions': ['''cd CCwebsite && python3 manage.py migrate'''] }
d0d146c2e628abbe33714ae0ff6a546aab9842cc
3,658,403
import numpy def distance_to_arc(alon, alat, aazimuth, plons, plats): """ Calculate the closest distance between a great circle arc and a point (or a collection of points). :param float alon, alat: Arc reference point longitude and latitude, in decimal degrees. :param aazimuth: Arc azimuth (an angle between direction to a north and arc in clockwise direction), measured in a reference point, in decimal degrees. :param float plons, plats: Longitudes and latitudes of points to measure distance. Either scalar values or numpy arrays of decimal degrees. :returns: Distance in km, a scalar value or numpy array depending on ``plons`` and ``plats``. A distance is negative if the target point lies on the right hand side of the arc. Solves a spherical triangle formed by reference point, target point and a projection of target point to a reference great circle arc. """ azimuth_to_target = azimuth(alon, alat, plons, plats) distance_to_target = geodetic_distance(alon, alat, plons, plats) # find an angle between an arc and a great circle arc connecting # arc's reference point and a target point t_angle = (azimuth_to_target - aazimuth + 360) % 360 # in a spherical right triangle cosine of the angle of a cathetus # augmented to pi/2 is equal to sine of an opposite angle times # sine of hypotenuse, see # http://en.wikipedia.org/wiki/Spherical_trigonometry#Napier.27s_Pentagon angle = numpy.arccos( (numpy.sin(numpy.radians(t_angle)) * numpy.sin(distance_to_target / EARTH_RADIUS)) ) return (numpy.pi / 2 - angle) * EARTH_RADIUS
e8868a2ce9125cc75e587a8a408f5b479b6a198a
3,658,404
def model_predict(test_data: FeatureVector): """ Endpoint to make a prediction with the model. The endpoint `model/train` should have been used before this one. Args: test_data (FeatureVector): A unit vector of feature """ try: y_predicted = api.ml_model.predict_proba(test_data.to_numpy()) except NotFittedError: raise HTTPException( status_code=500, detail="This LogisticRegression instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\nUse `model/train` endpoint with 10 examples before", ) y_pred_label = np.argmax(y_predicted, axis=1).astype(np.int32) y_pred_score = np.max(y_predicted, axis=1) return Prediction(label=y_pred_label, probability=y_pred_score)
c8b473d09092e03be85e986287350dd3115cf88d
3,658,405
def search_folders(project, folder_name=None, return_metadata=False): """Folder name based case-insensitive search for folders in project. :param project: project name :type project: str :param folder_name: the new folder's name :type folder_name: str. If None, all the folders in the project will be returned. :param return_metadata: return metadata of folders instead of names :type return_metadata: bool :return: folder names or metadatas :rtype: list of strs or dicts """ if not isinstance(project, dict): project = get_project_metadata_bare(project) team_id, project_id = project["team_id"], project["id"] result_list = [] params = { 'team_id': team_id, 'project_id': project_id, 'offset': 0, 'name': folder_name, 'is_root': 0 } total_folders = 0 while True: response = _api.send_request( req_type='GET', path='/folders', params=params ) if not response.ok: raise SABaseException( response.status_code, "Couldn't search folders " + response.text ) response = response.json() results_folders = response["data"] for r in results_folders: if return_metadata: result_list.append(r) else: result_list.append(r["name"]) total_folders += len(results_folders) if response["count"] <= total_folders: break params["offset"] = total_folders return result_list
cf8a9d95efcdb90d0891ef4ca588edf6375ed2af
3,658,407
def tempo_para_percorrer_uma_distancia(distancia, velocidade): """ Receives a distance and a movement speed, and returns the number of hours it would take to cover that distance in a straight line""" horas = distancia / velocidade return round(horas,2)
e7754e87e010988284a6f89497bb1c5582ea0e85
3,658,408
import math def getCorrection(start, end, pos): """Correct the angle for the trajectory adjustment Function to get the correct angle correction when the robot deviates from it's estimated trajectory. Args: start: The starting position of the robot. end: The position the robot is supposed to arrive. pos: The current position of the robot. Returns: An angle in radians between -pi and pi to correct the robot trajectory and arrive succesfully at end position. """ (xs, ys) = start (xe, ye) = end (xp, yp) = pos # Discard edge cases with no sense assert(xs != xe or ys != ye) assert(xp != xe or yp != ye) assert(xs != xp or ys != yp) # First get the line equation from start to end points. # line equation follows the following pattern: y = m * x + b m = 0.0 b = 0.0 if abs(xe - xs) > PRECISION: m = (ye - ys) / (xe - xs) b = ys - m * xs else: m = 1 b = - xs # Get the perpendicular line equation to the first line mp = 0.0 bp = 0.0 if abs(xe - xs) < PRECISION: bp = yp elif abs(m) < PRECISION: mp = 1 bp = - xp else: mp = - 1 / m bp = yp - mp * xp # Get the point at the intersection of the two lines xi = 0.0 yi = 0.0 if abs(xe - xs) < PRECISION: xi = b yi = bp elif abs(m) < PRECISION: xi = bp yi = b else: xi = - (bp - b) / (mp - m) yi = m * xi + b # Get the distance between the tree points dist_pi = math.sqrt((xp - xi) * (xp - xi) + (yp - yi) * (yp - yi)) dist_pe = math.sqrt((xp - xe) * (xp - xe) + (yp - ye) * (yp - ye)) dist_sp = math.sqrt((xs - xp) * (xs - xp) + (ys - yp) * (ys - yp)) # Get the offset angles alpha and beta alpha = math.asin(dist_pi / dist_pe) beta = math.asin(dist_pi / dist_sp) return - (alpha + beta)
9f1073cb4c071abfecac20c85c56e5fb1638de6e
3,658,409
import logging def main(input_filepath, output_filepath): """ Runs data processing scripts to turn raw data from (../raw) into cleaned data ready to be analyzed (saved in ../processed). """ logger = logging.getLogger(__name__) logger.info('making final data set from raw data...') df = load_csv_file_to_df(input_filepath) df = handle_na_and_duplicates(df) df = clean_dataframe(df) df = organize_columns(df) df = concat_abilities(df) out_str = create_monsters_string(df) create_text_output_file(out_str, output_filepath) logger.info('Output file created!') return None
fe799a34f9cb5811228853469dbff92592a87e69
3,658,410
def string2symbols(s): """ Convert string to list of chemical symbols. Args: s: Returns: """ i = None n = len(s) if n == 0: return [] c = s[0] if c.isdigit(): i = 1 while i < n and s[i].isdigit(): i += 1 return int(s[:i]) * string2symbols(s[i:]) if c == "(": p = 0 for i, c in enumerate(s): if c == "(": p += 1 elif c == ")": p -= 1 if p == 0: break j = i + 1 while j < n and s[j].isdigit(): j += 1 if j > i + 1: m = int(s[i + 1 : j]) else: m = 1 return m * string2symbols(s[1:i]) + string2symbols(s[j:]) if c.isupper(): i = 1 if 1 < n and s[1].islower(): i += 1 j = i while j < n and s[j].isdigit(): j += 1 if j > i: m = int(s[i:j]) else: m = 1 return m * [s[:i]] + string2symbols(s[j:]) else: raise ValueError
1f08ba5c02536f4b67c9bd573c0dde8fbe46dc74
3,658,411
import csv from typing import Counter def get_dictionary(filename, dict_size=2000): """ Read the tweets and return a list of the 'max_words' most common words. """ all_words = [] with open(filename, 'r') as csv_file: r = csv.reader(csv_file, delimiter=',', quotechar='"') for row in r: tweet = row[3] if len(tweet) <= MAX_TWEET_CHARS: words = preprocess(tweet).split() all_words += words # Make the dictionary out of only the N most common words word_counter = Counter(all_words) dictionary, _ = zip(*word_counter.most_common(min(dict_size, len(word_counter)))) return dictionary
20917b0c9cda18d5436b438e0cdcf0c83d464899
3,658,413
def find_last_index(l, x): """Returns the last index of element x within the list l""" for idx in reversed(range(len(l))): if l[idx] == x: return idx raise ValueError("'{}' is not in list".format(x))
f787b26dd6c06507380bf2e336a58887d1f1f7ea
3,658,414
import requests import zipfile import io def download_query_alternative(user, password, queryid, batch_size=500): """ This is an alternative implementation of the query downloader. The original implementation only used a batch size of 20 as this allowed for using plain LOC files. Unfortunately this is a bit slow and causes more load on the web server due to a lot of small requests. With the modified implementation, the batch size can be chosen by the user. This is accomplished by using an in-memory extraction of the downloaded ZIP file. Additionally this code uses an XML parser instead of a regex to retrieve the data. :param user: The name of the user to log in with. :type user: str :param password: The password to use for the login. :type password: str :param queryid: The ID of the search query to retrieve the cache codes for. :type queryid: int :param batch_size: The batch size to use for the requests. This must at least be 1 and cannot exceed 500. The upper bound is due to the limits used by the Opencaching.de site. :type batch_size: int :return: The list of cache codes retrieved from the query. :rtype: list[str] :raises ValueError: Some of the input values are invalid. """ # Check the specified batch size. if not 0 < batch_size <= 500: raise ValueError("Invalid batch size.") # Use a custom header. headers = { "User-agent": "opencaching-de_statistics " + "[https://github.com/FriedrichFroebel/opencaching-de_statistics]" } # Try to log in. session = requests.Session() response = session.post( "https://www.opencaching.de/login.php", data={ "action": "login", "target": "query.php", "email": user.encode("utf-8"), "password": password.encode("utf-8"), }, headers=headers, ) # Check if the login has been successful. if "32x32-search.png" not in response.text: raise ValueError("Login failed (bad response).") # Prepare our status variables. oc_codes = [] batch_start = 0 while True: # Build the current URL, then retrieve the data. # In contrast to the original version, we enforce ZIP files here. url = ( f"https://www.opencaching.de/search.php?queryid={queryid}&output=loc" + f"&startat={batch_start}&count={batch_size}&zip=1" ) response = session.get(url, headers=headers) # Check if the request has been successful. # If there has been an error, return the list of OC codes found until now. if response.status_code != 200: print(f"-- Terminating due to bad status code: {response.status_code}") break # Check if we got a ZIP file (in fact this should always be the case). # The first check uses the magic number for non-empty ZIP archives. if response.text.startswith("PK\x03\x04") and not response.text.startswith( "<?xml" ): # This is a zip file, so uncompress it. zip_file = zipfile.ZipFile(io.BytesIO(response.content)) # The ZIP files normally have one file only, so we just retrieve the first # one here. files = zip_file.namelist() if files: filename = files[0] xml_data = zip_file.read(filename) # If this is not a ZIP file or the ZIP file has no content, assume that it has # been a plain XML file. if not xml_data: xml_data = response.text # Parse the XML data. tree = ElementTree.fromstring(xml_data) # Get the name tags from the XML tree and retrieve the ID attribute for this # tag. # If the ID attribute is missing, the corresponding entry will be `None`. new_oc_codes = [name.get("id") for name in tree.iter("name")] # Remove all the `None` elements. new_oc_codes = list(filter(None, new_oc_codes)) # We have reached the end of the results. 
if not new_oc_codes: break # Add the new codes to the existing list and move on to the next request. oc_codes = oc_codes + new_oc_codes batch_start += batch_size return oc_codes
2de7c3b453809c86093d1884438613985f7041b3
3,658,415
def parse_template(templ_str, event): """ Parses a template string and find the corresponding element in an event data structure. This is a highly simplified version of the templating that is supported by the Golang template code - it supports only a single reference to a sub element of the event structure. """ matches = TEMPLATE_RE.search(templ_str) tokens = matches.group(1).split('.') ref = event loc = [] for token in tokens: token = token.strip() # Skip the blank tokens if not token: continue if token not in ref: disp_loc = "event" + ''.join(["['{}']".format(_) for _ in loc]) err = "Could not find '{}' in {}".format(token, disp_loc) raise RuntimeError(err) ref = ref[token] loc.append(token) return ref
ec5c3822c390cbb4beff6428b91cd8b12157f2e3
3,658,416
import time def current_time_hhmm() -> str: """ Uses the time library to get the current time in hours and minutes Args: None Returns: str: Current time formatted as HH:MM (zero-padded hour:minutes) """ logger.info('Getting current time') return "{:02d}:{:02d}".format(time.gmtime().tm_hour, time.gmtime().tm_min)
c7902ac8a8fb2528bacf6a5bc8459865604dd204
3,658,417
def configure(node): """ Generates the script to set the hostname in a node """ script = [] script.append(Statements.exec("hostname %s" % node.getName())) script.append(Statements.createOrOverwriteFile( "/etc/hostname", [node.getName()])) script.append(Statements.exec( "sed -i 's/127.0.0.1/127.0.0.1\t%s/' /etc/hosts" % node.getName())) return script
b0acf0f6a1363f1c7ad5a8e6dce6cb5d45586135
3,658,420
import random def processOptional(opt): """ Processes the optional element 50% of the time, skips it the other 50% of the time """ rand = random.random() if rand <= 0.5: return '' else: return processRHS(opt.option)
bda8130952f11f4df9342764d749dd6c93109d8e
3,658,421
def remove_non_paired_trials(df): """Remove non-paired trials from a dataset. This function will remove any trials from the input dataset df that do not have a matching pair. A matching pair are trial conditions A->B and B->A. """ # Define target combinations start_pos = np.concatenate(df['startPos'].to_numpy()) end_pos = np.concatenate(df['targPos'].to_numpy()) targ_comb = np.concatenate([start_pos, end_pos], axis=1) uni_targ_comb = np.unique(targ_comb, axis=0) # Convert target combinations to trial conditions start_cond = get_targ_cond(df['startPos']) end_cond = get_targ_cond(df['targPos']) targ_cond = [''.join([s, e]) for s, e in zip(start_cond, end_cond)] mask = get_targ_pairs(start_cond, end_cond) # Remove non-paired targets df = df[np.array(mask)] targ_cond = [tc for tc, m in zip(targ_cond, mask) if m] # Put other target information into a dict for easy access. This is # redundant and probably unnecessary, but is being done just in case this # information may be useful later on. targ_info = { 'start_pos': start_pos, 'end_pos': end_pos, 'targ_comb': targ_comb, 'uni_targ_comb': uni_targ_comb } return df, targ_cond, targ_info
30b5b86d9354c55dd2514114dc1180f397f2e56c
3,658,422
def compute_weighted_means_ds(ds, shp, ds_name='dataset', time_range=None, column_names=[], averager=False, df_output=pd.DataFrame(), output=None, land_only=False, time_stat=False, ): """ Compute spatial weighted mean of xr.Dataset Parameters ---------- ds: xr.DataSet shp: gp.GeoDataFrame gp.GeoDataFrame containing the information needed for xesmf's spatial averaging ds_name: str (optional) Name of the dataset will be written to the pd.DataFrame as an extra column time_range: list (optional) List containing start and end date to select from ``ds`` column_names: list (optional) Extra column names of the pd.DataFrame; the information is read from global attributes of ``ds`` averager: str, xesmf.SpatialAverager (optional) Use CORDEX domain name to calculate a xesmf.SpatialAverager object or use user-given one. df_output: pd.DataFrame (optional) pd.DataFrame to be concatenated with the newly created pd.DataFrame output: str (optional) Name of the output directory path or file land_only: bool (optional) Consider only land points\n !!!This is NOT implemented yet!!!\n As workaround write land sea mask in ``ds['mask']``. xesmf's spatial averager automatically considers ``ds['mask']``. time_stat: str or list (optional) Do some time statistics on ``ds``\n !!!This is NOT implemented yet!!! Returns ------- DataFrame : pd.DataFrame pandas Dataframe containing time series of spatial averages. Example ------- To calculate time series of spatial averages for several 'Bundeländer':\n - select Schleswig-Holstein, Hamburg, Bremen and Lower Saxony\n - Merge those regions to one new region calles NortSeaCoast\n - Select time slice from 2007 to 2009\n - Set CORDEX specific result DataFrame column names\n :: import xarray as xr import xweights as xw path = '/work/kd0956/CORDEX/data/cordex/output/EUR-11/CLMcom/MIROC-MIROC5/rcp85/r1i1p1/CLMcom-CCLM4-8-17/v1/mon/tas/v20171121/' netcdffile = path + 'tas_EUR-11_MIROC-MIROC5_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-201012.nc' ds = xr.open_dataset(netcdffile) df = xw.compute_weighted_means_ds(ds, 'states', subregions=['01_Schleswig-Holstein, '02_Hamburg', '03_Niedersachsen', '04_Bremen'], merge_column=['all', 'NorthSeaCoast'], time_range=['2007-01-01','2009-12-31'], column_names=['institute_id', 'driving_model_id', 'experiment_id', 'driving_model_ensemlbe_member', 'model_id', 'rcm_version_id'], ) """ if land_only: """ Not clear how to find right lsm file for each ds Then write lsm file to ds['mask'] The rest is done by xesmf """ NotImplementedError if not isinstance(ds, xr.Dataset): return df_output if time_range: ds = ds.sel(time=slice(time_range[0], time_range[1])) column_dict = {column:ds.attrs[column] if hasattr(ds, column) else None for column in column_names} try: out = spatial_averager(ds, shp, savg=averager) except: return df_output drop = [i for i in out.coords if not out[i].dims] out = out.drop(labels=drop) if time_stat: """ Not sure if it is usefull to implement here or do it seperately after using xweights """ NotImplementedError df_output = concat_dataframe(df_output, out, column_dict=column_dict, name=ds_name) if output: write_to_csv(df_output, output) return df_output
e575d17eefe8de66c0b6fd63abcf5d3bd6cac6ae
3,658,423
def action_remove(indicator_id, date, analyst): """ Remove an action from an indicator. :param indicator_id: The ObjectId of the indicator to update. :type indicator_id: str :param date: The date of the action to remove. :type date: datetime.datetime :param analyst: The user removing the action. :type analyst: str :returns: dict with keys "success" (boolean) and "message" (str) if failed. """ indicator = Indicator.objects(id=indicator_id).first() if not indicator: return {'success': False, 'message': 'Could not find Indicator'} try: indicator.delete_action(date) indicator.save(username=analyst) return {'success': True} except ValidationError as e: return {'success': False, 'message': e}
806c818cd4c18624d9713a02d5c1826cab43a631
3,658,424
def repack_orb_to_dalton(A, norb, nclosed, nact, nvirt): """Repack a [norb, norb] matrix into a [(nclosed*nact) + (nclosed*nvirt) + (nact*nvirt)] vector for contraction with the CI Hamiltonian. """ assert norb == nclosed + nact + nvirt assert A.shape == (norb, norb) # These might be available in the global namespace, but this # function should work on its own. range_closed = list(range(0, nclosed)) range_act = list(range(nclosed, nclosed + nact)) range_virt = list(range(nclosed + nact, nclosed + nact + nvirt)) indices_rohf_closed_act = [(i, t) for i in range_closed for t in range_act] indices_rohf_closed_virt = [(i, a) for i in range_closed for a in range_virt] indices_rohf_act_virt = [(t, a) for t in range_act for a in range_virt] B = np.zeros( len(indices_rohf_closed_act) + len(indices_rohf_closed_virt) + len(indices_rohf_act_virt) ) for (i, t) in indices_rohf_closed_act: it = (t - nclosed) * nclosed + i B[it] += A[i, t] for (i, a) in indices_rohf_closed_virt: ia = i * nvirt + a - nclosed - nact + (nclosed * nact) B[ia] += A[i, a] for (t, a) in indices_rohf_act_virt: ta = (t - nclosed) * nvirt + a - nclosed - nact + (nclosed * nact) + (nclosed * nvirt) B[ta] += A[t, a] return B
05b356e9ded74c180d2a220f147cd69e91a5b597
3,658,425
def get_config(section="MAIN", filename="config.ini"): """ Function to retrieve all information from token file. Usually retrieves from config.ini """ try: config = ConfigParser() with open(filename) as config_file: config.read_file(config_file) return config[section] except FileNotFoundError: print("No configuration file found, check 'config_sample.ini'") raise FileNotFoundError
32d6c579b0ce002a601ea9041b54e9ce03858eb4
3,658,426
def _worst_xt_by_core(cores) -> float: """ Assigns a default worst crosstalk value based on the number of cores """ worst_crosstalks_by_core = {7: -84.7, 12: -61.9, 19: -54.8} # Cores: Crosstalk in dB worst_xt = worst_crosstalks_by_core.get(cores) # Worst aggregate intercore XT return worst_xt
331fdd7dc20db6909a6952483cfa9699f983a721
3,658,427
def _CheckUploadStatus(status_code): """Validates that HTTP status for upload is 2xx.""" return status_code // 100 == 2
d799797af012e46945cf413ff54d2ee946d364ba
3,658,428
def load(path: str, **kwargs) -> BELGraph: """Read a BEL graph. :param path: The path to a BEL graph in any of the formats with extensions described below :param kwargs: The keyword arguments are passed to the importer function :return: A BEL graph. This is the universal loader, which means any file path can be given and PyBEL will look up the appropriate load function. Allowed extensions are: - bel - bel.nodelink.json - bel.cx.json - bel.jgif.json The previous extensions also support gzipping. Other allowed extensions that don't support gzip are: - bel.pickle / bel.gpickle / bel.pkl - indra.json """ for extension, importer in IMPORTERS.items(): if path.endswith(extension): return importer(path, **kwargs) raise InvalidExtensionError(path=path)
871c7e3becac089758c94f7416def0020e63f9c1
3,658,429
from typing import Optional def smooth_l1_loss( prediction: oneflow._oneflow_internal.BlobDesc, label: oneflow._oneflow_internal.BlobDesc, beta: float = 1.0, name: Optional[str] = None, ) -> oneflow._oneflow_internal.BlobDesc: """This operator computes the smooth l1 loss. The equation is: .. math:: & out = \\frac{(\\beta*x)^2}{2}, \\left|x\\right|<\\frac{1}{{\\beta}^2} & out = \\left|x\\right|-\\frac{0.5}{{\\beta}^2}, otherwise Args: prediction (oneflow._oneflow_internal.BlobDesc): The prediction Blob label (oneflow._oneflow_internal.BlobDesc): The label Blob beta (float, optional): The :math:`\\beta` in the equation. Defaults to 1.0. name (Optional[str], optional): The name for the operation. Defaults to None. Returns: oneflow._oneflow_internal.BlobDesc: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def smooth_l1_loss_Job(prediction: tp.Numpy.Placeholder((5, )), label: tp.Numpy.Placeholder((5, )) ) -> tp.Numpy: return flow.smooth_l1_loss(prediction=prediction, label=label) prediction = np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32) label = np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32) out = smooth_l1_loss_Job(prediction, label) # out [0.02 0.12499999 1.7 0.005 0.17999998] """ op = ( flow.user_op_builder( name if name is not None else id_util.UniqueStr("SmoothL1Loss_") ) .Op("smooth_l1_loss") .Input("prediction", [prediction]) .Input("label", [label]) .Output("loss") ) op.Attr("beta", float(beta)) return op.Build().InferAndTryRun().RemoteBlobList()[0]
ddebf5ba77ca8e4d2a964e5c86e05a0b61db9ded
3,658,430
def get_model_fields(model, concrete=False): # type: (Type[Model], Optional[bool]) -> List[Field] """ Gets model field :param model: Model to get fields for :param concrete: If set, returns only fields with column in model's table :return: A list of fields """ if not hasattr(model._meta, 'get_fields'): # Django 1.8+ if concrete: res = model._meta.concrete_fields else: res = model._meta.fields + model._meta.many_to_many else: res = model._meta.get_fields() if concrete: # Many to many fields have concrete flag set to True. Strange. res = [f for f in res if getattr(f, 'concrete', True) and not getattr(f, 'many_to_many', False)] return res
9e9172b2e606041c6f9dbf3a991e79d73518227f
3,658,431
def loss_fun(para): """ This is the loss function """ return -data_processing(my_cir(para))
5703755e3f5547be933f85224c103c58acbeaabb
3,658,432
def GetDynTypeMgr(): """Get the dynamic type manager""" return _gDynTypeMgr
7acf02dd2072ea819c847f53fbf11e68146b2400
3,658,433
def identifyEntity(tweet, entities): """ Identify the target entity of the tweet from the list of entities :param tweet: :param entities: :return: """ best_score = 0 # best score over all entities targetEntity = "" # the entity corresponding to the best score for word in tweet: for entity in entities: cur_score = 0 # the score for the current entity if word == entity: cur_score = 1 # set the current score to 1 in case the entity name is mentioned in the tweet for entity_related_word in entities[entity]: if word == entity_related_word: cur_score = cur_score + 1 # increment the current score by 1 in case a related term to # the current entity is mentioned in the tweet if cur_score > best_score: # update the best score and the target entity best_score = cur_score targetEntity = entity return targetEntity
d6825dfddf01706ee266e0f1c82128a42bcb8554
3,658,434
def _apply_D_loss(scores_fake, scores_real, loss_func): """Compute Discriminator losses and normalize loss values Arguments --------- scores_fake : list discriminator scores of generated waveforms scores_real : list discriminator scores of groundtruth waveforms loss_func : object object of target discriminator loss """ loss = 0 real_loss = 0 fake_loss = 0 if isinstance(scores_fake, list): # multi-scale loss for score_fake, score_real in zip(scores_fake, scores_real): total_loss, real_loss_, fake_loss_ = loss_func( score_fake=score_fake, score_real=score_real ) loss += total_loss real_loss += real_loss_ fake_loss += fake_loss_ # normalize loss values with number of scales (discriminators) # loss /= len(scores_fake) # real_loss /= len(scores_real) # fake_loss /= len(scores_fake) else: # single scale loss total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real) loss = total_loss return loss, real_loss, fake_loss
9432962af57193c07a268d00a3f1f01d372cb6a0
3,658,436
import tempfile def get_temp_dir(): """ Get path to the temp directory. Returns: str: The path to the temp directory. """ return fix_slashes( tempfile.gettempdir() )
3d0dd90c8187ac7b13913e7d4cd2b481c712fa6b
3,658,437
import random def pick_op(r, maxr, w, maxw): """Choose a read or a write operation""" if r == maxr or random.random() >= float(w) / maxw: return "write" else: return "read"
a45f53bf12538412b46f78e2c076966c26cf61ac
3,658,438
def sim_nochange(request): """ Return a dummy YATSM model container with a no-change dataset "No-change" dataset is simply a timeseries drawn from samples of one standard normal. """ X, Y, dates = _sim_no_change_data() return setup_dummy_YATSM(X, Y, dates, [0])
a39ba5824644764ae2aaf4e4d95c68d1c26bd132
3,658,439
from functools import reduce import operator def get_queryset_descendants(nodes, include_self=False, add_to_result=None): """ Query the database for the descendants of the given nodes. If there are no nodes, an empty queryset is returned. :param nodes: list of tree nodes whose descendants should be found :param include_self: whether to include the original list of nodes in the result :param add_to_result: list of node keys that should additionally be included in the result :return: list of nodes (QuerySet), sorted in tree traversal order """ if not nodes: # HACK: Emulate MPTTModel.objects.none(), because MPTTModel is abstract return EmptyQuerySet(MPTTModel) filters = [] model_class = nodes[0].__class__ if include_self: for n in nodes: if n.get_descendant_count(): lft, rght = n.lft - 1, n.rght + 1 filters.append(Q(tree_id=n.tree_id, lft__gt=lft, rght__lt=rght)) else: filters.append(Q(pk=n.pk)) else: for n in nodes: if n.get_descendant_count(): lft, rght = n.lft, n.rght filters.append(Q(tree_id=n.tree_id, lft__gt=lft, rght__lt=rght)) if add_to_result: if len(add_to_result) > 1: filters.append(Q(id__in=add_to_result)) else: filters.append(Q(pk=add_to_result[0])) if filters: return model_class.objects.filter(reduce(operator.or_, filters)) else: # HACK: Emulate model_class.objects.none() return model_class.objects.filter(id__isnull=True)
7de9fe6c146c9569bc78b714b75238b770f9157e
3,658,441
from operator import mul def op_mul(lin_op, args): """Applies the linear operator to the arguments. Parameters ---------- lin_op : LinOp A linear operator. args : list The arguments to the operator. Returns ------- NumPy matrix or SciPy sparse matrix. The result of applying the linear operator. """ # Constants convert directly to their value. if lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]: result = lin_op.data # No-op is not evaluated. elif lin_op.type is lo.NO_OP: return None # For non-leaves, recurse on args. elif lin_op.type is lo.SUM: result = sum(args) elif lin_op.type is lo.NEG: result = -args[0] elif lin_op.type is lo.MUL: coeff = mul(lin_op.data, {}) result = coeff*args[0] elif lin_op.type is lo.DIV: divisor = mul(lin_op.data, {}) result = args[0]/divisor elif lin_op.type is lo.SUM_ENTRIES: result = np.sum(args[0]) elif lin_op.type is lo.INDEX: row_slc, col_slc = lin_op.data result = args[0][row_slc, col_slc] elif lin_op.type is lo.TRANSPOSE: result = args[0].T elif lin_op.type is lo.CONV: result = conv_mul(lin_op, args[0]) elif lin_op.type is lo.PROMOTE: result = np.ones(lin_op.size)*args[0] elif lin_op.type is lo.DIAG_VEC: val = intf.from_2D_to_1D(args[0]) result = np.diag(val) else: raise Exception("Unknown linear operator.") return result
a1f770d2132fc9c3a60d4de3c3d87f59a03241eb
3,658,442
def comparator(x, y): """ default comparator :param x: :param y: :return: """ if x < y: return -1 elif x > y: return 1 return 0
53fc36f1afc3347689a1230c5ee3ba25d90f1239
3,658,443
def set_trait(age, age_risk_map, sex, sex_risk_map, race, race_risk_map): """ A trait occurs based on some mix of age, sex, and race risk factors. """ if age in age_risk_map: risk_from_age = age_risk_map[age] else: risk_from_age = 0 if sex in sex_risk_map: risk_from_sex = sex_risk_map[sex] else: risk_from_sex = 0 if race in race_risk_map: risk_from_race = race_risk_map[race] else: risk_from_race = 0 # probability of trait prob_trait = 1 - (1 - risk_from_age) * (1 - risk_from_sex) * (1 - risk_from_race) prob_not_trait = 1 - prob_trait resident_trait = np.random.choice(np.arange(1,3), p=[prob_not_trait,prob_trait]) return resident_trait
fe9f6c75ae4d7f80c2da86af4315b35fe29df482
3,658,444
def tidy_expression(expr, design=None): """Converts expression matrix into a tidy 'long' format.""" df_long = pd.melt( _reset_index( expr, name='gene'), id_vars=['gene'], var_name='sample') if design is not None: df_long = pd.merge( df_long, _reset_index( design, name='sample'), on='sample', how='left') return df_long
7c904e13a55f38cc05309b5927f2fdbb23c3f8c9
3,658,446
def get_optimizer(name): """Get an optimizer generator that returns an optimizer according to lr.""" if name == 'adam': def adam_opt_(lr): return tf.keras.optimizers.Adam(lr=lr) return adam_opt_ else: raise ValueError('Unknown optimizer %s.' % name)
8c97ee9f4b77d0fc80914ac7cbb49a448d48644a
3,658,448
from typing import List def get_multi(response: Response, common: dict = Depends(common_parameters)) -> List[ShopToPriceSchema]: """List prices for a shop""" query_result, content_range = shop_to_price_crud.get_multi( skip=common["skip"], limit=common["limit"], filter_parameters=common["filter"], sort_parameters=common["sort"], ) response.headers["Content-Range"] = content_range for result in query_result: result.half = result.price.half if result.price.half and result.use_half else None result.one = result.price.one if result.price.one and result.use_one else None result.two_five = result.price.two_five if result.price.two_five and result.use_two_five else None result.five = result.price.five if result.price.five and result.use_five else None result.joint = result.price.joint if result.price.joint and result.use_joint else None result.piece = result.price.piece if result.price.piece and result.use_piece else None return query_result
f97868e66c7743127d2d2951b732ff4c62708ae5
3,658,449
import datetime def send_crash(request, machine_config_info, crashlog): """ Save Houdini crashes """ machine_config = get_or_save_machine_config( machine_config_info, get_ip_address(request), datetime.datetime.now()) save_crash(machine_config, crashlog, datetime.datetime.now()) return True
43e44950bdb4b6dc305bb1f36651daa31b4f813e
3,658,450
def apply_HAc_dense(A_C, A_L, A_R, Hlist): """ Construct the dense effective Hamiltonian HAc and apply it to A_C. For testing. """ d, chi, _ = A_C.shape HAc = HAc_dense(A_L, A_R, Hlist) HAc_mat = HAc.reshape((d*chi*chi, d*chi*chi)) A_Cvec = A_C.flatten() A_C_p = np.dot(HAc_mat, A_Cvec).reshape(A_C.shape) return A_C_p
b13f9db7287fcdf275e8f7c9a7fb542e7b79323c
3,658,452
def min_index(array, i, j): """Helper function for selection sort. Returns the index of the smallest element in 'array' between 'i' and 'j'-1. """ index = i for k in range(i, j): if array[k] < array[index]: index = k return index
4c59362fac2e918ba5a0dfe9f6f1670b3e95d68c
3,658,453
def filterControlChars(value, replacement=' '): """ Returns string value with control chars being substituted with replacement character >>> filterControlChars(u'AND 1>(2+3)\\n--') u'AND 1>(2+3) --' """ return filterStringValue(value, PRINTABLE_CHAR_REGEX, replacement)
a0f508d281f0c12311a5c2aa2f898def5eb38913
3,658,454
import csv def write_trt_rpc(cell_ID, cell_time, lon, lat, area, rank, hmin, hmax, freq, fname, timeformat='%Y%m%d%H%M'): """ writes the rimed particles column data for a TRT cell Parameters ---------- cell_ID : array of ints the cell ID cell_time : array of datetime the time step lon, lat : array of floats the latitude and longitude of the center of the cell area : array of floats the area of the cell rank : array of floats the rank of the cell hmin, hmax : array of floats Minimum and maximum altitude of the rimed particle column freq : array of floats Frequency of the species constituting the rime particle column within the limits of it fname : str file name where to store the data Returns ------- fname : str the name of the file where data has written """ hmin = hmin.filled(fill_value=get_fillvalue()) hmax = hmax.filled(fill_value=get_fillvalue()) freq = freq.filled(fill_value=get_fillvalue()) with open(fname, 'w', newline='') as csvfile: fieldnames = [ 'traj_ID', 'yyyymmddHHMM', 'lon', 'lat', 'area', 'RANKr', 'hmin', 'hmax', 'freq'] writer = csv.DictWriter(csvfile, fieldnames) writer.writeheader() for i, traj_ID_el in enumerate(cell_ID): writer.writerow({ 'traj_ID': traj_ID_el, 'yyyymmddHHMM': cell_time[i].strftime(timeformat), 'lon': lon[i], 'lat': lat[i], 'area': area[i], 'RANKr': rank[i], 'hmin': hmin[i], 'hmax': hmax[i], 'freq': freq[i] }) csvfile.close() return fname
fd634914a8c3d96d10d4dcc81514d492d6be899c
3,658,456
def get_tag(string: str) -> Tag: """Get the tag.""" return Tag.objects.get(tag=string)
816bbaecc4cf45e2fc75b1e428842b5502a353bc
3,658,457
def average_precision(gt, pred): """ Computes the average precision. This function computes the average prescision at k between two lists of items. Parameters ---------- gt: set A set of ground-truth elements (order doesn't matter) pred: list A list of predicted elements (order does matter) Returns ------- score: double The average precision over the input lists """ if not gt: return 0.0 score = 0.0 num_hits = 0.0 for i,p in enumerate(pred): if p in gt and p not in pred[:i]: num_hits += 1.0 score += num_hits / (i + 1.0) return score / max(1.0, len(gt))
ca265471d073b6a0c7543e24ef0ba4f872737997
3,658,458
import math def rotate_coo(x, y, phi): """Rotate the coordinates in the *.coo files for data sets containing images at different PAs. """ # Rotate around center of image, and keep origin at center xin = 512. yin = 512. xout = 512. yout = 512. cos = math.cos(math.radians(phi)) sin = math.sin(math.radians(phi)) xrot = (x - xin) * cos - (y - yin) * sin + xout yrot = (x - xin) * sin + (y - yin) * cos + yout return [xrot, yrot]
a57a4c36119e96d757bd23f28a0790f6d68661fc
3,658,459
def ip_block_array(): """ Return an ipBlock array instance fixture """ return ['10.0.0.1', '10.0.0.2', '10.0.0.3']
c74756f34b97d2550cb238bd63e0c9505f3935d3
3,658,460
from pathlib import Path import joblib def load_model(model_name, dir_loc=None, alive_bar_on=True): """Load local model_name=model_s if present, else fetch from hf.co.""" if dir_loc is None: dir_loc = "" dir_loc = Path(dir_loc).absolute().as_posix() file_loc = f"{dir_loc}/{model_name}" if Path(file_loc).exists(): if alive_bar_on: with alive_bar( 1, title=f" Loading {dir_loc}/{model_name}, takes ~30 secs ...", length=3, ) as progress_bar: model = joblib.load(file_loc) # model_s = pickle.load(open(file_loc, "rb")) progress_bar() # pylint: disable=not-callable else: logger.info("Loading %s/%s, takes ~30 secs ...", dir_loc, model_name) model = joblib.load(file_loc) else: logger.info( "Fetching and caching %s from huggingface.co... " "The first time may take a while depending on your net.", model_name, ) if alive_bar_on: with alive_bar( 1, title=" Subsequent loading takes ~2-3 secs ...", length=3 ) as progress_bar: try: model = joblib.load( cached_download(hf_hub_url("mikeee/model_s", model_name)) ) except Exception as exc: logger.error(exc) raise progress_bar() # pylint: disable=not-callable else: try: model = joblib.load( cached_download(hf_hub_url("mikeee/model_s", model_name)) ) except Exception as exc: logger.error(exc) raise return model
1847e061c6980fd4fd185f79d48682cbf7cb14ff
3,658,461
from typing import Generator def get_dev_requirements() -> Generator: """Yield package name and version for Python developer requirements.""" return get_versions("DEVELOPMENT")
728658648d6bce6fecbf4c1bc6b6de42c315b3c0
3,658,462
def _ndb_key_to_cloud_key(ndb_key): """Convert a ndb.Key to a cloud entity Key.""" return datastore.Key( ndb_key.kind(), ndb_key.id(), project=utils.get_application_id())
ce71b0d13f2e37ded12bf87ad133492a9b68d0c7
3,658,463
def inference(H, images, train=True): """Build the MNIST model up to where it may be used for inference. Parameters ---------- images: Images placeholder, from inputs(). train: whether the network is used for train of inference Returns ------- softmax_linear: Output tensor with the computed logits. """ num_filter_1 = 32 num_filter_2 = 64 # First Convolutional Layer with tf.variable_scope('Conv1') as scope: # Adding Convolutional Layers W_conv1 = weight_variable( 'weights', [5, 5, H['arch']['num_channels'], num_filter_1]) b_conv1 = bias_variable('biases', [num_filter_1]) h_conv1 = tf.nn.relu( conv2d(images, W_conv1) + b_conv1, name=scope.name) _activation_summary(h_conv1) # First Pooling Layer h_pool1 = max_pool_2x2(h_conv1, name='pool1') # Second Convolutional Layer with tf.variable_scope('Conv2') as scope: W_conv2 = weight_variable( 'weights', [5, 5, num_filter_1, num_filter_2]) b_conv2 = bias_variable('biases', [num_filter_2]) h_conv2 = tf.nn.relu( conv2d(h_pool1, W_conv2) + b_conv2, name=scope.name) _activation_summary(h_conv2) # Second Pooling Layer h_pool2 = max_pool_2x2(h_conv2, name='pool2') # Find correct dimension dim = 1 for d in h_pool2.get_shape()[1:].as_list(): dim *= d # Adding Fully Connected Layers with tf.variable_scope('fc1') as scope: W_fc1 = weight_variable('weights', [dim, 1024]) b_fc1 = bias_variable('biases', [1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, dim]) h_fc1 = tf.nn.relu( tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name=scope.name) _activation_summary(h_fc1) # Adding Dropout if train: h_fc1 = tf.nn.dropout(h_fc1, 0.5, name='dropout') with tf.variable_scope('logits') as scope: W_fc2 = weight_variable('weights', [1024, H['arch']['num_classes']]) b_fc2 = bias_variable('biases', [H['arch']['num_classes']]) logits = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, name=scope.name) _activation_summary(logits) return logits
bf7e0f60bdc85d52fb6778cc40eedaa63c0387e3
3,658,464
import numpy as np


def UniqueLattice(lattice_vectors, ind):
    """
    Takes a list with two tuples, each representing a lattice vector, and a
    list with the genes of an individual.
    Returns a list with two tuples, representing the equivalent lattice
    vectors with the smallest cell circumference.
    """
    x_1 = lattice_vectors(0, ind)
    x_2 = lattice_vectors(1, ind)
    lattices = [[(x_1[0]+x_2[0] if (x_1[0]+x_2[0]) > 0 else (x_1[0]-x_2[0]),
                  x_1[1]+x_2[1] if (x_1[1]+x_2[1]) > 0 else x_1[1]-x_2[1]), x_2],
                [(x_1[0]-x_2[0] if (x_1[0]-x_2[0]) > 0 else x_1[0]+x_2[0],
                  x_1[1]-x_2[1] if (x_1[1]-x_2[1]) > 0 else x_1[1]+x_2[1]), x_2],
                [x_1, (x_1[0]+x_2[0] if (x_1[0]+x_2[0]) > 0 else x_1[0]-x_2[0],
                       x_1[1]+x_2[1] if (x_1[1]+x_2[1]) > 0 else x_1[1]-x_2[1])],
                [x_1, (x_1[0]-x_2[0] if (x_1[0]-x_2[0]) > 0 else x_1[0]+x_2[0],
                       x_1[1]-x_2[1] if (x_1[1]-x_2[1]) > 0 else x_1[1]+x_2[1])]]
    lattice_radius = []
    for lat in lattices:
        point_1 = lat[0]
        point_2 = lat[1]
        # Circumcenter of the triangle (origin, point_1, point_2): m_a is the
        # slope of the segment point_1-point_2, m_b the slope of origin-point_2.
        m_a = (point_2[1]-point_1[1])/(point_2[0]-point_1[0])
        m_b = point_2[1]/point_2[0]
        x = (m_a*m_b*(point_1[1]) + m_b*(point_1[0]+point_2[0])
             - m_a*(point_2[0])) / (2*(m_b-m_a))
        # y lies on the perpendicular bisector of point_1-point_2.
        y = (-1 / m_a) * (x - (point_1[0]+point_2[0])/2) + (point_1[1]+point_2[1])/2
        radius_1 = np.sqrt((x-point_1[0])**2 + (y-point_1[1])**2)
        radius_2 = np.sqrt((x-point_2[0])**2 + (y-point_2[1])**2)
        if radius_1 >= radius_2:
            lattice_radius.append(radius_1)
        else:
            lattice_radius.append(radius_2)
    return lattices[lattice_radius.index(min(lattice_radius))]
e2474a54cf3351ff112ecb6d139eec8eac2ef1fa
3,658,466
from flask import Flask, render_template
from flask_wtf.csrf import CSRFError


def register_errors(app: Flask):
    """Register the required error handlers on the Flask application instance `app`."""
    @app.errorhandler(400)  # Bad Request: the request syntax is malformed and cannot be understood by the server
    def bad_request(e):
        return render_template('error.html', description=e.description, code=e.code), 400

    @app.errorhandler(404)  # Not Found: the server cannot find the requested resource (page)
    def page_not_found(e):
        return render_template('error.html', description=e.description, code=e.code), 404

    @app.errorhandler(500)  # Internal Server Error: the server hit an internal error and could not complete the request
    def internal_server_error(e):
        return render_template('error.html', description="Internal server error, the request could not be completed!", code="500"), 500

    @app.errorhandler(CSRFError)  # CSRF validation failed
    def csrf_error_handle(e):
        return render_template('error.html', description=e.description, code=e.code), 400
27634a139aab88215b77e53a25758d6096571a09
3,658,467
from base64 import urlsafe_b64encode


def websafe_encode(data):
    """Encodes a byte string into websafe-base64 encoding.

    :param data: The input to encode.
    :return: The encoded string.
    """
    return urlsafe_b64encode(data).replace(b'=', b'').decode('ascii')
ed5b06d2fab3dcc64275cb0046cabd88f63894ec
3,658,468
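# A minimal sketch of websafe_encode above: URL-safe characters are used and the
# trailing '=' padding is stripped.
print(websafe_encode(b'\xff\xfe'))  # -> '__4' (plain base64 would give '//4=')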
from typing import Union


def gravatar(email: Union[str, list]) -> str:
    """Converts the e-mail address provided into a gravatar URL.

    Accepts a single address or a list of addresses; the addresses are sorted
    and the first valid one is converted.

    Args:
        email: e-mail address (or list of addresses) to convert.

    Returns:
        Gravatar URL, or None if no valid e-mail address is found.
    """
    if email is None:
        email = []
    elif isinstance(email, str):
        email = [email]

    email.sort()

    for _email in email:
        if validators.email(_email):
            return gravatar_url(_email)

    return None
8807eefd40472068310455c1c477933dbaa67be0
3,658,469
from scipy import constants as const  # assumed source of `const` (const.bar, const.mega)


def bar_2_MPa(value):
    """
    converts pressure in bar to MPa

    :param value: pressure value in bar
    :return: pressure value in MPa
    """
    return value * const.bar / const.mega
d6c8084a6603f74bd1fb11739e4f4d9100cf14de
3,658,470
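# A quick numerical check of bar_2_MPa above: 1 bar = 100 000 Pa = 0.1 MPa.
print(bar_2_MPa(1.0))  # -> 0.1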
def walk(x, y, model, theta, conditions=None, var2=0.01, mov=100, d=1, tol=1e-3, mode=True): """Executes the walker implementation. Parameters ---------- x : np.ndarray An $(m, n)$ dimensional array for (cols, rows). y : np.ndarray An $n$ dimensional array that will be compared with model's output. model : function A Python function defined by the user. This function should recieve two arguments $(x, theta)$. theta : np.ndarray The array containing the model's parameters. conditions : list A list containing $2n$-conditions for the (min, max) range of the $n$ parameters. var2 : float Determines the step size of the walker. By default it is set to `1.0`. mov : int Number of movements that walker will perform. By default it is set to `100`. d : float Size of the Gaussian step for the walker. tol : float Convergence criteria for the log-likelihhod. By default it is set to `1e-3`. mode : bool By default it is set to `True`. Returns ------- theta : np.array An ndarray with the updated theta values. nwalk : np.array Updates of theta for each movement performed by the walker. y0 : float The log-likelihood value. """ greach = False nwalk = [] for i in range(mov): nwalk.append(theta) theta_new = update_theta(theta, d) if not greach: y0 = fun_like(x, y, model, theta, conditions, var2) y1 = fun_like(x, y, model, theta_new, conditions, var2) if y0 <= tol and mode: print('Goal reached!') greach = True return theta, nwalk, y0 else: if y1 <= tol and mode: print('Goal reached!') greach = True return theta_new, nwalk, y1 else: ratio = y0 / y1 boltz = np.random.rand(1) prob = np.exp(-ratio) if y1 < y0: theta = theta_new theta_new = update_theta(theta, d) else: if prob > boltz: theta = theta_new theta_new = update_theta(theta, d) else: theta_new = update_theta(theta, d) if mode: print('Maximum number of iterations reached!') print(f'The log-likelihood is: {y0}') return theta, nwalk, y0
ef7386f4c7141edfcdeb041b47d741e186f207e2
3,658,471
def izbor_letov(): """Glavna stran.""" # Iz cookieja dobimo uporabnika in morebitno sporočilo (username, ime, priimek) = get_potnik() c.execute("SELECT distinct drzava FROM lokacija ORDER BY drzava") drzave=c.fetchall() drzava_kje = bottle.request.forms.drzava_kje mesto_kje = bottle.request.forms.mesto_kje letalisce_kje = bottle.request.forms.letalisce_kje drzava_kam = bottle.request.forms.drzava_kam mesto_kam = bottle.request.forms.mesto_kam letalisce_kam = bottle.request.forms.letalisce_kam if "None" in [drzava_kje, mesto_kje, letalisce_kje, drzava_kam, mesto_kam, letalisce_kam]: return bottle.template("main.html", ime=ime, username=username, napaka="Prosimo, izpolnete vsa polja!", drzave=drzave) elif letalisce_kje==letalisce_kam: return bottle.template("main.html", ime=ime, username=username, napaka="Začetno in končno letališče se morata razlikovati, prosimo ponovno izpolnite obrazec.", drzave=drzave) else: izbor = get_leti(letalisce_kje, letalisce_kam, drzava_kje, drzava_kam) leti_mesto = get_leti_mesto(mesto_kje, drzava_kje, mesto_kam, drzava_kam) leti_mesto_drzava = get_leti_mesto_drzava(mesto_kje, drzava_kje, mesto_kam, drzava_kam) if izbor == []: return bottle.template("leti.html", ime=ime, username=username, letalisce_kje=letalisce_kje, letalisce_kam=letalisce_kam, napaka="Za relacijo \""+letalisce_kje+" ("+mesto_kje+", "+drzava_kje+") : "+letalisce_kam+" ("+mesto_kam+", "+drzava_kam+")\" ni znanih letov. "+" "+"Poizkusite ponovno s kakterim drugim letališčem v bližini.", leti_mesto=leti_mesto, leti_mesto_drzava=leti_mesto_drzava, izbor=izbor) else: return bottle.template("leti.html", ime=ime, username=username, letalisce_kje=letalisce_kje, letalisce_kam=letalisce_kam, napaka=None, leti_mesto_drzava=leti_mesto_drzava, izbor=izbor, leti_mesto=leti_mesto)
664de2c3cf2507ac43efa22105a51b1e14ad441a
3,658,472
def generate_data_from_cvs(csv_file_paths): """Generate data from list of csv_file_paths. csv_file_paths contains path to CSV file, column_name, and its label `csv_file_paths`: A list of CSV file path, column_name, and label """ data = [] for item in csv_file_paths: values = read_csv(item[0], item[1]) data.append([ item[2], values ]) return data
1c9f393a18edc9c2fcc3f28cdbeb71fb9c006731
3,658,473
import math

import torch


def log_density_gaussian(x, mu, logvar):
    """Calculates log density of a gaussian.

    Parameters
    ----------
    x: torch.Tensor or np.ndarray or float
        Value at which to evaluate the log density.

    mu: torch.Tensor or np.ndarray or float
        Mean.

    logvar: torch.Tensor or np.ndarray or float
        Log variance.
    """
    normalization = - 0.5 * (math.log(2 * math.pi) + logvar)
    inv_var = torch.exp(-logvar)
    log_density = normalization - 0.5 * ((x - mu)**2 * inv_var)
    return log_density
3fdc751aa58b3ec82e1aa454f593879d5da4c310
3,658,474
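# A quick numerical check of log_density_gaussian above: at x == mu with unit
# variance (logvar = 0) the log density is -0.5 * log(2*pi) ≈ -0.9189.
import torch
print(log_density_gaussian(torch.zeros(1), torch.zeros(1), torch.zeros(1)))  # tensor([-0.9189])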
def invalid_hexadecimal(statement):
    """Identifies problem caused by invalid character in a hexadecimal number."""
    if statement.highlighted_tokens:  # Python 3.10
        prev = statement.bad_token
        wrong = statement.next_token
    else:
        prev = statement.prev_token
        wrong = statement.bad_token

    if not (prev.immediately_before(wrong) and prev.string.lower().startswith("0x")):
        return {}

    hint = _("Did you make a mistake in writing a hexadecimal integer?\n")
    cause = _(
        "It looks like you used an invalid character (`{character}`) in a hexadecimal number.\n\n"
        "Hexadecimal numbers are base 16 integers that use the symbols `0` to `9`\n"
        "to represent values 0 to 9, and the letters `a` to `f` (or `A` to `F`)\n"
        "to represent values 10 to 15.\n"
        "In Python, hexadecimal numbers start with either `0x` or `0X`,\n"
        "followed by the characters used to represent the value of that integer.\n"
    ).format(character=wrong.string[0])
    return {"cause": cause, "suggest": hint}
a0b252001dd1f0f466302a131c2a460743a8c197
3,658,475
def get_pool_name(pool_id): """Returns AS3 object name for TLS profiles related to pools :param pool_id: octavia pool id :return: AS3 object name """ return "{}{}".format(constants.PREFIX_TLS_POOL, pool_id)
2a850d48f52d822712cdfc3543532c9b0dd80fd6
3,658,476
def search_sliceable_by_yielded_chunks_for_str(sliceable, search_string, starting_index, down, case_insensitive): """This is the main entry point for everything in this module.""" for chunk, chunk_start_idx in search_chunk_yielder(sliceable, starting_index, down): found_at_chunk_idx = search_list_for_str(chunk, search_string, 0 if down else len(chunk) - 1, down, case_insensitive) if found_at_chunk_idx is not None: return found_at_chunk_idx + chunk_start_idx return None
7179179403098cd1d3993a35cf59c9162384ac4d
3,658,477
def split_page(array, limit, index):
    """
    Split the array into pages of the given size and return the page at the
    given (1-based) index.

    :param array: the array to split
    :param limit: the size of each page
    :param index: the (1-based) page number to return
    :return: the requested page as a list
    """
    end = index * limit
    start = end - limit
    return array[start:end]
ecce83d6e2e09d47e124536f294ece1e1631e6b6
3,658,478
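# A small usage sketch of split_page above (hypothetical values): page 2 of a
# ten-element list with three items per page.
print(split_page(list(range(10)), limit=3, index=2))  # -> [3, 4, 5]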
import numpy as np


def creatKdpCols(mcTable, wls):
    """
    Create the KDP columns.

    Parameters
    ----------
    mcTable: output from getMcSnowTable()
    wls: wavelength (iterable) [mm]

    Returns
    -------
    mcTable with an empty column 'sKDP_*' for storing the calculated KDP
    of a given wavelength.
    """
    for wl in wls:
        wlStr = '{:.2e}'.format(wl)
        mcTable['sKDP_{0}'.format(wlStr)] = np.ones_like(mcTable['time'])*np.nan
    return mcTable
9adc20c1ff94778bec4551156b5774863eb2203f
3,658,479
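# A minimal sketch of creatKdpCols above, assuming mcTable behaves like a pandas
# DataFrame with a 'time' column (the wavelength values below are hypothetical).
import pandas as pd
mcTable = pd.DataFrame({'time': [0.0, 1.0, 2.0]})
mcTable = creatKdpCols(mcTable, wls=[8.4, 31.6])
print(mcTable.columns.tolist())  # -> ['time', 'sKDP_8.40e+00', 'sKDP_3.16e+01']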
def get_products_by_user(user_openid, allowed_keys=None, filters=None): """Get all products that user can manage.""" return IMPL.get_products_by_user(user_openid, allowed_keys=allowed_keys, filters=filters)
458664aa75c5b423ccfb2a80287c565cae51e0d0
3,658,480
def sample_from_ensemble(models, params, weights=None, fallback=False, default=None): """Sample models in proportion to weights and execute with model_params. If fallback is true then call different model from ensemble if the selected model throws an error. If Default is not None then return default if all models fail """ if len(models) > 1: model = ergo.random_choice(models, weights) else: model = models[0] try: result = model(**params) if np.isnan(result): raise KeyError return result except (KeyError, IndexError): if fallback and len(models) > 1: models_copy = models.copy() weights_copy = weights.copy() i = models.index(model) del models_copy[i] del weights_copy[i] return sample_from_ensemble( models_copy, params, weights_copy, fallback, default ) return default
c771108cb36cff2cb48af22a9efaad749d267ce0
3,658,481
def Flatten(matrix): """Flattens a 2d array 'matrix' to an array.""" array = [] for a in matrix: array += a return array
00389b4dd295274d8081331d6ae78f233f0b5b59
3,658,482
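# A small usage sketch of Flatten above: nested lists are concatenated in order.
print(Flatten([[1, 2], [3], [4, 5, 6]]))  # -> [1, 2, 3, 4, 5, 6]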
def create_verification_token( data: dict ) -> VerificationTokenModel: """ Save a Verification Token instance to database. Args: data (dictionary): Returns: VerificationToken: Verification Token entity of VerificationTokenModel object Raises: None """ orm_verification_token = VerificationTokenModel( user_id=data.get('user_id'), token_type=data.get('token_type', 'SMS'), token=True ) orm_verification_token.save() return orm_verification_token
9008bc298c8e8075031f7e14e8cb0f288e894869
3,658,483
from typing import Union from typing import Sequence from typing import Tuple def _find_highest_cardinality(arrays: Union[int, Sequence, np.ndarray, Tuple]) -> int: """Find the highest cardinality of the given array. Args: arrays: a list of arrays or a single array Returns: The highest cardinality of the given array. """ return max([len(array) for array in arrays if hasattr(array, "__len__")] + [1])
abe9ad85ffabb88f9097b9c2de97319f1342f586
3,658,484
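# A small usage sketch of _find_highest_cardinality above: scalars count as
# cardinality 1 and the longest sequence wins.
print(_find_highest_cardinality([7, [1, 2, 3], (4, 5)]))  # -> 3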
def rowmap(table, rowmapper, header, failonerror=False): """ Transform rows via an arbitrary function. E.g.:: >>> import petl as etl >>> table1 = [['id', 'sex', 'age', 'height', 'weight'], ... [1, 'male', 16, 1.45, 62.0], ... [2, 'female', 19, 1.34, 55.4], ... [3, 'female', 17, 1.78, 74.4], ... [4, 'male', 21, 1.33, 45.2], ... [5, '-', 25, 1.65, 51.9]] >>> def rowmapper(row): ... transmf = {'male': 'M', 'female': 'F'} ... return [row[0], ... transmf[row['sex']] if row['sex'] in transmf else None, ... row.age * 12, ... row.height / row.weight ** 2] ... >>> table2 = etl.rowmap(table1, rowmapper, ... header=['subject_id', 'gender', 'age_months', ... 'bmi']) >>> table2 +------------+--------+------------+-----------------------+ | subject_id | gender | age_months | bmi | +============+========+============+=======================+ | 1 | 'M' | 192 | 0.0003772112382934443 | +------------+--------+------------+-----------------------+ | 2 | 'F' | 228 | 0.0004366015456998006 | +------------+--------+------------+-----------------------+ | 3 | 'F' | 204 | 0.0003215689675106949 | +------------+--------+------------+-----------------------+ | 4 | 'M' | 252 | 0.0006509906805544679 | +------------+--------+------------+-----------------------+ | 5 | None | 300 | 0.0006125608384287258 | +------------+--------+------------+-----------------------+ The `rowmapper` function should accept a single row and return a single row (list or tuple). """ return RowMapView(table, rowmapper, header, failonerror=failonerror)
dabceae8171330d3f8c4cdba7b50be2106ad1438
3,658,486
def squeeze(dataset, how: str = 'day'): """ Squeezes the data in dataset by close timestamps Args: dataset (DataFrame) - the data to squeeze how (str) - one of 'second', 'minute', 'hour', 'day', 'month' (default day) Returns: dataset (DataFrame) - a dataframe where the indexes are squeezed together by closely related timestamps determined by parameter how """ return dataset.groupby(by = lambda ts: timestamp_floor(ts, how = how))
e41cbc4e054218b1f88ed0745fcc980df29ac8d4
3,658,487
def callback(): """ Process response for "Login" try from Dropbox API. If all OK - redirects to ``DROPBOX_LOGIN_REDIRECT`` url. Could render template with error message on: * oAuth token is not provided * oAuth token is not equal to request token * Error response from Dropbox API Default template to render is ``'dropbox/callback.html'``, you could overwrite it with ``DROPBOX_CALLBACK_TEMPLATE`` config var. """ # Initial vars dropbox = current_app.extensions['dropbox'] template = dropbox.DROPBOX_CALLBACK_TEMPLATE or 'dropbox/callback.html' # Get oAuth token from Dropbox oauth_token = request.args.get('oauth_token') if not oauth_token: return render_template(template, error_oauth_token=True) # oAuth token **should** be equal to stored request token try: key, secret = session.get(DROPBOX_REQUEST_TOKEN_KEY) or (None, None) except ValueError: return render_template(template, error_request_token=True) if oauth_token != key: return render_template(template, error_not_equal_tokens=True) # Do login with current request token try: dropbox.login(OAuthToken(key, secret)) except ErrorResponse as e: return render_template(template, error_response=True, error=e) # Redirect to resulted page redirect_to = safe_url_for(dropbox.DROPBOX_LOGIN_REDIRECT or '/') return redirect(redirect_to)
8b35d67d065a5ec65606b6e505cfccc51460fe1c
3,658,488
def get_ws_param(args, attr):
    """Get the corresponding warm-start parameter; if it does not exist, use the value of the general parameter."""
    assert hasattr(args, attr), 'Invalid warm start parameter!'
    val = getattr(args, attr)
    if hasattr(args, 'ws_' + attr):
        ws_val = getattr(args, 'ws_' + attr)
        if isinstance(ws_val, str):
            ws_val = ws_val.strip()
        if ws_val or isinstance(ws_val, list) or isinstance(ws_val, int) or isinstance(ws_val, float):
            val = ws_val
    return val
ea1d762654153602f8ad54048e54995c26304e40
3,658,489
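# A hypothetical illustration of get_ws_param above using argparse.Namespace: a
# non-empty ws_-prefixed attribute overrides the general one, an empty one does not.
from argparse import Namespace
args = Namespace(lr=0.1, ws_lr=0.01, epochs=10, ws_epochs='')
print(get_ws_param(args, 'lr'))      # -> 0.01
print(get_ws_param(args, 'epochs'))  # -> 10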
def _redundant_relation(lex: lmf.Lexicon, ids: _Ids) -> _Result: """redundant relation between source and target""" redundant = _multiples(chain( ((s['id'], r['relType'], r['target']) for s, r in _sense_relations(lex)), ((ss['id'], r['relType'], r['target']) for ss, r in _synset_relations(lex)), )) return {src: {'type': typ, 'target': tgt} for src, typ, tgt in redundant}
cc32c55a35cd7056a249ad05bd0b483af18fcd3a
3,658,490