Columns: content (string, length 35 – 762k) · sha1 (string, length 40) · id (int64, 0 – 3.66M)
def write_results(conn, cursor, mag_dict, position_dict):
    """
    Write star truth results to the truth table

    Parameters
    ----------
    conn is a sqlite3 connection to the database

    cursor is a sqlite3.connection.cursor() object

    mag_dict is a dict of mags.  It is keyed on the pid of the
    Process used to process a chunk of magnitudes.  Each value
    is a 2-D numpy array of shape (n_obj, n_bandpasses).  It is
    produced by calculate_magnitudes.

    position_dict is a dict keyed on pid of the Process used to
    process a chunk of stars.  The values are also dicts, these
    keyed on 'healpix', 'ra', 'dec', 'id' with the values being
    arrays of those quantities for the corresponding chunk of stars.

    Returns
    -------
    row_ct, the number of rows written to the database
    """
    assert len(mag_dict) == len(position_dict)

    row_ct = 0
    for k in mag_dict.keys():
        mm = mag_dict[k]
        pp = position_dict[k]
        row_ct += len(pp['ra'])
        if len(mm) != len(pp['ra']):
            raise RuntimeError('%d mm %d pp' % (len(mm), len(pp['ra'])))

        values = ((int(pp['healpix'][i_obj]), int(pp['id'][i_obj]),
                   1, 0, 0, pp['ra'][i_obj], pp['dec'][i_obj], 0.0,
                   mm[i_obj][0], mm[i_obj][1], mm[i_obj][2],
                   mm[i_obj][3], mm[i_obj][4], mm[i_obj][5])
                  for i_obj in range(len(pp['ra'])))

        cursor.executemany('''INSERT INTO truth
                           VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', values)
        conn.commit()
    return row_ct
0b0c9234a32050277a7e70fee3ab7ba1be5931bb
3,657,912
import numpy as np

def get_sparameters(sim: td.Simulation) -> np.ndarray:
    """Adapted from tidy3d examples.

    Returns full S-matrix for a component
    https://support.lumerical.com/hc/en-us/articles/360042095873-Metamaterial-S-parameter-extraction
    """
    sim = run_simulation(sim).result()

    def get_amplitude(monitor):
        f, b = sim.data(monitor)["mode_amps"]
        return np.squeeze(f), np.squeeze(b)

    monitors = sim.monitors
    n = len(monitors) - 1
    S = np.zeros((n, n), dtype=np.complex128)

    # for i, monitor_i in enumerate(monitors):
    #     for j, monitor_j in enumerate(monitors):
    #         if i > 0 and j > 0:
    #             if monitor_i.name.startswith("W"):
    #                 ai, bi = get_amplitude(monitor_i)
    #             else:
    #                 bi, ai = get_amplitude(monitor_i)
    #             if monitor_j.name.startswith("W"):
    #                 aj, bj = get_amplitude(monitor_j)
    #             else:
    #                 bj, aj = get_amplitude(monitor_j)
    #             S[i - 1, j - 1] = bi / aj

    if len(monitors) == 5:
        _, incident, reflect, top, bot = monitors
        S[0, 0] = get_amplitude(incident)[-1]
        S[1, 0] = get_amplitude(reflect)[-1]
        S[0, 1] = get_amplitude(top)[0]
        S[1, 1] = get_amplitude(bot)[0]
    elif len(monitors) == 3:
        _, incident, reflect = monitors
        S[0, 0] = S[1, 1] = get_amplitude(incident)[-1]
        S[1, 0] = S[0, 1] = get_amplitude(reflect)[-1]
    return S
6577fac645e195c4e30406c6252c9b55831343a0
3,657,913
import numpy as np

def mapmri_STU_reg_matrices(radial_order):
    """Generates the static portions of the Laplacian regularization matrix
    according to [1]_ eq. (11, 12, 13).

    Parameters
    ----------
    radial_order : unsigned int,
        an even integer that represent the order of the basis

    Returns
    -------
    S, T, U : Matrices, shape (N_coef, N_coef)
        Regularization submatrices

    References
    ----------
    .. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
       using Laplacian-regularized MAP-MRI and its application to HCP data."
       NeuroImage (2016).
    """
    S = np.zeros((radial_order + 1, radial_order + 1))
    for i in range(radial_order + 1):
        for j in range(radial_order + 1):
            S[i, j] = map_laplace_s(i, j)

    T = np.zeros((radial_order + 1, radial_order + 1))
    for i in range(radial_order + 1):
        for j in range(radial_order + 1):
            T[i, j] = map_laplace_t(i, j)

    U = np.zeros((radial_order + 1, radial_order + 1))
    for i in range(radial_order + 1):
        for j in range(radial_order + 1):
            U[i, j] = map_laplace_u(i, j)
    return S, T, U
40cb1159f04d1291e06146dabd89380936c407a0
3,657,914
def _checker(word: dict):
    """checks if the 'word' dictionary is fine

    :param word: the node in the list of the text
    :type word: dict
    :return: if "f", "ref" and "sig" in word, returns true, else, returns false
    :rtype: bool
    """
    return "f" in word and "ref" in word and "sig" in word
ee6ec5a7ee393ddcbc97b13f6c09cdd9019fb1a6
3,657,915
def construc_prob(history, window, note_set, model, datafilename):
    """
    This function constructs the probabilities of seeing each next note

    Inputs:
        history, a list of strings, the note history in chronological order
        window, an integer, how far back we are looking
        note_set, the set of notes to be considered
        model, the model used to construct probabilities
        datafilename, a string, the name of the file containing the
            information to convert strings of notes to interaction dummies
    Outputs:
        A list of probabilities of len(note_set)
    """
    recent_history = history[len(history) - window + 1:]
    like_prob = []  # Initialize an empty list of probabilities of liking a certain sequence
    for note in note_set:
        potential_hist = recent_history + [note]
        X = create_X(potential_hist, datafilename)
        # print(potential_hist)
        # print(model(X))
        like_prob.append(model(X))
    return selection_prob(like_prob)
92e75d386c5fce984302ca60f80b2dc1891fc873
3,657,916
import numpy as np

def renderPybullet(envs, config, tensor=True):
    """Provides as many images as envs"""
    if type(envs) is list:
        obs = [
            env_.render(
                mode="rgb_array",
                image_size=config["image_size"],
                color=config["color"],
                fpv=config["fpv"],
                camera_id=0,
            )
            for env_ in envs
        ]
        obs = np.array(obs).transpose(0, 3, 1, 2) / 255.0
    else:
        obs = envs.render(
            mode="rgb_array",
            image_size=config["image_size"],
            color=config["color"],
            fpv=config["fpv"],
            camera_id=0,
        )
        obs = obs.transpose(2, 0, 1) / 255.0
        if tensor:
            obs = obs[None]
    return obs
fb04ecda7e0dbfbe7899d4684979828b3fcd83c6
3,657,917
def wifi(request):
    """Collect status information for wifi and return HTML response."""
    context = {
        'refresh': 5,
        'item': '- Wifi',
        'timestamp': timestamp(),
        'wifi': sorted(Wifi().aps),
    }
    return render(request, 'ulm.html', context)
0a5412c2912eaeae192dd6d5fe85d336dec1b169
3,657,918
def genModel(nChars, nHidden, numLayers=1, dropout=0.5, recurrent_dropout=0.5):
    """Generates the RNN model with nChars characters and numLayers hidden
    units with dimension nHidden."""
    model = Sequential()
    model.add(LSTM(nHidden, input_shape=(None, nChars), return_sequences=True,
                   dropout=dropout, recurrent_dropout=recurrent_dropout))
    for _ in range(numLayers - 1):
        model.add(LSTM(nHidden, return_sequences=True, dropout=dropout,
                       recurrent_dropout=recurrent_dropout))
    model.add(TimeDistributed(Dense(nChars)))
    model.add(Activation('softmax'))
    model.compile(loss="categorical_crossentropy", optimizer="adam")
    return model
4aeef47b8a4948e37eaa2ea07ac22ecee167df51
3,657,919
def rotate_system(shape_list, angle, center_point=None):
    """Rotates a set of shapes around a given point

    If no center point is given, assume the center of mass of the shape

    Args:
        shape_list (list): A list of list of (x,y) vertices
        angle (float): Angle in radians to rotate counterclockwise
        center_point ([float, float]): (x,y) point to rotate around

    Returns:
        A new shape list with rotated vertices
    """
    if center_point is None:
        center_point = centroid_for_uncomputed_shapes(shape_list)
    return [rotate_polygon(s, angle, center_point) for s in shape_list]
64c4ff717fd432a187d2616263405ae89a0d89f8
3,657,920
def _large_compatible_negative(tensor_type):
    """Large negative number as Tensor.

    This function is necessary because the standard value for epsilon
    in this module (-1e9) cannot be represented using tf.float16

    Args:
        tensor_type: a dtype to determine the type.

    Returns:
        a large negative number.
    """
    if tensor_type == tf.float16:
        return tf.float16.min
    return -1e9
c73a9e2de341d771ec07ecf2b2a178911ecc27bd
3,657,921
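The float16 guard in the row above exists because half precision tops out near ±65504, so a literal -1e9 would overflow to -inf. A quick illustration of the same idea (numpy used here only for demonstration; the original uses TensorFlow):

import numpy as np

print(np.finfo(np.float16).min)        # -65504.0, the most negative float16
print(np.float16(-1e9))                # overflows to -inf
mask_value = np.finfo(np.float16).min  # a safe "large negative" for half precision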
def classified_unread_counts():
    """
    Unread counts returned by the helper.classify_unread_counts function.
    """
    return {
        'all_msg': 12,
        'all_pms': 8,
        'unread_topics': {
            (1000, 'Some general unread topic'): 3,
            (99, 'Some private unread topic'): 1,
        },
        'unread_pms': {
            1: 2,
            2: 1,
        },
        'unread_huddles': {
            frozenset({1001, 11, 12}): 3,
            frozenset({1001, 11, 12, 13}): 2,
        },
        'streams': {
            1000: 3,
            99: 1,
        },
    }
4d5e984641de88fd497b6c78891b7e6478bb8385
3,657,922
def company_key(company_name=DEFAULT_COMPANY_NAME):
    """Constructs a Datastore key for a Company entity with company_name."""
    return ndb.Key('Company', company_name)
f9387ef2ee33ea87a4a9fd721f14c35ca60ac482
3,657,923
def to_n_class(digit_lst, data, labels):
    """to make a subset of MNIST dataset, which has particular digits

    Parameters
    ----------
    digit_lst : list
        for example, [0,1,2] or [1, 5, 8]
    data : numpy.array, shape (n_samples, n_features)
    labels : numpy.array or list of str

    Returns
    -------
    numpy.array, list of int
    """
    if not set(digit_lst) <= set(range(10)):
        raise ValueError('digit_lst must contain only digits in the range 0-9')
    indices = []
    new_labels = []
    for i, x in enumerate(data):
        for digit in digit_lst:
            if labels[i] == str(digit):
                indices.append(i)
                new_labels.append(digit)
    return data[indices], new_labels
79652687ec0670ec00d67681711903ae01f4cc87
3,657,924
import numpy
import numexpr as ne  # elementwise math is evaluated with numexpr
# T is the package's tensor-backend module (provides T.Tensor and T.EPSILON)

def acosh(x: T.Tensor) -> T.Tensor:
    """
    Elementwise inverse hyperbolic cosine of a tensor.

    Args:
        x (greater than 1): A tensor.

    Returns:
        tensor: Elementwise inverse hyperbolic cosine.
    """
    y = numpy.clip(x, 1 + T.EPSILON, numpy.inf)
    return ne.evaluate('arccosh(y)')
c5566c9b67b8be57be47c96762ce7371e1d4d988
3,657,925
def run_unit_tests():
    """
    Run unit tests against installed tools rpms
    """
    # At the time of this writing, no unit tests exist.
    # A unit tests script will be run so that unit tests can easily be modified
    print("Running unit tests...")
    success, output = run_cli_cmd(["/bin/sh", UNIT_TEST_SCRIPT], False)
    return success, output
fd2241bd471b7de61bac922f3da485cb954fbe06
3,657,927
def encode_input_descr(prm):
    """ Encode process description input."""
    elem = NIL("Input", *_encode_param_common(prm))
    elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)]
    elem.attrib["maxOccurs"] = "1"
    if isinstance(prm, LiteralData):
        elem.append(_encode_literal(prm, True))
    elif isinstance(prm, ComplexData):
        elem.append(_encode_complex(prm, True))
    elif isinstance(prm, BoundingBoxData):
        elem.append(_encode_bbox(prm, True))
    return elem
9d5db979f5da325595501a50c2031f56fd438b47
3,657,928
def poly_quo(f, g, *symbols):
    """Returns polynomial quotient. """
    return poly_div(f, g, *symbols)[0]
2a4b04b053189db9bd5cb946b6399257b49a8afb
3,657,929
import random
import numpy as np
from typing import OrderedDict

def preprocess_data(dataset, encoder, config):
    """
    Function to perform 4 preprocessing steps:
        1. Exclude classes below minimum threshold defined in config.threshold
        2. Exclude all classes that are not referenced in encoder.classes
        3. Encode and normalize data into (path: str, label: int) tuples
        4. Partition data samples into fractional splits defined in
           config.data_splits_meta

    Parameters
    ----------
    dataset : BaseDataset
        Any instance of BaseDataset or its subclasses
    encoder : LabelEncoder
        Description of parameter `encoder`.
    config : Namespace or stuf.stuf
        Config object containing the attributes/properties:
            config.threshold
            config.data_splits_meta

    Returns
    -------
    dict
        Dictionary mapping from keys defined in config.data_splits_meta.keys(),
        to lists of tuples representing each sample.

    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function/class.

    >>> dataset = LeavesDataset()
    ... encoder = LabelEncoder(dataset.data.family)
    ... data_splits = preprocess_data(dataset, encoder, config)
    """
    dataset.exclude_rare_classes(threshold=config.threshold)
    encoder.encoder = dataset.classes
    dataset, _ = dataset.enforce_class_whitelist(class_names=encoder.classes)

    x = list(dataset.data['path'].values)  # .reshape((-1,1))
    y = np.array(encoder.encode(dataset.data['family']))

    shuffled_data = list(zip(x, y))
    random.shuffle(shuffled_data)

    partitioned_data = partition_data(
        data=shuffled_data,
        partitions=OrderedDict(config.data_splits_meta),
    )
    return {k: v for k, v in partitioned_data.items() if len(v) > 0}
ed7f7382c4d1c8bc6ce718605b9d64cc2cb6ff6e
3,657,930
from dronekit.mavlink import MAVConnection

def connect(ip,
            _initialize=True,
            wait_ready=None,
            timeout=30,
            still_waiting_callback=default_still_waiting_callback,
            still_waiting_interval=1,
            status_printer=None,
            vehicle_class=None,
            rate=4,
            baud=115200,
            heartbeat_timeout=30,
            source_system=255,
            source_component=0,
            use_native=False):
    """
    Returns a :py:class:`Vehicle` object connected to the address specified by string parameter ``ip``.
    Connection string parameters (``ip``) for different targets are listed in the
    :ref:`getting started guide <get_started_connecting>`.

    The method is usually called with ``wait_ready=True`` to ensure that vehicle parameters and (most)
    attributes are available when ``connect()`` returns.

    .. code:: python

        from dronekit import connect

        # Connect to the Vehicle using "connection string" (in this case an address on network)
        vehicle = connect('127.0.0.1:14550', wait_ready=True)

    :param String ip: :ref:`Connection string <get_started_connecting>` for target address -
        e.g. 127.0.0.1:14550.

    :param Bool/Array wait_ready: If ``True`` wait until all default attributes have downloaded before
        the method returns (default is ``None``).
        The default attributes to wait on are: :py:attr:`parameters`, :py:attr:`gps_0`,
        :py:attr:`armed`, :py:attr:`mode`, and :py:attr:`attitude`.

        You can also specify a named set of parameters to wait on (e.g. ``wait_ready=['system_status','mode']``).

        For more information see :py:func:`Vehicle.wait_ready <Vehicle.wait_ready>`.

    :param status_printer: (deprecated) method of signature ``def status_printer(txt)`` that prints
        STATUS_TEXT messages from the Vehicle and other diagnostic information.
        By default the status information is handled by the ``autopilot`` logger.
    :param Vehicle vehicle_class: The class that will be instantiated by the ``connect()`` method.
        This can be any sub-class of ``Vehicle`` (and defaults to ``Vehicle``).
    :param int rate: Data stream refresh rate. The default is 4Hz (4 updates per second).
    :param int baud: The baud rate for the connection. The default is 115200.
    :param int heartbeat_timeout: Connection timeout value in seconds (default is 30s).
        If a heartbeat is not detected within this time an exception will be raised.
    :param int source_system: The MAVLink ID of the :py:class:`Vehicle` object returned by this
        method (by default 255).
    :param int source_component: The MAVLink Component ID of the :py:class:`Vehicle` object returned
        by this method (by default 0).
    :param bool use_native: Use precompiled MAVLink parser.

        .. note::

            The returned :py:class:`Vehicle` object acts as a ground control station from the
            perspective of the connected "real" vehicle. It will process/receive messages from the
            real vehicle if they are addressed to this ``source_system`` id. Messages sent to the
            real vehicle are automatically updated to use the vehicle's ``target_system`` id.

            It is *good practice* to assign a unique id for every system on the MAVLink network.
            It is possible to configure the autopilot to only respond to guided-mode commands from
            a specified GCS ID.

            The ``status_printer`` argument is deprecated. To redirect the logging from the library
            and from the autopilot, configure the ``dronekit`` and ``autopilot`` loggers using the
            Python ``logging`` module.

    :returns: A connected vehicle of the type defined in ``vehicle_class`` (a subclass of
        :py:class:`Vehicle`).
    """
    if not vehicle_class:
        vehicle_class = Vehicle

    handler = MAVConnection(ip, baud=baud, source_system=source_system,
                            source_component=source_component, use_native=use_native)
    vehicle = vehicle_class(handler)

    if status_printer:
        vehicle._autopilot_logger.addHandler(ErrprinterHandler(status_printer))

    if _initialize:
        vehicle.initialize(rate=rate, heartbeat_timeout=heartbeat_timeout)

    if wait_ready:
        if wait_ready is True:
            vehicle.wait_ready(still_waiting_interval=still_waiting_interval,
                               still_waiting_callback=still_waiting_callback,
                               timeout=timeout)
        else:
            vehicle.wait_ready(*wait_ready)

    return vehicle
3cd30bcc35b308913a5f54f39f2e0fb7a5583032
3,657,931
from datetime import datetime
from sanic.response import json  # assumed: json(...) below builds a Sanic JSON response

async def ready(request):
    """
    For Kubernetes readiness probe,
    """
    try:
        # check redis valid.
        if app.redis_pool:
            await app.redis_pool.save('health', 'ok', 1)
        # check mysql valid.
        if app.mysql_pool:
            sql = "SELECT 666"
            result = await app.mysql_pool.fetchone(sql)
            if result is None:
                raise ServerError(error='内部错误', code='10500', message="msg")
    except Exception as e:
        raise ServerError(error='内部错误', code='10500', message="msg")
    return json({
        'pong': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'version': app.config['API_VERSION'],
    })
f776787f65609fa341eb360c801cf8ebdc16a2eb
3,657,933
import numpy as np

def surface_area(polygon_mesh):
    """ Computes the surface area for a polygon mesh.

    Parameters
    ----------
    polygon_mesh : ``PolygonMesh`` object

    Returns
    -------
    result : surface area
    """
    if isinstance(polygon_mesh, polygonmesh.FaceVertexMesh):
        print("A FaceVertex Mesh")
        result = 0.0
        for face in polygon_mesh.faces:
            v1, v2, v3 = face
            result += 0.5 * abs(np.linalg.norm(np.cross(
                polygon_mesh.vertices[v2] - polygon_mesh.vertices[v1],
                polygon_mesh.vertices[v3] - polygon_mesh.vertices[v1])))
        return result
    return None
587740d493ef5762c85f75f81d98e141121b5d7d
3,657,934
from scipy.optimize import fsolve  # non-linear solver
import numpy as np

def gas_zfactor(T_pr, P_pr):
    """
    Calculate Gas Compressibility Factor
    For range: 0.2 < P_pr < 30; 1 < T_pr < 3 (error 0.486%)
    (Dranchuk and Aboukassem, 1975)
    """
    # T_pr : calculated pseudoreduced temperature
    # P_pr : calculated pseudoreduced pressure
    if T_pr > 1 and T_pr < 3 and P_pr > 0.2 and P_pr < 30:
        a1 = 0.3265; a2 = -1.0700; a3 = -0.5339
        a4 = 0.01569; a5 = -0.05165; a6 = 0.5475
        a7 = -0.7361; a8 = 0.1844; a9 = 0.1056
        a10 = 0.6134; a11 = 0.7210

        def f(y):
            rho_pr, z = y
            c1 = a1 + (a2/T_pr) + (a3/(T_pr**3)) + (a4/(T_pr**4)) + (a5/(T_pr**5))
            c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))
            c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))
            c4 = (a10)*(1 + (a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))
            f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1
            f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))
            return [f1, f2]

        pseudo_rho, z_factor = fsolve(f, [1, 1])  # initial guess
    else:
        pseudo_rho, z_factor = np.nan, np.nan
    return (pseudo_rho, z_factor)
b9b1d770483737da8277a89b3f1100ea0c49c1c0
3,657,935
def format_value_with_percentage(original_value):
    """
    Return a value in percentage format from an input argument,
    the original value
    """
    percentage_value = "{0:.2%}".format(original_value)
    return percentage_value
78bfb753b974bc7cbe3ac96f58ee49251063d2e7
3,657,936
import numpy

def get_Z_and_extent(topofile):
    """Get data from an ESRI ASCII file."""
    f = open(topofile, "r")
    ncols = int(f.readline().split()[1])
    nrows = int(f.readline().split()[1])
    xllcorner = float(f.readline().split()[1])
    yllcorner = float(f.readline().split()[1])
    cellsize = float(f.readline().split()[1])
    nodatavalue = float(f.readline().split()[1])

    data = numpy.zeros((nrows, ncols), dtype=numpy.float64)
    for i in range(nrows):
        data[i, :] = f.readline().strip().split()
    f.close()

    extent = [xllcorner, xllcorner + ncols*cellsize,
              yllcorner, yllcorner + nrows*cellsize]
    return data, extent
e96db5c2ae4a0d6c94654d7ad29598c3231ec186
3,657,937
from typing import Sequence
from typing import MutableMapping
import copy

def modified_config(
    file_config: submanager.models.config.ConfigPaths,
    request: pytest.FixtureRequest,
) -> submanager.models.config.ConfigPaths:
    """Modify an existing config file and return the path."""
    # Get and check request params
    request_param = getattr(request, PARAM_ATTR, None)
    if request_param is None:
        raise ValueError("Update dict must be passed via request param")
    if isinstance(request_param, Sequence):
        update_dict, disable_all = request_param
    else:
        update_dict = request_param
        disable_all = False
    if not isinstance(update_dict, MutableMapping):
        raise TypeError(
            f"Update dict {update_dict!r} must be a mapping, "
            f"not {type(update_dict)!r}",
        )

    # Disable all items if requested
    config_data = submanager.config.utils.load_config(file_config.static)
    if disable_all:
        config_data_modified = (
            submanager.utils.dicthelpers.process_items_recursive(
                dict(config_data),
                fn_torun=lambda value: False,
                keys_match={"enabled"},
                inplace=False,
            )
        )
        if isinstance(disable_all, str):
            config_data_level = config_data_modified
            for key in disable_all.split("."):
                config_data_level = config_data_level[key]
            if config_data_level.get("enabled", None) is not None:
                config_data_level["enabled"] = True
    else:
        config_data_modified = copy.deepcopy(dict(config_data))

    # Modify config and write it back
    config_data_modified = submanager.utils.dicthelpers.update_recursive(
        base=config_data_modified,
        update=dict(update_dict),
        inplace=False,
    )
    submanager.config.utils.write_config(
        config_data_modified,
        config_path=file_config.static,
    )
    return file_config
8a453233b6340b50fdcbc4d3bf7b2f1f1e7e15ce
3,657,938
import torch

def train_discrim(discrim, state_features, actions, optim, demostrations, settings):
    """demostrations: [state_features|actions]"""
    criterion = torch.nn.BCELoss()
    for _ in range(settings.VDB_UPDATE_NUM):
        learner = discrim(torch.cat([state_features, actions], dim=-1))
        expert = discrim(demostrations)
        discrim_loss = criterion(learner, torch.ones(
            [len(state_features), 1])) + criterion(
                expert, torch.zeros(len(demostrations), 1))
        optim.zero_grad()
        discrim_loss.backward()
        optim.step()
    expert_acc = ((discrim(demostrations) < 0.5).float()).mean()
    learner_acc = ((discrim(torch.cat([state_features, actions], dim=1)) >
                    0.5).float()).mean()
    return expert_acc, learner_acc
7e6c16fc396b371e92d3a04179eacb9cae63659c
3,657,939
def filter_column(text, column, start=0, sep=None, **kwargs):
    """
    Filters (like grep) lines of text according to a specified column and operator/value

    :param text: a string
    :param column: integer >=0
    :param sep: optional separator between words (default is arbitrary number of blanks)
    :param kwargs: operator=value, e.g. eq='exact match', contains='substring',
                   startswith='prefix', etc.
    :return:
    """
    if len(kwargs) != 1:
        raise TypeError("Missing or too many keyword parameter in filter_column")
    op, value = list(kwargs.items())[0]
    if op in ('eq', 'equals'):
        op = '__eq__'
    elif op in ('contains', 'includes'):
        op = '__contains__'
    elif op not in ('startswith', 'endswith'):
        raise ValueError("Unknown filter_column operator: {}".format(op))
    lines = text.splitlines() if isinstance(text, str) else text
    if start:
        lines = lines[start:]
    values = []
    for line in lines:
        elts = line.split(sep) if sep else line.split()
        if elts and column < len(elts):
            elt = elts[column]
            if getattr(elt, op)(value):
                values.append(line.strip())
    return values
f7a788d2d79dba33961213c6bc469d41a0151812
3,657,941
def max_tb(collection):  # pragma: no cover
    """Returns the maximum number of TB recorded in the collection"""
    max_TB = 0
    for doc in collection.find({}).sort([('total_TB', -1)]).limit(1):
        max_TB = doc['total_TB']
    return max_TB
bde417de0b38de7a7b5e4e3db8c05e87fa6c55ca
3,657,942
import cv2
import numpy as np

def prep_im_for_blob(im, pixel_means, target_size_1, target_size_2,
                     max_size_1, max_size_2):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale_1 = float(target_size_1) / float(im_size_min)
    im_scale_2 = float(target_size_2) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale_1 * im_size_max) > max_size_1:
        im_scale_1 = float(max_size_1) / float(im_size_max)
    if np.round(im_scale_2 * im_size_max) > max_size_2:
        im_scale_2 = float(max_size_2) / float(im_size_max)
    im_1 = cv2.resize(im, None, None, fx=im_scale_1, fy=im_scale_1,
                      interpolation=cv2.INTER_LINEAR)
    im_2 = cv2.resize(im, None, None, fx=im_scale_2, fy=im_scale_2,
                      interpolation=cv2.INTER_LINEAR)
    return im_1, im_2, im_scale_1, im_scale_2
a1842d918149f5d1ccc52e04cc499005570b72ea
3,657,943
import numpy as np
import matplotlib.pyplot as plt

def plotann(annotation, title=None, timeunits='samples', returnfig=False):
    """ Plot sample locations of an Annotation object.

    Usage:
    plotann(annotation, title = None, timeunits = 'samples', returnfig = False)

    Input arguments:
    - annotation (required): An Annotation object. The sample attribute
      locations will be overlaid on the signal.
    - title (default=None): A string containing the title of the graph.
    - timeunits (default='samples'): String specifying the x axis unit.
      Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'.
    - returnfig (default=False): Specifies whether the figure is to be
      returned as an output argument.

    Output argument:
    - figure: The matplotlib figure generated. Only returned if the
      'returnfig' option is set to True.

    Note: The plotrec function is useful for plotting annotations on top of
    signal waveforms.

    Example Usage:
    import wfdb
    annotation = wfdb.rdann('sampledata/100', 'atr', sampfrom = 100000, sampto = 110000)
    annotation.fs = 360
    wfdb.plotann(annotation, timeunits = 'minutes')
    """
    # Check the validity of items used to make the plot
    # Get the x axis annotation values to plot
    plotvals = checkannplotitems(annotation, title, timeunits)

    # Create the plot
    fig = plt.figure()
    plt.plot(plotvals, np.zeros(len(plotvals)), 'r+')
    if title is not None:
        plt.title(title)
    # Axis Labels
    if timeunits == 'samples':
        plt.xlabel('index/sample')
    else:
        plt.xlabel('time/' + timeunits[:-1])
    plt.show(fig)

    # Return the figure if requested
    if returnfig:
        return fig
2159c1ffed52ef6524990f861d7e986b7aa00c25
3,657,945
def match_assignments(nb_assignments, course_id):
    """
    Check sqlalchemy table for match with nbgrader assignments from a
    specified course. Creates a dictionary with nbgrader assignments as the
    key. If a match is found, query the entry from the table and set as the
    value; else, set the value to None.
    """
    nb_matches = {
        assignment.name: AssignmentMatch.query.filter_by(
            nbgrader_assign_name=assignment.name,
            course_id=course_id).first()
        for assignment in nb_assignments
    }
    return nb_matches
22158bc0d3655a78b8e5b6cb245b781e187f1481
3,657,946
def tan(input):
    """Computes tangent of values in ``input``.

    :rtype: TensorList of tan(input). If input is an integer, the result
        will be float, otherwise the type is preserved.
    """
    return _arithm_op("tan", input)
27e6487591ff4d207baea094293be83ef22a4099
3,657,947
import numpy as np

def recall_from_IoU(IoU, samples=500):
    """
    plot recall_vs_IoU_threshold
    """
    if not (isinstance(IoU, list) or IoU.ndim == 1):
        raise ValueError('IoU needs to be a list or 1-D')
    iou = np.float32(IoU)

    # Plot intersection over union
    IoU_thresholds = np.linspace(0.0, 1.0, samples)
    recall = np.zeros_like(IoU_thresholds)
    for idx, IoU_th in enumerate(IoU_thresholds):
        tp, relevant = 0, 0
        inds, = np.where(iou >= IoU_th)
        recall[idx] = len(inds) * 1.0 / len(IoU)
    return recall, IoU_thresholds
9c24a4e546a76998339ce85e02fae6fec3adb00d
3,657,948
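A minimal usage sketch for the recall curve above, on synthetic IoU values (the random data is only for illustration; names follow the function's own signature):

import numpy as np

np.random.seed(0)
ious = np.random.uniform(0.0, 1.0, size=200).astype(np.float32)  # fake detector IoUs
recall, thresholds = recall_from_IoU(list(ious), samples=11)
for t, r in zip(thresholds, recall):
    print(f"IoU >= {t:.1f}: recall {r:.2f}")  # recall falls as the threshold rises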
import math

def _GetImage(options):
    """Returns the ndvi regression image for the given options.

    Args:
        options: a dict created by _ReadOptions() containing the request options

    Returns:
        An ee.Image with the coefficients of the regression and a band called
        "rmse" containing the Root Mean Square Error for the ndvi value
        calculated by the regression or None if collection is empty.
    """
    # renaming the used options
    regression = options["regression"]
    start = options["start"]

    collection = _GetCollection(options)
    # _GetCollection() returns None if collection is empty
    if collection is None:
        return None

    # Function to calculate the values needed for a regression with a polynomial of degree 1
    def makePoly1Variables(img):
        date = img.date()
        doy = date.getRelative("day", "year")
        x1 = doy
        x0 = 1
        return (img.select()
                .addBands(ee.Image.constant(x0))                     # 0. a0 constant term
                .addBands(ee.Image.constant(x1))                     # 1. a1*x
                .addBands(img.normalizedDifference(["NIR", "RED"]))  # 2. response variable (NDVI)
                .toFloat())

    # Function to calculate the values needed for a regression with a polynomial of degree 2
    def makePoly2Variables(img):
        date = img.date()
        doy = date.getRelative("day", "year")
        x2 = doy.pow(2)
        x1 = doy
        x0 = 1
        return (img.select()
                .addBands(ee.Image.constant(x0))                     # 0. a0 constant term
                .addBands(ee.Image.constant(x1))                     # 1. a1*x
                .addBands(ee.Image.constant(x2))                     # 2. a2*x^2
                .addBands(img.normalizedDifference(["NIR", "RED"]))  # 3. response variable (NDVI)
                .toFloat())

    # Function to calculate the values needed for a regression with a polynomial of degree 3
    def makePoly3Variables(img):
        date = img.date()
        doy = date.getRelative("day", "year")
        x3 = doy.pow(3)
        x2 = doy.pow(2)
        x1 = doy
        x0 = 1
        return (img.select()
                .addBands(ee.Image.constant(x0))                     # 0. a0 constant term
                .addBands(ee.Image.constant(x1))                     # 1. a1*x
                .addBands(ee.Image.constant(x2))                     # 2. a2*x^2
                .addBands(ee.Image.constant(x3))                     # 3. a3*x^3
                .addBands(img.normalizedDifference(["NIR", "RED"]))  # 4. response variable (NDVI)
                .toFloat())

    # Function to calculate the values needed for a regression with the model after Zhu & Woodcock
    def makeZhuWoodVariables(img):
        seconds = img.date().millis().divide(1000).floor()
        seconds_start = ee.Date("%s-01-01" % start).millis().divide(1000).floor()
        seconds_offset = seconds.subtract(seconds_start)
        sin_intra = ee.Number(2).multiply(math.pi).divide(365*24*60*60).multiply(seconds_offset).sin()
        cos_intra = ee.Number(2).multiply(math.pi).divide(365*24*60*60).multiply(seconds_offset).cos()
        inter = seconds_offset
        return (img.select()
                .addBands(ee.Image.constant(1))                      # 0. constant term
                .addBands(ee.Image.constant(cos_intra))              # 1. cos intra-annual
                .addBands(ee.Image.constant(sin_intra))              # 2. sin intra-annual
                .addBands(ee.Image.constant(inter))                  # 3. inter-annual
                .addBands(img.normalizedDifference(["NIR", "RED"]))  # 4. response variable (NDVI)
                .toFloat())

    makeVariables = {"poly1": makePoly1Variables, "poly2": makePoly2Variables,
                     "poly3": makePoly3Variables, "zhuWood": makeZhuWoodVariables}
    # calculate the needed values for the regression
    collection_prepared = collection.map(makeVariables[regression])

    predictorsCount = {"poly1": 2, "poly2": 3, "poly3": 4, "zhuWood": 4}

    # counts the ndvi values per pixel
    countValues = collection_prepared.select("nd").reduce(ee.Reducer.count())

    # masks pixels with less than 2 * number of predictors, to deliver better results
    def countMask(img):
        return img.updateMask(countValues.gt(predictorsCount[regression]*2 - 1))

    # use the countMask
    collection_prepared = collection_prepared.map(countMask)

    # doing the regression
    coefficients = collection_prepared.reduce(
        ee.Reducer.linearRegression(predictorsCount[regression], 1))

    # flattens regression coefficients to one image with multiple bands
    flattenPattern = {"poly1": ["a0", "a1"],
                      "poly2": ["a0", "a1", "a2"],
                      "poly3": ["a0", "a1", "a2", "a3"],
                      "zhuWood": ["a0", "a1", "a2", "a3"]}
    renamePattern = {"poly1": "doy", "poly2": "doy", "poly3": "doy", "zhuWood": "sec"}
    coefficientsImage = coefficients.select(["coefficients"]).arrayFlatten(
        [flattenPattern[regression], [renamePattern[regression]]])

    # flattens the root mean square of the predicted ndvi values
    rmse = coefficients.select("residuals").arrayFlatten([["rmse"]])

    # combines coefficients and rmse and returns them as one ee.Image
    return coefficientsImage.addBands(rmse)
00b4bd82e772a8afa8c4f92c3dd9afa880af79f2
3,657,949
def get_registered_plugins(registry, as_instances=False, sort_items=True):
    """Get registered plugins.

    Get a list of registered plugins in the form of a tuple (plugin name,
    plugin description). If not yet auto-discovered, auto-discovers them.

    :param registry:
    :param bool as_instances:
    :param bool sort_items:
    :return list:
    """
    ensure_autodiscover()

    if as_instances:
        return registry._registry

    registered_plugins = []
    for uid, plugin in registry._registry.items():
        plugin_name = safe_text(plugin.name)
        registered_plugins.append((uid, plugin_name))
    if sort_items:
        registered_plugins.sort()
    return registered_plugins
68b695ebe3de95a86d37831fe38ce934bcced16c
3,657,950
import time

def datetime_to_timestamp(d):
    """convert a datetime object to seconds since Epoch.

    Args:
        d: a naive datetime object in default timezone

    Return:
        int, timestamp in seconds
    """
    return int(time.mktime(d.timetuple()))
356ac090b0827d49e9929a7ef26041b26c6cc690
3,657,951
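A quick round-trip check of the helper above; note that time.mktime interprets the naive datetime in the local timezone:

import time
from datetime import datetime

d = datetime(2020, 1, 2, 3, 4, 5)
ts = int(time.mktime(d.timetuple()))
print(ts)                          # seconds since Epoch, local-time interpretation
print(datetime.fromtimestamp(ts))  # 2020-01-02 03:04:05, round-trips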
import torch

def gumbel_softmax(logits, temperature):
    """From https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f

    logits: a tensor of shape (*, n_class)
    returns an one-hot vector of shape (*, n_class)
    """
    y = gumbel_softmax_sample(logits, temperature)
    shape = y.size()
    _, ind = y.max(dim=-1)
    y_hard = torch.zeros_like(y).view(-1, shape[-1])
    y_hard.scatter_(1, ind.view(-1, 1), 1)
    y_hard = y_hard.view(*shape)
    return (y_hard - y).detach() + y
49a79bf5955cfc01fd27f0a56c23d001e3ef65cc
3,657,952
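The `(y_hard - y).detach() + y` line is the straight-through estimator: the forward pass emits the one-hot `y_hard`, while gradients flow through the soft sample `y`. The row depends on a `gumbel_softmax_sample` helper it does not define; a minimal sketch of one, under the usual Gumbel-noise construction (this sampler is an assumption, not part of the row above):

import torch
import torch.nn.functional as F

def gumbel_softmax_sample(logits, temperature):
    # Sample Gumbel(0, 1) noise, add it to the logits, soften with softmax
    gumbel_noise = -torch.log(-torch.log(torch.rand_like(logits) + 1e-20) + 1e-20)
    return F.softmax((logits + gumbel_noise) / temperature, dim=-1)

logits = torch.randn(4, 10, requires_grad=True)
y = gumbel_softmax(logits, temperature=0.5)
print(y.sum(dim=-1))  # each row is exactly one-hot: sums to 1
y.sum().backward()    # gradients reach logits through the soft sample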
def in_whitelist(address):
    """
    Test if the given email address is contained in the list of allowed
    addressees.
    """
    if WHITELIST is None:
        return True
    else:
        return any(regex.search(address) for regex in WHITELIST)
ed552f16a2cd4b9d5e97033e47d5ec8950841164
3,657,953
def decomposePath(path):
    """
    :example:
    >>> decomposePath(None)
    >>> decomposePath("")
    >>> decomposePath(1)
    >>> decomposePath("truc")
    ('', 'truc', '', 'truc')
    >>> decomposePath("truc.txt")
    ('', 'truc', 'txt', 'truc.txt')
    >>> decomposePath("/home/truc.txt")
    ('/home/', 'truc', 'txt', 'truc.txt')
    >>> decomposePath("/home/truc.txt.bz2")
    ('/home/', 'truc.txt', 'bz2', 'truc.txt.bz2')
    >>> decomposePath("/truc.txt.bz2")
    ('/', 'truc.txt', 'bz2', 'truc.txt.bz2')
    >>> decomposePath("./truc.txt.bz2")
    ('./', 'truc.txt', 'bz2', 'truc.txt.bz2')
    >>> decomposePath(".truc.txt.bz2")
    ('', '.truc.txt', 'bz2', '.truc.txt.bz2')
    """
    if path is None or type(path) is not str or len(path) == 0:
        return None
    filenameExt = path.split("/")[-1]
    dir = path[0:-len(filenameExt)]
    filename = ".".join(filenameExt.split(".")[0:-1])
    ext = filenameExt.split(".")[-1]
    if len(filename) == 0 and len(ext) > 0:
        filename, ext = ext, filename
    return (dir, filename, ext, filenameExt)
7b45cfe64f631912fc56246f404ddbea51b9f1ec
3,657,954
from math import log

def BSCLLR(c, p):
    """
    c: A list of ones and zeros representing a codeword received over a BSC.
    p: Flip probability of the BSC.
    Returns log-likelihood ratios for c.
    """
    N = len(c)
    evidence = [0] * N
    for i in range(N):
        if (c[i]):
            evidence[i] = log(p / (1 - p))
        else:
            evidence[i] = log((1 - p) / p)
    return evidence
2ee6f4a72a8c2aa3257ae00e8374511f74edcbdb
3,657,955
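A small numeric check of the LLR convention above: with flip probability p = 0.1, a received 0 yields log(0.9/0.1) ≈ +2.197 (evidence for a transmitted 0), and a received 1 yields the mirror value:

llrs = BSCLLR([0, 1, 0, 0, 1], 0.1)
print([round(v, 3) for v in llrs])  # [2.197, -2.197, 2.197, 2.197, -2.197]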
import torch

def _res_dynamics_fwd(
    real_input, imag_input,
    sin_decay, cos_decay,
    real_state, imag_state,
    threshold, w_scale, dtype=torch.int32
):
    """Forward pass of the discretized resonator dynamics (fixed-point emulation)."""
    dtype = torch.int64
    device = real_state.device
    real_old = (real_state * w_scale).clone().detach().to(dtype).to(device)
    imag_old = (imag_state * w_scale).clone().detach().to(dtype).to(device)
    sin_decay_int = (sin_decay).clone().detach().to(dtype).to(device)
    cos_decay_int = (cos_decay).clone().detach().to(dtype).to(device)
    real = torch.zeros_like(real_input)
    imag = torch.zeros_like(imag_input)
    threshold *= w_scale

    num_steps = real_input.shape[-1]
    for n in range(num_steps):
        real_new = right_shift_to_zero(cos_decay_int * real_old, 12) \
            - right_shift_to_zero(sin_decay_int * imag_old, 12) \
            + (w_scale * real_input[..., n]).to(dtype)
        imag_new = right_shift_to_zero(sin_decay_int * real_old, 12) \
            + right_shift_to_zero(cos_decay_int * imag_old, 12) \
            + (w_scale * imag_input[..., n]).to(dtype)
        if threshold >= 0:
            spike_new = (imag_new >= threshold).to(dtype)
            real_old = ((1 - spike_new) * real_new).to(dtype)
            imag_old = (
                spike_new * (threshold - 1) + (1 - spike_new) * imag_new
            ).to(dtype)
        else:
            real_old = real_new
            imag_old = imag_new
        real[..., n] = real_new / w_scale
        imag[..., n] = imag_new / w_scale
    return real, imag
259b520c9ba4491931726b02ff51bc1c69283cdd
3,657,956
def tokenize_finding(finding):
    """Turn the finding into multiple findings split by whitespace."""
    tokenized = set()
    tokens = finding.text.split()
    cursor = 0
    # Note that finding.start and finding.end refer to the location in the
    # overall text, but finding.text is just the text for this finding.
    for token in tokens:
        start = finding.text.find(token, cursor)
        cursor = end = start + len(token)
        tokenized.add(Finding(
            finding.category, start + finding.start, end + finding.start,
            token, finding.context_start, finding.raw_context))
    return tokenized
28974a87bdb006bbdf37fff68345a9df81ea0962
3,657,958
import numpy as np
import scipy
import scipy.ndimage
import scipy.spatial

def gaussian_filter_density(gt):
    """generate ground truth density map

    Args:
        gt: (height, width), object center is 1.0, otherwise 0.0

    Returns:
        density map
    """
    density = np.zeros(gt.shape, dtype=np.float32)
    gt_count = np.count_nonzero(gt)
    if gt_count == 0:
        return density

    pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0])))  # (x,y)
    leaf_size = 2048
    # build kd tree
    tree = scipy.spatial.KDTree(pts.copy(), leafsize=leaf_size)
    # query kd tree
    distances, locations = tree.query(pts, k=4)

    for i, pt in enumerate(pts):
        pt2d = np.zeros(gt.shape, dtype=np.float32)
        pt2d[pt[1], pt[0]] = 1.
        if gt_count > 1:
            sigma = (distances[i][1] + distances[i][2] + distances[i][3]) * 0.085
            sigma = min(sigma, 999)  # avoid inf
        else:
            raise NotImplementedError('should not be here!!')
        density += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant')
    return density
9a51de844a08af18e5d1f72d368dbd6b05d24d34
3,657,959
def RGBfactorstoBaseandRange(
        lumrange: list[int, int],
        rgbfactors: list[float, float, float]):
    """Get base color luminosity and luminosity range from color expressed
    as r, g, b float values and min and max byte luminosity values

    Args:
        lumrange  : [minval: byte, maxval: byte]
        rgbfactors: color as [r: float, g: float, b: float]

    Returns:
        base luminosity as [r: byte, g: byte, b: byte]
        luminosity range as [r: byte, g: byte, b: byte]
    """
    baselum = intscalarmulvect(rgbfactors, lumrange[0])
    lumrange = subvect(scalarmulvect(rgbfactors, lumrange[1]), baselum)
    return baselum, lumrange
47fba5a98b324fc27869fee8b03903f844ef2c38
3,657,960
def mean_by_orbit(inst, data_label):
    """Mean of data_label by orbit over Instrument.bounds

    Parameters
    ----------
    data_label : string
        string identifying data product to be averaged

    Returns
    -------
    mean : pandas Series
        simple mean of data_label indexed by start of each orbit
    """
    return _core_mean(inst, data_label, by_orbit=True)
55e3edac3231d4c42428cd87ee758f1b27d959b9
3,657,961
from typing import Callable
from typing import Optional

def quantile_constraint(
    column: str,
    quantile: float,
    assertion: Callable[[float], bool],
    where: Optional[str] = None,
    hint: Optional[str] = None,
) -> Constraint:
    """
    Runs quantile analysis on the given column and executes the assertion

    column:
        Column to run the assertion on
    quantile:
        Which quantile to assert on
    assertion:
        Callable that receives a float input parameter (the computed quantile)
        and returns a boolean
    hint:
        A hint to provide additional context why a constraint could have failed
    """
    quant = Quantile(column, quantile, where)
    constraint = AnalysisBasedConstraint[float](
        quant, assertion, hint=hint  # type: ignore[arg-type]
    )
    return NamedConstraint(constraint, f"QuantileConstraint({quant})")
b3e3924a830ec7fd47de981e1ae9eb3f1810c2a1
3,657,962
from typing import Tuple

import numpy as np
import torch

def _compute_rank(
    kg_embedding_model,
    pos_triple,
    corrupted_subject_based,
    corrupted_object_based,
    device,
) -> Tuple[int, int]:
    """
    :param kg_embedding_model:
    :param pos_triple:
    :param corrupted_subject_based:
    :param corrupted_object_based:
    :param device:
    :param all_pos_triples_hashed: This parameter isn't used but is necessary
        for compatibility
    """
    corrupted_subject_based = torch.tensor(
        corrupted_subject_based, dtype=torch.long, device=device
    )
    corrupted_object_based = torch.tensor(
        corrupted_object_based, dtype=torch.long, device=device
    )

    scores_of_corrupted_subjects = kg_embedding_model.predict(corrupted_subject_based)
    scores_of_corrupted_objects = kg_embedding_model.predict(corrupted_object_based)

    pos_triple = np.array(pos_triple)
    pos_triple = np.expand_dims(a=pos_triple, axis=0)
    pos_triple = torch.tensor(pos_triple, dtype=torch.long, device=device)

    score_of_positive = kg_embedding_model.predict(pos_triple)

    scores_subject_based = np.append(arr=scores_of_corrupted_subjects,
                                     values=score_of_positive)
    indice_of_pos_subject_based = scores_subject_based.size - 1

    scores_object_based = np.append(arr=scores_of_corrupted_objects,
                                    values=score_of_positive)
    indice_of_pos_object_based = scores_object_based.size - 1

    _, sorted_score_indices_subject_based = torch.sort(
        torch.tensor(scores_subject_based, dtype=torch.float),
        descending=False)
    sorted_score_indices_subject_based = sorted_score_indices_subject_based.cpu().numpy()

    _, sorted_score_indices_object_based = torch.sort(
        torch.tensor(scores_object_based, dtype=torch.float),
        descending=False)
    sorted_score_indices_object_based = sorted_score_indices_object_based.cpu().numpy()

    # Get index of first occurrence that fulfills the condition
    rank_of_positive_subject_based = np.where(
        sorted_score_indices_subject_based == indice_of_pos_subject_based)[0][0]
    rank_of_positive_object_based = np.where(
        sorted_score_indices_object_based == indice_of_pos_object_based)[0][0]

    return (
        rank_of_positive_subject_based,
        rank_of_positive_object_based,
    )
2b5043dfed43907563c473141257626bb93027b7
3,657,963
def _get_bool_argument(ctx: ClassDefContext, expr: CallExpr,
                       name: str, default: bool) -> bool:
    """Return the boolean value for an argument to a call or the
    default if it's not found.
    """
    attr_value = _get_argument(expr, name)
    if attr_value:
        ret = ctx.api.parse_bool(attr_value)
        if ret is None:
            ctx.api.fail('"{}" argument must be True or False.'.format(name), expr)
            return default
        return ret
    return default
7f903f884edcb4af328207a0b7d2569cefce0a93
3,657,964
import json

def validate_filter_parameter(string):
    """ Extracts a single filter parameter in name[=value] format """
    result = ()
    if string:
        comps = string.split('=', 1)
        if comps[0]:
            if len(comps) > 1:
                # In the portal, if the value textbox is blank we store the value as an
                # empty string. In the CLI, we should allow inputs like 'name=', which
                # correspond to an empty string value. But there is no way to
                # differentiate between the CLI inputs 'name=' and 'name=""'.
                # So even though "" is an invalid JSON escaped string, we accept it and
                # set the value to an empty string.
                filter_param_value = '\"\"' if comps[1] == "" else comps[1]
                try:
                    # Ensure that the provided value of this filter parameter is valid
                    # JSON. Error out if the value is invalid JSON.
                    filter_param_value = json.loads(filter_param_value)
                except ValueError:
                    raise CLIError(
                        'Filter parameter value must be a JSON escaped string. '
                        '"{}" is not a valid JSON object.'.format(filter_param_value))
                result = (comps[0], filter_param_value)
            else:
                result = (string, '')
        else:
            # Error out on invalid arguments like '=value' or '='
            raise CLIError(
                'Invalid filter parameter "{}". Parameter name cannot be empty.'
                .format(string))
    return result
8258cff656889a57aaeb24644ea4efc9a60a6997
3,657,965
def ones(distribution, dtype=float):
    """Create a LocalArray filled with ones."""
    la = LocalArray(distribution=distribution, dtype=dtype)
    la.fill(1)
    return la
d3caa46b76932a44d441574c78ebbd9c4e8d29f9
3,657,966
def update_podcast_url(video):
    """Query the DDB table for this video. If found, it means we have a
    podcast m4a stored in S3. Otherwise, return no podcast.
    """
    try:
        response = PODCAST_TABLE_CLIENT.query(
            KeyConditionExpression=Key('session').eq(video.session_id)
            & Key('year').eq(video.get_published_year())
        )
    except ClientError as error:
        print('Problem getting data from DynamoDB: {}'.format(error))
        return False
    else:
        if response['Count'] == 1:
            video.podcast_url = response['Items'][0]['url']
            return True
50a39aceaba7980dff90043bf444b01607b258ae
3,657,967
def translate(filename):
    """ File editing handler """
    if request.method == 'POST':
        return save_translation(app, request, filename)
    else:
        return open_editor_form(app, request, filename)
5f9419db30ebd76e17f9f5c6efd746b3ddc1d8b0
3,657,968
def read_fileset(fileset):
    """
    Extract required data from the sdoss fileset.
    """
    feat_data = {
        'DATE_OBS': [],
        'FEAT_HG_LONG_DEG': [],
        'FEAT_HG_LAT_DEG': [],
        'FEAT_X_PIX': [],
        'FEAT_Y_PIX': [],
        'FEAT_AREA_DEG2': [],
        'FEAT_FILENAME': []}

    for current_file in fileset:
        current_date = get_date_obs(current_file)
        current_data = read_csv(current_file)
        if (len(current_data) == 0):
            LOG.error("Empty file: %s!", current_file)
            return None
        for cd in current_data:
            feat_data['DATE_OBS'].append(current_date)
            feat_data['FEAT_HG_LONG_DEG'].append(float(cd['FEAT_HG_LONG_DEG']))
            feat_data['FEAT_HG_LAT_DEG'].append(float(cd['FEAT_HG_LAT_DEG']))
            feat_data['FEAT_X_PIX'].append(int(cd['FEAT_X_PIX']))
            feat_data['FEAT_Y_PIX'].append(int(cd['FEAT_Y_PIX']))
            feat_data['FEAT_AREA_DEG2'].append(float(cd['FEAT_AREA_DEG2']))
            feat_data['FEAT_FILENAME'].append(current_file)
    return feat_data
3c1c9018444af04ca8cc7d95176032ad92c42928
3,657,969
import numpy as np

def get_branch_index(edge_index, edge_degree, branch_cutting_frequency=1000):
    """Finds the branch indexes for each branch in the MST.

    Parameters
    ----------
    edge_index : array
        The node index of the ends of each edge.
    edge_degree : array
        The degree for the ends of each edge.
    branch_cutting_frequency : int, optional
        An optimisation parameter, used to remove edges that have already been
        placed into a branch. This significantly improves the speed of the
        algorithm as branches that are already constructed are now removed
        from the branch finder.

    Returns
    -------
    branch_index : list
        A list of branches where each branch is a list of the edge index of
        edges contained in each branch.
    branch_index_rejected : list
        A list of branches that have not been completed. This will occur only
        if a subset of the edge indexes of the full tree is provided.
    """
    degree1 = edge_degree[0]
    degree2 = edge_degree[1]
    index1 = edge_index[0]
    index2 = edge_index[1]

    condition = np.where((degree1 == 2.) & (degree2 == 2.))[0]
    index_branch_mid = condition
    index_branch_mid1 = index1[index_branch_mid]
    index_branch_mid2 = index2[index_branch_mid]

    condition = np.where(((degree1 == 2.) & (degree2 != 2.)) |
                         ((degree1 != 2.) & (degree2 == 2.)))[0]
    index_branch_end = condition
    index_branch_end1 = index1[index_branch_end]
    index_branch_end2 = index2[index_branch_end]
    degree_branch_end1 = degree1[index_branch_end]
    degree_branch_end2 = degree2[index_branch_end]

    check_mid = np.ones(len(index_branch_mid))
    check_end = np.ones(len(index_branch_end))

    branch_index = []
    branch_index_rejected = []

    mask_end = np.ones(index_branch_end.shape, dtype=bool)
    mask_mid = np.ones(index_branch_mid.shape, dtype=bool)

    count = 0
    item = 0

    while item < len(index_branch_end):
        if check_end[item] == 1.:
            check_end[item] = 0.
            done = 0.
            _twig = []
            _twig.append(index_branch_end[item])
            if degree_branch_end1[item] == 2.:
                node_index = index_branch_end1[item]
            elif degree_branch_end2[item] == 2.:
                node_index = index_branch_end2[item]
            else:
                raise ValueError("branch edge incorrect.")
            mask_end[item] = False
            while done == 0.:
                condition = np.where(((check_mid == 1.) & (index_branch_mid1 == node_index)) |
                                     ((check_mid == 1.) & (index_branch_mid2 == node_index)))[0]
                if len(condition) == 0:
                    condition = np.where(((check_end == 1.) & (index_branch_end1 == node_index)) |
                                         ((check_end == 1.) & (index_branch_end2 == node_index)))[0]
                    if len(condition) == 0:
                        branch_index_rejected = branch_index_rejected + \
                            np.ndarray.tolist(np.ndarray.flatten(np.array(_twig)))
                        done = 1.
                    else:
                        check_end[condition] = 0.
                        _twig.append(index_branch_end[condition])
                        done = 1.
                        mask_end[condition] = False
                        branch_index.append(np.ndarray.tolist(np.ndarray.flatten(np.array(_twig))))
                else:
                    if len(condition) == 1:
                        check_mid[condition] = 0.
                        _twig.append(index_branch_mid[condition])
                        if index_branch_mid1[condition] == node_index:
                            node_index = index_branch_mid2[condition]
                        elif index_branch_mid2[condition] == node_index:
                            node_index = index_branch_mid1[condition]
                        else:
                            raise ValueError("Identification error.")
                        mask_mid[condition] = False
                    else:
                        raise ValueError("Found more than one vertex.")
        else:
            pass
        if count % branch_cutting_frequency == 0 and count != 0:
            index_branch_end = index_branch_end[mask_end]
            check_end = check_end[mask_end]
            index_branch_end1 = index_branch_end1[mask_end]
            index_branch_end2 = index_branch_end2[mask_end]
            degree_branch_end1 = degree_branch_end1[mask_end]
            degree_branch_end2 = degree_branch_end2[mask_end]
            index_branch_mid = index_branch_mid[mask_mid]
            check_mid = check_mid[mask_mid]
            index_branch_mid1 = index_branch_mid1[mask_mid]
            index_branch_mid2 = index_branch_mid2[mask_mid]
            mask_end = mask_end[mask_end]
            mask_mid = mask_mid[mask_mid]
            count = count + 1
            item = 0
        elif count % 1001 == 0:
            count = count + 1
            item = item + 1
        elif item == len(index_branch_end) - 1:
            index_branch_end = index_branch_end[mask_end]
            check_end = check_end[mask_end]
            index_branch_end1 = index_branch_end1[mask_end]
            index_branch_end2 = index_branch_end2[mask_end]
            degree_branch_end1 = degree_branch_end1[mask_end]
            degree_branch_end2 = degree_branch_end2[mask_end]
            index_branch_mid = index_branch_mid[mask_mid]
            check_mid = check_mid[mask_mid]
            index_branch_mid1 = index_branch_mid1[mask_mid]
            index_branch_mid2 = index_branch_mid2[mask_mid]
            mask_end = mask_end[mask_end]
            mask_mid = mask_mid[mask_mid]
            count = count + 1
            item = 0
        else:
            count = count + 1
            item = item + 1

    branch_index_rejected = branch_index_rejected + \
        np.ndarray.tolist(np.ndarray.flatten(np.array(index_branch_mid)))
    branch_index = [np.ndarray.tolist(np.hstack(np.array(branch_index[i])))
                    for i in range(0, len(branch_index))]
    if len(branch_index_rejected) != 0:
        branch_index_rejected = np.ndarray.tolist(np.hstack(np.array(branch_index_rejected)))
    return branch_index, branch_index_rejected
3ac24625f9c67cdb60759e840b06b21f260733c9
3,657,970
def update_coverage(coverage, path, func, line, status):
    """Add to coverage the coverage status of a single line"""
    coverage[path] = coverage.get(path, {})
    coverage[path][func] = coverage[path].get(func, {})
    coverage[path][func][line] = coverage[path][func].get(line, status)
    coverage[path][func][line] = coverage[path][func][line].combine(status)
    return coverage
46e5a1e5c4ebba3a9483f90ada96a0f7f94d8c1d
3,657,971
def cross_product(v1, v2):
    """Calculate the cross product of 2 vectors as (x1 * y2 - x2 * y1)."""
    return v1.x * v2.y - v2.x * v1.y
871d803ef687bf80facf036549b4b2062f713994
3,657,972
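The scalar returned above is the z-component of the 3-D cross product; its sign tells whether v2 lies counterclockwise (positive) or clockwise (negative) of v1. A quick check, using a namedtuple as a hypothetical stand-in for the vector type the function expects:

from collections import namedtuple

Vector = namedtuple('Vector', ['x', 'y'])  # hypothetical vector type with .x and .y

print(cross_product(Vector(1, 0), Vector(0, 1)))  #  1: counterclockwise turn
print(cross_product(Vector(0, 1), Vector(1, 0)))  # -1: clockwise turn
print(cross_product(Vector(2, 2), Vector(1, 1)))  #  0: parallel vectors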
import numpy as np

def loadData(fname='Unstra.out2.00008.athdf'):
    """load 3d bfield and calc the current density"""
    # data=ath.athdf(fname,quantities=['B1','B2','B3'])
    time, data = ath.athdf(fname, quantities=['vel1'])
    vx = data['vel1']
    time, data = ath.athdf(fname, quantities=['vel2'])
    vy = data['vel2']
    time, data = ath.athdf(fname, quantities=['vel3'])
    vz = data['vel3']
    x = data['x1f']
    y = data['x2f']
    z = data['x3f']

    # ---
    def curl(vx, vy, vz, dx, dy, dz):
        [dzvx, dyvx, dxvx] = np.gradient(vx)
        [dzvy, dyvy, dxvy] = np.gradient(vy)
        [dzvz, dyvz, dxvz] = np.gradient(vz)
        cx = dyvz/dy - dzvy/dz
        cy = dzvx/dz - dxvz/dx
        cz = dxvy/dx - dyvx/dy
        # No need to del the references manually; allow Python to perform its
        # own garbage collection after the function returns cx, cy, cz
        # del dzvx
        # del dzvy
        # del dzvz
        return cx, cy, cz
    # ---

    dx = dz = x[1] - x[0]
    dy = y[1] - y[0]
    jx, jy, jz = curl(vx, vy, vz, dx, dy, dz)
    w2 = jx**2 + jy**2 + jz**2
    del jx, jy, jz, vx, vy, vz
    return w2
121768232fe71ce8ce3714aea70b5bf2c7493907
3,657,973
def text_iou(ground_truth: Text, prediction: Text) -> ScalarMetricValue:
    """
    Calculates agreement between ground truth and predicted text
    """
    return float(prediction.answer == ground_truth.answer)
5ea135b30ba93da45fb1ecd624fe7dc556f01cf5
3,657,974
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to greatest

    :param num: int
    :return: list (int)
    """
    return [d for d in range(1, num + 1) if num % d == 0]
f15169b2672847294a219207f6022ad3e49338d2
3,657,975
def space_oem(*argv):
    """Handle oem files

    Usage:
      space-oem get <selector>...
      space-oem insert (- | <file>)
      space-oem compute (- | <selector>...) [options]
      space-oem list <selector>... [options]
      space-oem purge <selector>... [--until <until>]
      space-oem list-tags <selector>...
      space-oem tag <selector> <tag> [options]

    Options:
      get                   Retrieve an existing OEM from the database
      insert                Insert an OEM into the database
      compute               Compute OEM from an other OPM, OEM or TLE
      list                  List existing ephemerides
      purge                 Remove old OEMs. Use --last option
      list-tags             List available tags for ephems of the selected objects
      tag                   Create a tag for a particular ephem
      <selector>            Selector of the satellite (see help of the "sat" command)
      -f, --frame <frame>   Frame in which to write the file to
      -d, --date <date>     Start date of the ephem [default: midnight]
                            (format %Y-%m-%dT%H:%M:%S)
      -r, --range <days>    Duration of extrapolation [default: 3d]
      -s, --step <step>     Step size of the OEM [default: 180s]
      -i, --interp <inter>  Interpolation method (linear, lagrange)
                            [default: lagrange]
      -l, --last <last>     When listing print the last N OEM [default: 10]
      -I, --insert          Insert the computed OEM into the database
      -F, --force           Force insertion
      --until <until>       When purging, remove all file older than this date
                            [default: 4w] May be a duration, or a date
    """
    return _generic_cmd("oem", space_oem.__doc__, *argv)
54c479f7008f475f778f491c7b6c5574390fd38c
3,657,976
def compare_distance(tree, target):
    """
    Checks tree edit distance. Since every node has a unique position, we
    know that the node is the same when the positions are the same. Hence,
    a simple method of counting the number of edits one needs to do to
    create the target tree out of a given tree is equal to the number of
    positional differences.
    """
    # check for positional overlap
    edit_value = 0
    for node in target:
        node.found = False
    for node in tree:
        same_node = False
        for t_node in target:
            if node.pos[0] == t_node.pos[0] and node.pos[1] == t_node.pos[1]:
                same_node = True
                t_node.found = True
        if not same_node:
            edit_value += 1
    # count found
    for node in target:
        if not node.found:
            edit_value += 1
    return edit_value
96b57e88b8e70dbb43231b56cbe7e9b7ebcfd10f
3,657,977
def header(name='peptide'):
    """
    Parameters
    ----------
    name

    Returns
    -------
    """
    with open('{}.pdb'.format(name), 'r') as f:
        file = f.read()
    model = file.find('\nMODEL')
    atom = file.find('\nATOM')
    if atom < 0:
        raise ValueError('no ATOM entries found in PDB')
    if model < 0:
        index = atom
    else:
        index = min(model, atom)
    return file[:index] + '\n'
84e75e34771b7c395ee36611c8d055ca1fdf67dc
3,657,978
from datetime import datetime

def isoUTC2datetime(iso):
    """Convert an ISO8601 (UTC only) like string date/time value to a
    :obj:`datetime.datetime` object.

    :param str iso: ISO8601 string
    :rtype: datetime.datetime
    """
    formats = ["%Y-%m-%d %H:%M:%S",
               "%Y-%m-%d %H:%M:%S.%f"]
    if 'T' in iso:
        formats = ["%Y-%m-%dT%H:%M:%S",
                   "%Y-%m-%dT%H:%M:%S.%f",
                   "%Y-%m-%dT%H:%M:%SZ",
                   "%Y-%m-%dT%H:%M:%S.%fZ"]
    for fmt in formats:
        try:
            return datetime.strptime(iso, fmt)
        except ValueError:
            continue
    raise ValueError("Couldn't parse ISO8601 string '{}'".format(iso))
0dae4fb7828f7319afa7190deca6ae4fda5ffd1d
3,657,979
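Both space- and 'T'-separated forms parse, with or without fractional seconds or a trailing Z:

print(isoUTC2datetime("2021-06-01 12:30:00"))       # 2021-06-01 12:30:00
print(isoUTC2datetime("2021-06-01T12:30:00.250Z"))  # 2021-06-01 12:30:00.250000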
from typing import Optional
from typing import Dict
from typing import Union

def groupstatus(aid: int, state: int = 0) -> EndpointResult:
    """Retrieve anime release status for different groups.

    :param aid: anidb anime id
    :type aid: int
    :param state: release state. int 1 to 6.
        Example: zenchi.mappings.group_status.ONGOING
    :type state: int, optional
    :return: a tuple (data, code). data is a dictionary with the keys:

        if code in (325, 330):
            :message str: NO SUCH GROUPS FOUND, NO SUCH ANIME

        if code == 225:
            :status: List of dictionaries with the following keys:

                :group_id int:
                :group_name str:
                :completion_state int:
                :last_episode_number int:
                :rating int:
                :votes int:
                :episode_range str:

            :truncated bool: if the response was truncated because it didn't
                fit the UDP packet, this will be True.
    :rtype: EndpointResult
    """
    def cb(code: int, response: str) -> Optional[EndpointDict]:
        if code in (325, 330):
            return dict(message=response_message[code])
        if code == 225:
            result = []
            groups_data = response.splitlines()[1:]
            truncated = False
            for group_data in groups_data:
                parts = group_data.split("|")
                if len(parts) < 7:
                    logger.warning(
                        "Response was truncated, too much data for UDP packet."
                    )
                    truncated = True
                    break
                result.append(
                    {
                        "group_id": int(parts[0]),
                        "group_name": parts[1],
                        "completion_state": int(parts[2]),
                        "last_episode_number": int(parts[3]),
                        "rating": int(parts[4]),
                        "votes": int(parts[5]),
                        "episode_range": parts[6],
                    }
                )
            return dict(status_list=result, truncated=truncated)
        return None

    params: Dict[str, Union[str, int]] = dict(aid=aid)
    if state:
        params["state"] = state
    return send("GROUPSTATUS", params, cb)
f81ab06c8d47b9660cac9bde76978a722a13f49f
3,657,980
def get_communities_codes(communities, fields=None, community_field='Community'):
    """From the postal code conversion file, select entries for the `communities`.

    This function is similar to get_community_codes, but works if `communities`
    and `fields` are strings or lists of strings.
    """
    if not isinstance(communities, pd.DataFrame) and not isinstance(communities, pd.Series):
        communities = pd.Series(communities, name=community_field)
    df = _pccf_df.merge(communities, on=community_field)
    return df if fields is None else df[ensure_list(fields) + [community_field]].drop_duplicates()
552fef722cd138f1a935755349116c89e0df3e3b
3,657,981
from vba import VBA
from dataFrame import DF

def GLMFit_(file, designMatrix, mask, outputVBA, outputCon,
            fit="Kalman_AR1"):
    """
    Call the GLM Fit function with appropriate arguments

    Parameters
    ----------
    file
    designMatrix
    mask
    outputVBA
    outputCon
    fit='Kalman_AR1'

    Returns
    -------
    glm, a vba.VBA instance representing the GLM
    """
    if fit == "Kalman_AR1":
        model = "ar1"
        method = "kalman"
    elif fit == "Ordinary Least Squares":
        method = "ols"
        model = "spherical"
    elif fit == "Kalman":
        method = "kalman"
        model = "spherical"

    s = dict()
    s["GlmDumpFile"] = outputVBA
    s["ConfigFilePath"] = outputCon
    s["DesignFilePath"] = designMatrix

    tab = DF.read(designMatrix)
    glm = VBA(tab, mask_url=mask, create_design_mat=False,
              mri_names=file, model=model, method=method)
    glm.fit()
    glm.save(s)
    return glm
25ced91bc6c865faaffab30278d59aad6a475d4f
3,657,982
from typing import Iterable def get_stoch_rsi(quotes: Iterable[Quote], rsi_periods: int, stoch_periods: int, signal_periods: int, smooth_periods: int = 1): """Get Stochastic RSI calculated. Stochastic RSI is a Stochastic interpretation of the Relative Strength Index. Parameters: `quotes` : Iterable[Quote] Historical price quotes. `rsi_periods` : int Number of periods for the RSI. `stoch_periods` : int Number of periods for the Stochastic. `signal_periods` : int Number of periods for the Stochastic RSI SMA signal line. `smooth_periods` : int, defaults 1 Number of periods for Stochastic Smoothing. Use 1 for Fast or 3 for Slow. Returns: `StochRSIResults[StochRSIResult]` StochRSIResults is list of StochRSIResult with providing useful helper methods. See more: - [Stochastic RSI Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/StochRsi/#content) - [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content) """ stoch_rsi_results = CsIndicator.GetStochRsi[Quote](CsList(Quote, quotes), rsi_periods, stoch_periods, signal_periods, smooth_periods) return StochRSIResults(stoch_rsi_results, StochRSIResult)
b548a620ef3b3bc4cb37049d1dfb29aac442b394
3,657,983
def PUtilHann (inUV, outUV, err, scratch=False):
    """
    Hanning smooth a UV data set

    returns smoothed UV data object
    inUV    = Python UV object to smooth
              Any selection, editing and calibration are applied first.
    outUV   = Predefined UV data if scratch is False, ignored if
              scratch is True.
    err     = Python Obit Error/message stack
    scratch = True if this is to be a scratch file (same type as inUV)
    """
    ################################################################
    if inUV.myClass == 'AIPSUVData':
        raise TypeError("Function unavailable for " + inUV.myClass)
    # Checks
    if not inUV.UVIsA():
        raise TypeError("inUV MUST be a Python Obit UV")
    if ((not scratch) and (not outUV.UVIsA())):
        raise TypeError("outUV MUST be a Python Obit UV")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    # Create output for scratch
    if scratch:
        outUV = UV("None")
    outUV.me = Obit.UVUtilHann(inUV.me, scratch, outUV.me, err.me)
    if err.isErr:
        OErr.printErrMsg(err, "Error Hanning UV data")
    # Get scratch file info
    if scratch:
        PUVInfo (outUV, err)
    return outUV
    # end PUtilHann
a53f8d442055b2d575b36f49a96b68f6c6eff7ed
3,657,984
def str2bytes(seq):
    """
    Converts a string to a list of integer code points
    """
    # wrap in list() so the documented return type also holds on Python 3,
    # where map() returns a lazy iterator
    return list(map(ord, str(seq)))
7afe8e40cd4133c59be673b537f2717591b093cf
3,657,985
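A one-line demonstration of the helper above:

print(str2bytes("abc"))  # [97, 98, 99]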
def __downloadFilings(cik: str) -> list:
    """Function to download the XML text of listings pages for a given CIK
    from the EDGAR database.

    Arguments:
        cik {str} -- Target CIK.

    Returns:
        list -- List of page XML, comprising full listing metadata for CIK.
    """
    idx = 0      # Current page index
    end = False  # Flag marking the last page
    count = 100  # Number of results per page (limited by SEC)
    # Text indicating next page exists
    next_page_text = 'rel="next" type="application/atom+xml" />'
    pages = []
    while not end:
        # Making request
        page_text = __makeRequest(cik=cik, start_idx=idx, count=count)
        end = (page_text.find(next_page_text) == -1)  # Update end flag
        idx += count  # Increment index for next page
        pages.append(page_text)  # Save page text
    return pages
c98996d3607076ed0328a5e0621ef015037ddc2e
3,657,986
def KK_RC43_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
    """
    # Series resistance plus 43 parallel RC elements in series; this loop is
    # equivalent to the fully written-out sum over R1..R43.
    Rs = params["Rs"]
    return Rs + sum(
        params["R{}".format(i)] / (1 + w * 1j * t_values[i - 1])
        for i in range(1, 44)
    )
9f88b73ac5da422069e67af28c15c2846178169b
3,657,987
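A minimal evaluation sketch for the model above, assuming numpy arrays and a plain dict standing in for the fitted parameter object (names and values hypothetical):

import numpy as np

params = {"Rs": 1.0}
params.update({"R{}".format(i): 1.0 for i in range(1, 44)})
t_values = np.logspace(-6, 2, 43)          # one time constant per RC element
w = 2 * np.pi * np.logspace(-2, 5, 50)     # angular frequencies [rad/s]
Z = KK_RC43_fit(params, w, t_values)       # complex impedance, shape (50,)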
from .column import ColumnVirtualConstant def vconstant(value, length, dtype=None, chunk_size=1024): """Creates a virtual column with constant values, which uses 0 memory. :param value: The value with which to fill the column :param length: The length of the column, i.e. the number of rows it should contain. :param dtype: The preferred dtype for the column. :param chunk_size: Could be used to optimize the performance (evaluation) of this column. """ return ColumnVirtualConstant(value=value, length=length, dtype=dtype, chunk_size=chunk_size)
b712ec9f1aea2f65f1f992cd3b23ab671339f97a
3,657,988
import numpy as np


def gen_color_palette(n: int):
    """
    Generates a hex color palette of size n, using only light colors (easily
    visible on dark background). Colors are unique unless n exceeds the number
    of available options, in which case repeats are allowed.
    Adapted from code by 3630 TAs Binit Shah and Jerred Chen

    Args:
        n (int): number of clouds, each cloud gets a unique color
    """
    palette = []
    do_replace = False if len(COLOR_OPTIONS) >= n else True
    for i in np.random.choice(len(COLOR_OPTIONS), n, replace=do_replace):
        palette.append(COLOR_OPTIONS[i])
    return palette
6b1004674d1448cdcca8c3500b149b1602e0045f
3,657,991
def absolute_vorticity(u, v, dx, dy, lats, dim_order='yx'):
    """Calculate the absolute vorticity of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be
        one item less than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be
        one item less than the size of `u` along the applicable axis.
    lats : (M, N) ndarray
        latitudes of the wind data
    dim_order : str, optional
        Ordering of dimensions in the passed-in arrays, either 'yx' (default)
        or 'xy'.

    Returns
    -------
    (M, N) ndarray
        absolute vorticity

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either
    leading dimensions of (x, y) or trailing dimensions of (y, x), depending
    on the value of ``dim_order``.

    """
    f = coriolis_parameter(lats)
    relative_vorticity = vorticity(u, v, dx, dy, dim_order=dim_order)
    return relative_vorticity + f
9ae200b3a8b8415f67fc640b0702bc5272c77d3a
3,657,992
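The Coriolis term the helper adds can be sanity-checked independently; a minimal numpy sketch of f = 2*Omega*sin(lat), the relation coriolis_parameter is assumed to implement:

import numpy as np

OMEGA = 7.292115e-5            # Earth's angular velocity [rad/s]
lats = np.deg2rad([0.0, 30.0, 45.0, 90.0])
f = 2 * OMEGA * np.sin(lats)   # Coriolis parameter [1/s]
print(f)                       # 0 at the equator, ~1.458e-4 at the pole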
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = input.new_empty(shape).bernoulli_(keep_prob) if keep_prob > 0.0 and scale_by_keep: random_tensor.div_(keep_prob) return input * random_tensor
289ae545fa184bb459275685d3a2894e5219db2e
3,657,993
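A short PyTorch sketch of the stochastic-depth behavior above: each sample in the batch is either zeroed out or rescaled by 1/keep_prob:

import torch

x = torch.ones(4, 3, 8, 8)                      # batch of 4 feature maps
y = drop_path(x, drop_prob=0.5, training=True)
# Per-sample values are all 0.0 (dropped) or all 2.0 (kept and rescaled).
print(y.view(4, -1).amax(dim=1))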
from typing import Dict from typing import Any from typing import List import secrets def ask_user_config() -> Dict[str, Any]: """ Ask user a few questions to build the configuration. Interactive questions built using https://github.com/tmbo/questionary :returns: Dict with keys to put into template """ questions: List[Dict[str, Any]] = [ { "type": "confirm", "name": "dry_run", "message": "Do you want to enable Dry-run (simulated trades)?", "default": True, }, { "type": "text", "name": "stake_currency", "message": "Please insert your stake currency:", "default": 'USDT', }, { "type": "text", "name": "stake_amount", "message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):", "default": "100", "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val), "filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"' if val == UNLIMITED_STAKE_AMOUNT else val }, { "type": "text", "name": "max_open_trades", "message": f"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):", "default": "3", "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val), "filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"' if val == UNLIMITED_STAKE_AMOUNT else val }, { "type": "text", "name": "timeframe", "message": "Please insert your desired timeframe (e.g. 5m):", "default": "5m", }, { "type": "text", "name": "fiat_display_currency", "message": "Please insert your display Currency (for reporting):", "default": 'USD', }, { "type": "select", "name": "exchange_name", "message": "Select exchange", "choices": [ "binance", "binanceus", "bittrex", "kraken", "ftx", "kucoin", "gateio", Separator(), "other", ], }, { "type": "autocomplete", "name": "exchange_name", "message": "Type your exchange name (Must be supported by ccxt)", "choices": available_exchanges(), "when": lambda x: x["exchange_name"] == 'other' }, { "type": "password", "name": "exchange_key", "message": "Insert Exchange Key", "when": lambda x: not x['dry_run'] }, { "type": "password", "name": "exchange_secret", "message": "Insert Exchange Secret", "when": lambda x: not x['dry_run'] }, { "type": "password", "name": "exchange_key_password", "message": "Insert Exchange API Key password", "when": lambda x: not x['dry_run'] and x['exchange_name'] == 'kucoin' }, { "type": "confirm", "name": "telegram", "message": "Do you want to enable Telegram?", "default": False, }, { "type": "password", "name": "telegram_token", "message": "Insert Telegram token", "when": lambda x: x['telegram'] }, { "type": "text", "name": "telegram_chat_id", "message": "Insert Telegram chat id", "when": lambda x: x['telegram'] }, { "type": "confirm", "name": "api_server", "message": "Do you want to enable the Rest API (includes FreqUI)?", "default": False, }, { "type": "text", "name": "api_server_listen_addr", "message": "Insert Api server Listen Address (best left untouched default!)", "default": "127.0.0.1", "when": lambda x: x['api_server'] }, { "type": "text", "name": "api_server_username", "message": "Insert api-server username", "default": "freqtrader", "when": lambda x: x['api_server'] }, { "type": "text", "name": "api_server_password", "message": "Insert api-server password", "when": lambda x: x['api_server'] }, ] answers = prompt(questions) if not answers: # Interrupted questionary sessions return an empty dict. raise OperationalException("User interrupted interactive questions.") # Force JWT token to be a random string answers['api_server_jwt_key'] = secrets.token_hex() return answers
7697ba65c7ba7f73b81af3ae3575beb0eb9b30b8
3,657,994
def generate_menusystem(): """ Generate Top-level Menu Structure (cached for specified timeout) """ return '[%s] Top-level Menu System' % timestamp()
eb3575835889af768887f3071816d0f22f867568
3,657,995
from numpy import sin, cos


def gnomonic_proj(lon, lat, lon0=0, lat0=0):
    """
    lon, lat : arrays of the same shape; longitude and latitude (in radians)
        of points to be projected
    lon0, lat0 : floats, longitude and latitude in radians for the tangency point
    ---------------------------
    Returns the gnomonic projection, x, y
    https://mathworld.wolfram.com/GnomonicProjection.html
    """
    # cosc is the cosine of the angular distance from the tangency point
    cosc = sin(lat0)*sin(lat) + cos(lat0)*cos(lat)*cos(lon-lon0)
    x = cos(lat)*sin(lon-lon0)/cosc
    y = (cos(lat0)*sin(lat) - sin(lat0)*cos(lat)*cos(lon-lon0))/cosc
    return x, y
61daaee7bc0ca5dd901582adc03ec6c36ddf2ef2
3,657,998
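A quick numerical check of the projection: the tangency point maps to the origin, and a point 10 degrees east of it maps to x = tan(10 deg):

import numpy as np

print(gnomonic_proj(0.0, 0.0))               # (0.0, 0.0)
print(gnomonic_proj(np.deg2rad(10.0), 0.0))  # (~0.1763, 0.0)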
def local_pluggables(pluggable_type): """ Accesses pluggable names Args: pluggable_type (Union(PluggableType,str)): The pluggable type Returns: list[str]: pluggable names Raises: AquaError: if the type is not registered """ _discover_on_demand() if isinstance(pluggable_type, str): for ptype in PluggableType: if ptype.value == pluggable_type: pluggable_type = ptype break if not isinstance(pluggable_type, PluggableType): raise AquaError( 'Invalid pluggable type {}'.format(pluggable_type)) if pluggable_type not in _REGISTRY_PLUGGABLE.registry: raise AquaError('{} not registered'.format(pluggable_type)) return [pluggable.name for pluggable in _REGISTRY_PLUGGABLE.registry[pluggable_type].values()]
8626e931da1fd33d76cef4ed85b6ea1a7d7e907d
3,657,999
def view_folio_contact(request, folio_id=None): """ View contact page within folio """ folio = get_object_or_404(Folio, pk=folio_id) if not folio.is_published and folio.author_id != request.user: return render( request, 'showcase/folio_is_not_published.html' ) author = get_object_or_404( UserAccount, pk=folio.author_id.id ) message_form = SendAuthorMessageForm() context = { "user": request.user, "folio": folio, "author": author, "form": message_form } return render( request, 'showcase/view_folio_contact.html', context=context)
0269fea6322486912cdd462961fb847ffd8d038a
3,658,000
def faom03(t): """ Wrapper for ERFA function ``eraFaom03``. Parameters ---------- t : double array Returns ------- c_retval : double array Notes ----- The ERFA documentation is below. - - - - - - - - - - e r a F a o m 0 3 - - - - - - - - - - Fundamental argument, IERS Conventions (2003): mean longitude of the Moon's ascending node. Given: t double TDB, Julian centuries since J2000.0 (Note 1) Returned (function value): double Omega, radians (Note 2) Notes: 1) Though t is strictly TDB, it is usually more convenient to use TT, which makes no significant difference. 2) The expression used is as adopted in IERS Conventions (2003) and is from Simon et al. (1994). References: McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), IERS Technical Note No. 32, BKG (2004) Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 Copyright (C) 2013-2017, NumFOCUS Foundation. Derived, with permission, from the SOFA library. See notes at end of file. """ c_retval = ufunc.faom03(t) return c_retval
3e3d1c7e650d6034ed0793e4f1bc8605e9e82e32
3,658,001
from datetime import datetime, timedelta
import calendar

import pandas as pd


def get_dtindex(interval, begin, end=None):
    """Creates a pandas datetime index for a given interval.

    Parameters
    ----------
    interval : str or int
        Interval of the datetime index. Integer values will be treated as days.
    begin : datetime
        Datetime index start date.
    end : datetime, optional
        Datetime index end date, defaults to current date.

    Returns
    -------
    dtindex : pandas.tseries.index.DatetimeIndex
        Datetime index.
    """
    if end is None:
        end = datetime.now()
    if interval in ['dekad', 'dekadal', 'decadal', 'decade']:
        dtindex = dekad_index(begin, end)
    elif interval in ['daily', 'day', '1']:
        dtindex = pd.date_range(begin, end, freq='D')
    elif interval in ['weekly', 'week', '7']:
        # snap the start to the Sunday of the first week
        begin2 = begin - timedelta(begin.weekday()) + timedelta(6)
        dtindex = pd.date_range(begin2, end, freq='7D')
    elif interval in ['monthly', 'month']:
        # extend the end date to the last day of its month
        lday = calendar.monthrange(end.year, end.month)[1]
        end = datetime(end.year, end.month, lday)
        dtindex = pd.date_range(begin, end, freq='M')
    if type(interval) is int:
        dtindex = pd.date_range(begin, end, freq=str(str(interval) + 'D'))
    return dtindex
32f0992365b075fb8601276bd3680c7db43a677e
3,658,002
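Usage sketch for the index builder above (the monthly branch is self-contained given pandas):

from datetime import datetime

idx = get_dtindex('monthly', datetime(2020, 1, 1), datetime(2020, 6, 30))
print(idx)  # month-end stamps 2020-01-31 ... 2020-06-30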
import numpy def asanyarray(a, dtype=None, order=None): """Converts the input to an array, but passes ndarray subclasses through. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes scalars, lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. dtype : dtype, optional By default, the dtype is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-stype) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray or an ndarray subclass Array interpretation of *a*. If *a* is a subclass of ndarray, it is returned as-is and no copy is performed. See Also -------- asarray : Converts the input to an array. Examples -------- Convert a list into an array: >>> import nlcpy as vp >>> a = [1, 2] >>> vp.asanyarray(a) array([1, 2]) """ if isinstance(a, ndarray): if dtype is None and order is None: return a elif dtype is not None and order is None: if a.dtype == numpy.dtype(dtype): return a elif dtype is None and order is not None: order_char = internal._normalize_order(order) order_char = chr(core._update_order_char(a, order_char)) if order_char == 'C' and a._c_contiguous: return a if order_char == 'F' and a._f_contiguous: return a else: order_char = internal._normalize_order(order) order_char = chr(core._update_order_char(a, order_char)) if a.dtype == numpy.dtype(dtype) and \ (order_char == 'C' and a._c_contiguous or order_char == 'F' and a._f_contiguous): return a return core.array(a, dtype=dtype, order=order)
c079d114ab224c487a65929b7710450262c66733
3,658,003
import base64
import struct
from datetime import datetime, timedelta


def parse_fernet_timestamp(ciphertext):
    """
    Returns timestamp embedded in Fernet-encrypted ciphertext, converted
    to Python datetime object.

    Decryption should be attempted before using this function, as that does
    cryptographically strong tests on the validity of the ciphertext.
    """
    try:
        decoded = base64.urlsafe_b64decode(ciphertext)
        # The token embeds a big-endian 64-bit Unix epoch timestamp in
        # bytes 1-8 (byte 0 is the version).
        epoch_timestamp = struct.unpack('>Q', decoded[1:9])[0]
        timestamp = datetime(1970, 1, 1) + timedelta(seconds=epoch_timestamp)
        return timestamp
    except struct.error as e:
        # ``struct.error`` has no ``message`` attribute on Python 3
        raise ValueError(str(e))
216d314c84679cc5806d6a483f68bff485375b36
3,658,005
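A round-trip sketch, assuming the cryptography package is available, that produces a Fernet token and reads back its embedded creation time:

from cryptography.fernet import Fernet

key = Fernet.generate_key()
token = Fernet(key).encrypt(b"payload")
print(parse_fernet_timestamp(token))  # token creation time (UTC)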
def tagcloud(guids): """Get "tag cloud" for the search specified by guids Same return format as taglist, impl is always False. """ guids = set(guids) range = (0, 19 + len(guids)) tags = request.client.find_tags("EI", "", range=range, guids=guids, order="-post", flags="-datatag") return [(tagfmt(t.name), t, False) for t in tags if t.guid not in guids]
fb94fab24040b3c38a68a2731d9b1bba0cccd3bc
3,658,006
import errno
import os


def _ValidateContent(path, expected_content):
    """Helper to validate the given file's content."""
    assert os.path.isfile(path), 'File didn\'t exist: %r' % path
    name = os.path.basename(path)
    current_content = open(path).read()
    if current_content == expected_content:
        print('%s is good.' % name)
    else:
        try:
            open(path, 'w').write(expected_content)
            print('Updated %s.' % name)
        except IOError as e:
            if e.errno != errno.EACCES:
                raise
            print('%r needs to be updated but is not writable.' % path)
            return False
    return True
ddf6e3089f66d157f281655357753ff2b746d4a2
3,658,007
from typing import Optional from typing import Sequence def api_ofrecord_image_decoder_random_crop( input_blob: remote_blob_util.BlobDef, blob_name: str, color_space: str = "BGR", num_attempts: int = 10, seed: Optional[int] = None, random_area: Sequence[float] = [0.08, 1.0], random_aspect_ratio: Sequence[float] = [0.75, 1.333333], name: str = "OFRecordImageDecoderRandomCrop", ) -> remote_blob_util.BlobDef: """This operator is an image decoder with random crop. Args: input_blob (BlobDef): The input Blob blob_name (str): The name of the Blob color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR". num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10. seed (Optional[int], optional): The random seed. Defaults to None. random_area (Sequence[float], optional): The random cropping area. Defaults to [0.08, 1.0]. random_aspect_ratio (Sequence[float], optional): The random scaled ratio. Defaults to [0.75, 1.333333]. name (str, optional): The name for the operation. Defaults to "OFRecordImageDecoderRandomCrop". Returns: BlobDef: The random cropped Blob For example: .. code-block:: python import oneflow as flow import oneflow.typing as tp from typing import Tuple @flow.global_function(type="predict") def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]: batch_size = 16 color_space = "RGB" # our ofrecord file path is "./dataset/part-0" ofrecord = flow.data.ofrecord_reader( "./imgdataset", batch_size=batch_size, data_part_num=1, part_name_suffix_length=-1, part_name_prefix='part-', random_shuffle=True, shuffle_after_epoch=True, ) image = flow.data.OFRecordImageDecoderRandomCrop( ofrecord, "encoded", color_space=color_space ) res_image, scale, new_size = flow.image.Resize( image, target_size=(224, 224) ) label = flow.data.OFRecordRawDecoder( ofrecord, "class/label", shape=(1, ), dtype=flow.int32 ) return res_image, label if __name__ == "__main__": images, labels = ofrecord_reader_job() # images.shape (16, 224, 224, 3) """ assert isinstance(name, str) if seed is not None: assert name is not None module = flow.find_or_create_module( name, lambda: OFRecordImageDecoderRandomCropModule( blob_name=blob_name, color_space=color_space, num_attempts=num_attempts, random_seed=seed, random_area=random_area, random_aspect_ratio=random_aspect_ratio, name=name, ), ) return module(input_blob)
bcf8ad7deb97677e52b04e3204281a7ecc89c89c
3,658,009
def student_add_information(adding_student_id, student_information):
    """
    Add detailed information for a student.
    :@param adding_student_id: int
    :@param student_information: dict or str
    :@return : execution status (True or False)
    """
    if type(student_information) == dict:
        adding_information = student_information
    elif type(student_information) == str:
        adding_information = {}
        tmp_key = ''
        tmp_adding_key = ''
        tmp_value = ''
        state = 'write_key'
        for k in student_information:
            # Track which part of a "key:value" pair we are reading
            if k == ':':
                tmp_value = ''
                state = 'write_value'
                continue
            elif k == '\n':
                tmp_adding_key = tmp_key
                tmp_key = ''
                state = 'write_key'
                adding_information[tmp_adding_key] = tmp_value
                continue
            # Append the character to the current key or value
            if state == 'write_key':
                tmp_key += k
            elif state == 'write_value':
                tmp_value += k
    else:
        return False, 2
    times = 0
    adding_info_list = [adding_student_id]
    for i in adding_information.keys():
        times += 1
        adding_info_list.append(adding_information.get(i))
    # pad the remaining columns with NULLs (the table has 6 columns)
    for j in range(0, 5 - times):
        adding_info_list.append(None)
    adding_info_tuple = tuple(adding_info_list)
    adding_info_final = [adding_info_tuple]
    cur.executemany("insert into student_info values(%s,%s,%s,%s,%s,%s)", adding_info_final)
    conn.commit()
    return True
8a44177b90c1f3e10077313f6765a4699ded676b
3,658,010
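The string form the parser above expects is newline-terminated key:value pairs; a sketch with hypothetical values (cur and conn are assumed module-level database handles):

info = "name:Alice\ngender:F\nclass:3\n"
student_add_information(20210001, info)
# equivalent dict form:
student_add_information(20210001, {"name": "Alice", "gender": "F", "class": "3"})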
async def get_show_by_month_day(month: conint(ge=1, le=12), day: conint(ge=1, le=31)):
    """Retrieve a Show object, based on month and day, containing:
    Show ID, date and basic information."""
    try:
        show = Show(database_connection=_database_connection)
        shows = show.retrieve_by_month_day(month, day)
        if not shows:
            raise HTTPException(
                status_code=404,
                detail=f"Shows for month {month:02d} and day {day:02d} not found",
            )
        else:
            return {"shows": shows}
    except ValueError:
        raise HTTPException(
            status_code=404,
            detail=f"Shows for month {month:02d} and day {day:02d} not found",
        )
    except ProgrammingError:
        raise HTTPException(
            status_code=500,
            detail="Unable to retrieve show information from the database",
        )
    except DatabaseError:
        raise HTTPException(
            status_code=500,
            detail="Database error occurred while retrieving "
            "show information from the database",
        )
e774f61254a3d7cdfc9a49ca1a9eea4f65853f55
3,658,012
from random import random


def generate_random_tag(length):
    """Generate a random alphanumeric tag of specified length.

    Parameters
    ----------
    length : int
        The length of the tag, in characters

    Returns
    -------
    str
        An alphanumeric tag of specified length.

    Notes
    -----
    The generated tag will not use possibly ambiguous characters from
    this set:

    - '0' and '1'
    - 'i' and 'I'
    - 'l' and 'L'
    - 'o' and 'O'
    """
    characters_set = ('23456789' +
                      'abcdefghjkmnpqrstuvwxyz' +
                      'ABCDEFGHJKMNPQRSTUVWXYZ')
    return ''.join([characters_set[int(random() * len(characters_set))]
                    for _ in range(length)])
b62a103663b69f0a27d8ba23134473dc01932409
3,658,013
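A quick demonstration of the tag generator above:

print(generate_random_tag(8))  # e.g. 'wV7kM3qZ'; never contains 0/1/i/I/l/L/o/O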
from scipy import io


def loadmat(filename, check_arrays=False, **kwargs):
    """
    Big thanks to mergen on stackexchange for this:
        http://stackoverflow.com/a/8832212

    This function should be called instead of direct scipy.io.loadmat
    as it cures the problem of not properly recovering python dictionaries
    from mat files. It calls the function _check_keys to cure all entries
    which are still mat-objects.
    """
    kwargs["struct_as_record"] = False
    kwargs["squeeze_me"] = True
    data = io.loadmat(filename, **kwargs)
    return _check_keys(data, check_arrays)
3b054cbabc03b468ec0c80a4ed544b1c054ef223
3,658,014
def _export_output_to_tensors(export_output):
    """Get a list of `Tensors` used in `export_output`.

    Args:
        export_output: an `ExportOutput` object such as `ClassificationOutput`,
            `RegressionOutput`, or `PredictOutput`.

    Returns:
        a list of tensors used in export_output.

    Raises:
        ValueError: if `export_output` is not one of `ClassificationOutput`,
            `RegressionOutput`, or `PredictOutput`.
    """
    if isinstance(export_output, export_output_lib.ClassificationOutput):
        return [export_output.scores, export_output.classes]
    elif isinstance(export_output, export_output_lib.RegressionOutput):
        return [export_output.value]
    elif isinstance(export_output, export_output_lib.PredictOutput):
        # ``values()`` is a view in Python 3; materialize it so the function
        # returns a list as documented.
        return list(export_output.outputs.values())
    else:
        raise ValueError(
            '`export_output` must have type `ClassificationOutput`, '
            '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
913c1b232f8ac6e66e9104c055c9d34726db1027
3,658,015
import logging


def train_city_s1(city: str, pollutant='PM2.5', n_jobs=-2, default_meta=False, search_wind_damp=False, choose_cat_hour=False, choose_cat_month=True, add_weight=True, instr='MODIS', op_fire_zone=False, op_fire_twice=False, op_lag=True, search_tpot=False, main_data_folder: str = '../data/', model_folder='../models/', report_folder='../reports/'):
    """Training pipeline: process raw data, tune hyperparameters, and save the model.

    Args:
        city: city name
        pollutant(optional): pollutant name
        n_jobs(optional): number of CPUs to use during optimization
        default_meta(optional): if True, override meta setting with the default value
        search_wind_damp(optional): if True, search in four options of the fire features.
        add_weight(optional): if True, use non-uniform weight when fitting and evaluating the model.
        instr(optional): hotspot detection instrument to use
        choose_cat_hour(optional): if True, see if adding/not adding hour as categorical variable is better
        choose_cat_month(optional): if True, see if adding/not adding month as categorical variable is better
        op_fire_zone(optional): if True, also optimize the fire zone
        op_fire_twice(optional): if True, optimize fire data again after optimizing lag
        op_lag(optional): if True, search for the best lag features; otherwise skip adding lag
        search_tpot(optional): if True, also search for other model using TPOT
        main_data_folder(optional): main data folder for initializing Dataset object [default:'../data/']
        model_folder(optional): model folder for initializing Dataset object [default:'../models/']
        report_folder(optional): folder to save figure for initializing Dataset object [default:'../reports/']

    Returns:
        dataset: dataset object
        model: model object
        trainer: the Trainer object (its poll_meta attribute holds the parameter dictionary)

    """
    # start logging
    set_logging(level=10)
    logger = logging.getLogger(__name__)
    # initialize a trainer object
    trainer = Trainer(city=city, pollutant=pollutant, instr=instr)
    trainer.n_jobs = n_jobs
    if default_meta:
        trainer.get_default_meta()
    if not add_weight:
        trainer.dataset.add_weight = 0
    #if 'x_cols_org' in trainer.poll_meta.keys():
    #    trainer.dataset.x_cols = trainer.dataset.x_cols_org = trainer.poll_meta['x_cols_org']
    # look for the best rf model
    trainer.op_rf(fire_dict=trainer.dataset.fire_dict)
    # remove columns
    trainer.op2_rm_cols()
    logger.info(f'current columns {trainer.dataset.x_cols_org}')
    # op fire
    trainer.op_fire(x_cols=trainer.dataset.x_cols_org, search_wind_damp=search_wind_damp)
    if op_fire_zone:
        trainer.op_fire_zone(step=50)
    if choose_cat_hour:
        trainer.choose_cat_hour()
    if choose_cat_month:
        trainer.choose_cat_month()
    if op_lag:
        # see if adding lag improves things
        if trainer.dataset.with_interact:
            # use smaller lag range
            trainer.op4_lag(lag_range=[1, 20])
        else:
            trainer.op4_lag()
    else:
        print('skip adding lag')
        trainer.dataset.lag_dict = {'n_max': 1, 'step': 1, 'roll': True}
        trainer.dataset.build_lag(
            lag_range=np.arange(
                1,
                trainer.dataset.lag_dict['n_max'],
                trainer.dataset.lag_dict['step']),
            roll=trainer.dataset.lag_dict['roll'])
    if op_fire_twice:
        trainer.op_fire(x_cols=trainer.dataset.x_cols, with_lag=True, search_wind_damp=search_wind_damp)
    # search rf model again
    trainer.op6_rf()
    trainer.final_fit()
    # save plot
    trainer.save_feat_imp(with_interact=trainer.dataset.with_interact,
                          filename=trainer.dataset.report_folder + f'{trainer.poll_name}_rf_fea_op2_nolag.png',
                          title='rf feature of importance')
    trainer.save_all()
    if search_tpot:
        trainer.search_tpot()
    # turn off logging
    logging.shutdown()
    return trainer.dataset, trainer.model, trainer
43cd4ff89068feba1bca3c316e40b19f852c1da2
3,658,016
def download_raw_pages_content(pages_count): """download habr pages by page count""" return [fetch_raw_content(page) for page in range(1, pages_count + 1)]
77e369a986ff09887a71d996226d147fef9a36ec
3,658,018
def tseries2bpoframe(s: pd.Series, freq: str = "MS", prefix: str = "") -> pd.DataFrame: """ Aggregate timeseries with varying values to a dataframe with base, peak and offpeak timeseries, grouped by provided time interval. Parameters ---------- s : Series Timeseries with hourly or quarterhourly frequency. freq : {'MS' (month, default) 'QS' (quarter), 'AS' (year)} Target frequency. prefix : str, optional (default: '') If specified, add this to the column names of the returned dataframe. Returns ------- DataFrame Dataframe with base, peak and offpeak values (as columns). Index: downsampled timestamps at provided frequency. Notes ----- Can only be used for values that are 'averagable' over a time period, like power [MW] and price [Eur/MWh]. Not for e.g. energy [MWh], revenue [Eur], and duration [h]. In: ts_left 2020-01-01 00:00:00+01:00 41.88 2020-01-01 01:00:00+01:00 38.60 2020-01-01 02:00:00+01:00 36.55 ... 2020-12-31 21:00:00+01:00 52.44 2020-12-31 22:00:00+01:00 51.86 2020-12-31 23:00:00+01:00 52.26 Freq: H, Name: p, Length: 8784, dtype: float64 Out: base peak offpeak ts_left 2020-01-01 00:00:00+01:00 35.034906 42.530036 30.614701 2020-02-01 00:00:00+01:00 21.919009 33.295167 15.931557 ... ... ... 2020-11-01 00:00:00+01:00 38.785706 49.110873 33.226004 2020-12-01 00:00:00+01:00 43.519745 57.872246 35.055449 12 rows × 3 columns """ if freq not in ("MS", "QS", "AS"): raise ValueError( f"Parameter ``freq`` must be one of 'MS', 'QS', 'AS'; got '{freq}'." ) # Remove partial data s = trim_frame(s, freq) # Handle possible units. sin, units = (s.pint.magnitude, s.pint.units) if hasattr(s, "pint") else (s, None) # Do calculations. Use normal mean, because all rows have same duration. sout = sin.resample(freq).apply(lambda s: tseries2singlebpo(s, prefix)) # Handle possible units. if units is not None: sout = sout.astype(nits.pintunit(units)) return sout.unstack()
6b97bc3b8c925be68ba79e8a9abdc2795500df76
3,658,019
def calc_buffered_bounds( format, bounds, meters_per_pixel_dim, layer_name, geometry_type, buffer_cfg): """ Calculate the buffered bounds per format per layer based on config. """ if not buffer_cfg: return bounds format_buffer_cfg = buffer_cfg.get(format.extension) if format_buffer_cfg is None: return bounds geometry_type = normalize_geometry_type(geometry_type) per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name) if per_layer_cfg is not None: layer_geom_pixels = per_layer_cfg.get(geometry_type) if layer_geom_pixels is not None: assert isinstance(layer_geom_pixels, Number) result = bounds_buffer( bounds, meters_per_pixel_dim * layer_geom_pixels) return result by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get( geometry_type) if by_geometry_pixels is not None: assert isinstance(by_geometry_pixels, Number) result = bounds_buffer( bounds, meters_per_pixel_dim * by_geometry_pixels) return result return bounds
5bbf9720525126e3dcd000329493c894c8249771
3,658,020
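A sketch of the nested config shape the lookup above expects (keys and values hypothetical):

buffer_cfg = {
    'mvt': {                        # keyed by format extension
        'layer': {
            'roads': {'line': 4},   # per-layer, per-geometry-type pixel buffer
        },
        'geometry': {
            'polygon': 1,           # fallback buffer per geometry type
        },
    },
}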
async def read_users_me( current_user: models.User = Depends(security.get_current_active_user), ): """Get User data""" return current_user
4b2e37586a4e13074ec009f4cd7e64e7a357d539
3,658,021
from typing import Iterable
from typing import Tuple


def compute_qp_objective(
    configuration: Configuration, tasks: Iterable[Task], damping: float
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the Hessian matrix :math:`H` and linear vector :math:`c` of the
    QP objective function:

    .. math::

        \\frac{1}{2} \\Delta q^T H \\Delta q + c^T \\Delta q

    The configuration displacement :math:`\\Delta q` is the output of inverse
    kinematics (we divide it by :math:`\\Delta t` to get a commanded velocity).

    Args:
        configuration: Robot configuration to read kinematics from.
        tasks: List of kinematic tasks to fulfill at (weighted) best.
        damping: weight of Tikhonov (everywhere) regularization. Its unit is
            `[cost]^2 / [tangent]` where `[tangent]` is "the" unit of robot
            velocities. Improves numerical stability, but larger values slow
            down all tasks.

    Returns:
        Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP
        objective.
    """
    H = damping * configuration.tangent.eye
    c = configuration.tangent.zeros
    for task in tasks:
        H_task, c_task = task.compute_qp_objective(configuration)
        H += H_task
        c += c_task
    return (H, c)
623997bbaf7ce92c39084fa44960593b55a0b3a0
3,658,025
def _is_existing_account(respondent_email):
    """
    Checks whether a respondent account is already registered for the given
    email address.

    :param respondent_email: email of the respondent
    :type respondent_email: str
    :return: returns True if an account is already registered
    :rtype: bool
    """
    respondent = party_controller.get_respondent_by_email(respondent_email)
    return bool(respondent)
4cb0462f748d0b80dbb12f89364d279a3436b632
3,658,026
import socket


def basic_checks(server, port):
    """Perform basic checks on given host"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # 2 second timeout
    sock.settimeout(2)
    try:
        return sock.connect_ex((server, int(port))) == 0
    finally:
        # always release the socket, even if connect_ex raises
        sock.close()
4a31521089feb2c178bb5202fa818804dfe87142
3,658,027
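A one-line usage sketch (target host hypothetical):

print(basic_checks("example.com", 80))  # True if a TCP connect succeeds within 2s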