Columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
def randomize_quaternion_along_z( mujoco_simulation: RearrangeSimulationInterface, random_state: RandomState ): """ Rotate goal along z axis and return the rotated quat of the goal """ quat = _random_quat_along_z(mujoco_simulation.num_objects, random_state) return rotation.quat_mul(quat, mujoco_simulation.get_target_quat(pad=False))
9bc29520ca8f00debf819bf4dee55cce43bb8483
1,370
from io import StringIO import json def init(model): """ Initialize the server. Loads pyfunc model from the path. """ app = flask.Flask(__name__) @app.route("/ping", methods=["GET"]) def ping(): # pylint: disable=unused-variable """ Determine if the container is working and healthy. We declare it healthy if we can load the model successfully. """ health = model is not None status = 200 if health else 404 return flask.Response(response="\n", status=status, mimetype="application/json") @app.route("/invocations", methods=["POST"]) def transformation(): # pylint: disable=unused-variable """ Do an inference on a single batch of data. In this sample server, we take data as CSV or json, convert it to a Pandas DataFrame, generate predictions and convert them back to CSV. """ # Convert from CSV to pandas if flask.request.content_type == CONTENT_TYPE_CSV: data = flask.request.data.decode("utf-8") csv_input = StringIO(data) data = parse_csv_input(csv_input=csv_input) elif flask.request.content_type == CONTENT_TYPE_JSON: global logged_pandas_records_format_warning if not logged_pandas_records_format_warning: _logger.warning( "**IMPORTANT UPDATE**: Starting in MLflow 0.9.0, requests received with a" " `Content-Type` header value of `%s` will be interpreted" " as JSON-serialized Pandas DataFrames with the `split` orientation, instead" " of the `records` orientation. The `records` orientation is unsafe because" " it may not preserve column ordering. Client code should be updated to" " either send serialized DataFrames with the `split` orientation and the" " `%s` content type (recommended) or use the `%s` content type with the" " `records` orientation. For more information, see" " https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.\n", CONTENT_TYPE_JSON, CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_JSON_RECORDS_ORIENTED, ) logged_pandas_records_format_warning = True data = parse_json_input( json_input=flask.request.data.decode("utf-8"), orientation="records" ) elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED: data = parse_json_input( json_input=flask.request.data.decode("utf-8"), orientation="records" ) elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED: data = parse_json_input( json_input=flask.request.data.decode("utf-8"), orientation="split" ) else: return flask.Response( response=( "This predictor only supports the following content types," " {supported_content_types}. Got '{received_content_type}'.".format( supported_content_types=CONTENT_TYPES, received_content_type=flask.request.content_type, ) ), status=415, mimetype="text/plain", ) # Do the prediction # pylint: disable=broad-except try: raw_predictions = model.predict(data) except Exception: _handle_serving_error( error_message=( "Encountered an unexpected error while evaluating the model. Verify" " that the serialized input Dataframe is compatible with the model for" " inference." ) ) predictions = get_jsonable_obj(raw_predictions, pandas_orientation="records") result = json.dumps(predictions, cls=NumpyEncoder) return flask.Response(response=result, status=200, mimetype="application/json") return app
525ffd789be94c1e70d987c4b64c8876d9260d72
1,371
def sample_tag(user, name='Comedy'): """Creates a sample Tag""" return Tag.objects.create(user=user, name=name)
2288d8344fcf931a9e932b46db9a65275425b427
1,372
from typing import Tuple def _middle_point(p1: np.ndarray, p2: np.ndarray) -> Tuple[int, int]: """Returns the middle point (x,y) between two points Arguments: p1 (np.ndarray): First point p2 (np.ndarray): Second point """ return tuple((p1 + p2) // 2)
dcca1c1eeb0fea8c9adebfc9cccca94cb7ab7a43
1,373
def filter_with_prefixes(value, prefixes): """ Returns False if the value starts with at least one of the prefixes, True otherwise. Arguments: value -- string to validate prefixes -- list of string prefixes to validate at the beginning of the value """ for prefix in prefixes: if value.startswith(prefix): return False return True
56b9bacedaa7aa06023e29d45809f6e9661ee483
1,374
def is_seq(x, step=1): """Checks if the elements in a list-like object are increasing by step Parameters ---------- x: list-like step Returns ------- True if elements increase by step, else false and the index at which the condition is violated. """ for i in range(1, len(x)): if not x[i] == (x[i - 1] + step): print('Not seq at: ', i) return False return True
032e12b86aa7e50dfba2ddccd244475f58d70b29
1,376
def delete_editor(userid): """ :param userid: a string representing the user's UW NetID :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned. """ url = _make_del_account_url(userid) return _process_resp(url, get_sea_resource(url), _is_editor_deleted)
494211289faa4b16206b9687d6f7f94a8adc992a
1,378
def ecio_quality_rating(value, unit): """ ECIO (Ec/Io) - Energy to Interference Ratio (3G, CDMA/UMTS/EV-DO) """ if unit != "dBm": raise ValueError("Unsupported unit '{:}'".format(unit)) rating = 0 if value > -2: rating = 4 elif -2 >= value > -5: rating = 3 elif -5 >= value > -10: rating = 2 elif value <= -10: rating = 1 return rating
4cc21012464b8476d026f9dfbc35b8b1ea3c2d85
1,379
def normalizeFilename(filename): """normalizeFilename(filename) Replace characters that are illegal in the Window's environment""" res = filename rep = { "*":"_", "\"":"\'", "/":" per ", "\\":"_", ",":"_", "|":"_", ":":";" } for frm, to in rep.iteritems(): res = res.replace(frm, to) return res.strip()
84239d7d4fd982b27a4e0f5d20f615f3f288af85
1,380
def __virtual__(): """ Check if macOS and PyObjC is available """ if not salt.utils.platform.is_darwin(): return (False, 'module: mac_wifi only available on macOS.') if not PYOBJC: return (False, 'PyObjC not available.') return __virtualname__
46d21f3546234984890ff147a300ee8241b69ae6
1,381
def rearrange_kernel(kernel, data_shape=None): """Rearrange kernel This method rearanges the input kernel elements for vector multiplication. The input kernel is padded with zeroes to match the image size. Parameters ---------- kernel : np.ndarray Input kernel array data_shape : tuple Shape of the data Returns ------- numpy.ndarray Rearanged matrix of kernel elements """ # Define kernel shape. kernel_shape = np.array(kernel.shape) # Set data shape if not provided. if isinstance(data_shape, type(None)): data_shape = kernel_shape else: data_shape = np.array(data_shape) # Set the length of the output matrix rows. vec_length = np.prod(data_shape) # Find the diffrence between the shape of the data and the kernel. shape_diff = data_shape - kernel_shape if np.any(shape_diff < 0): raise ValueError('Kernel shape must be less than or equal to the ' 'data shape') # Set the kernel radius. kernel_rad = kernel_shape // 2 # Rotate, pad and roll the input kernel. kernel_rot = np.pad(np.rot90(kernel, 2), ((0, shape_diff[0]), (0, shape_diff[1])), 'constant') kernel_rot = np.roll(np.roll(kernel_rot, -kernel_rad[1], axis=1), -kernel_rad[0], axis=0) return np.array([np.roll(np.roll(kernel_rot, i, axis=0), j, axis=1).reshape(vec_length) for i in range(data_shape[0]) for j in range(data_shape[1])])
a5117d56f520c3a8f79ba2baea68d0b4d516158c
1,382
def exportTable(request_id, params): """Starts a table export task running. This is a low-level method. The higher-level ee.batch.Export.table object is generally preferred for initiating table exports. Args: request_id (string): A unique ID for the task, from newTaskId. If you are using the cloud API, this does not need to be from newTaskId, (though that's a good idea, as it's a good source of unique strings). It can also be empty, but in that case the request is more likely to fail as it cannot be safely retried. params: The object that describes the export task. If you are using the cloud API, this should be an ExportTableRequest. However, the "expression" parameter can be the actual FeatureCollection to be exported, not its serialized form. Returns: A dict with information about the created task. If you are using the cloud API, this will be an Operation. """ params = params.copy() return _prepare_and_run_export( request_id, params, _get_cloud_api_resource().projects().table().export)
e4dd22264c070315351bc3dd51061bc4948c9bda
1,383
def template_check(value): """Check if a rendered template string equals true. If value is not a string, return value as is. """ if isinstance(value, str): return value.lower() == "true" return value
3733db5c107068e815bac079fdef1a450f7acdc9
1,385
def return_npc(mcc, mnc): """ Format MCC and MNC into a NPC. :param mcc: Country code. :type mcc: int :param mnc: Network code. :type mnc: int """ return "{0}{1}30".format(str(mcc).zfill(3), str(mnc).zfill(3))
0ae5952fd7b026c2c90c72046f63ca4d08dacf06
1,386
import math def class_to_bps(bw_cls): """ Convert a SIBRA bandwidth class to bps (Bits Per Second). Class 0 is a special case, and is mapped to 0bps. :param float bw_cls: SIBRA bandwidth class. :returns: Kbps of bandwidth class :rtype: float """ if bw_cls == 0: return 0 bw_base = math.sqrt(pow(2, bw_cls - 1)) return SIBRA_BW_FACTOR * bw_base
02d0b4fbf5655318e6807bdd8c41fdfb59010ba4
1,387
def _get_capacity(): """Return constant values for dam level capacities. Storage capacity values are measured in million cubic metres i.e. Megalitres or Ml. Source: https://en.wikipedia.org/wiki/Western_Cape_Water_Supply_System @return capacity: Dict object containing maximum capacities of Western Cape dams. Includes aggregate values for small dams, big six dams and all dams. """ big_six_capacity = { 'Theewaterskloof': 480188, 'Wemmershoek': 58644, 'Steensbras Lower': 33517, 'Steenbras Upper': 31757, 'Voëlvlei': 164095, 'Berg River': 130010, } small_capacity = { 'Hely-Hutchinson': 925, 'Woodhead': 954, 'Victoria': 128, 'De Villiers': 243, 'Kleinplaats': 1368, 'Lewis Gay': 182, 'Land-en-Zeezicht': 451, } capacity = {**big_six_capacity, **small_capacity} capacity['Big Six Dams'] = sum(big_six_capacity.values()) capacity['Small Dams'] = sum(small_capacity.values()) capacity['All Dams'] = capacity['Small Dams'] + capacity['Big Six Dams'] return capacity
01d1a5e7470d578296e285e2e00cd44eaf00d15c
1,388
def login_required(f): """ Decorator to use if a view needs to be protected by a login. """ @wraps(f) def decorated_function(*args, **kwargs): if not 'username' in session: return redirect(url_for('login')) return f(*args, **kwargs) return decorated_function
d09069e65c64c06885708b10ca70f5f319389c7a
1,389
def test_bare_except() -> None: """Bare `except` to handle any uncaught exceptions.""" def reciprocal_of(value: float) -> float: try: return 1 / value except ZeroDivisionError: raise except: raise pytest.raises(TypeError, reciprocal_of, "a")
8cce2efcd850ca546f092ec0988d69e8d576e500
1,390
def _get_image_blob(im): """Converts an image into a network input. Arguments: im (ndarray): a color image in BGR order Returns: blob (ndarray): a data blob holding an image pyramid im_scale_factors (list): list of image scales (relative to im) used in the image pyramid """ im_orig = im.astype(np.float32, copy=True) im_orig -= cfg.PIXEL_MEANS im_shape = im_orig.shape im_size_min = np.min(im_shape[0:2]) im_size_max = np.max(im_shape[0:2]) processed_ims = [] im_scale_factors = [] # print('cfg.TEST.SCALES: {}'.format(cfg.TEST.SCALES)), for target_size in cfg.TEST.SCALES: im_scale = float(target_size) / float(im_size_min) # Prevent the biggest axis from being more than MAX_SIZE if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE: im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max) im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR) im_scale_factors.append(im_scale) processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) # blob /= 255.0 return blob, np.array(im_scale_factors)
61a10be02f258680b0c103398deee9d72870035b
1,391
def toiter(x): """Convert to iterable. If input is iterable, returns it. Otherwise returns it in a list. Useful when you want to iterate over something (like in a for loop), and you don't want to have to do type checking or handle exceptions when it isn't a sequence""" if iterable(x): return x else: return [x]
ef9716b65893ca614dd53cc6fa7ae17b6cce2a35
1,393
def ap_date(value): """ Converts a date string in m/d/yyyy format into AP style. """ if not value: return '' bits = unicode(value).split('/') month, day, year = bits output = AP_MONTHS[int(month) - 1] output += ' ' + unicode(int(day)) output += ', ' + year return output
4ca1dab0775141f548946072e0208502d54bc784
1,394
def dump_sql(fp, query: str, encoding="utf8"): """ Write a given query into a file path. """ query = ljustify_sql(query) for line in query: fp.write(bytes(line, encoding=encoding)) return fp
b6a847dbfccb17c0cf7bd3590eee69783d83030c
1,395
def make_design_matrix(stim, d=25): """Create time-lag design matrix from stimulus intensity vector. Args: stim (1D array): Stimulus intensity at each time point. d (number): Number of time lags to use. Returns X (2D array): GLM design matrix with shape T, d """ padded_stim = np.concatenate([np.zeros(d - 1), stim]) T = len(stim) X = np.zeros((T, d)) for t in range(T): X[t] = padded_stim[t:t + d] return X
5b1759076b9e0f44ea338a4e72d2f1a76d3ccc3b
1,396
def _fit_subpixel_2d(image, coord, radius, voxel_size_yx, psf_yx): """Fit a gaussian in a 2-d image. Parameters ---------- image : np.ndarray Image with shape (y, x). coord : np.ndarray, np.int64 Coordinate of the spot detected, with shape (2,). One coordinate per dimension (yx coordinates). radius : Tuple[float] Radius in pixels of the detected spots, one element per dimension. voxel_size_yx : int or float Size of a voxel on the yx plan, in nanometer. psf_yx : int or float Theoretical size of the PSF emitted by a spot in the yx plan, in nanometer. Returns ------- new_coord : List[float] Coordinates of the spot centroid with a subpixel accuracy (one element per dimension). """ # extract spot image image_spot, bbox_low = _get_spot_surface( image, coord[0], coord[1], radius[0]) # fit gaussian try: parameters = modelize_spot(image_spot, voxel_size_z=None, voxel_size_yx=voxel_size_yx, psf_z=None, psf_yx=psf_yx, return_coord=True) # format coordinates and ensure it is fitted within the spot image y_max, x_max = image_spot.shape coord_y = parameters[0] / voxel_size_yx if coord_y < 0 or coord_y > y_max: coord_y = coord[0] else: coord_y += bbox_low[0] coord_x = parameters[1] / voxel_size_yx if coord_x < 0 or coord_x > x_max: coord_x = coord[1] else: coord_x += bbox_low[1] new_coord = [coord_y, coord_x] # if a spot is ill-conditioned, we simply keep its original coordinates except RuntimeError: new_coord = list(coord) return new_coord
6e05b395ae93319de599283fe041f681b5ee039c
1,397
def density(sisal,temp,pres,salt=None,dliq=None,chkvals=False, chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False, mathargs=None): """Calculate sea-ice total density. Calculate the total density of a sea-ice parcel. :arg float sisal: Total sea-ice salinity in kg/kg. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :arg salt: Seawater salinity in kg/kg. If unknown, pass None (default) and it will be calculated. :type salt: float or None :arg dliq: Seawater liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dliq: float or None :arg dvap: Water vapour density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dvap: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg salt0: Initial guess for the seawater salinity in kg/kg. If None (default) then `_approx_tp` is used. :type salt0: float or None :arg dliq0: Initial guess for the seawater liquid water density in kg/m3. If None (default) then `flu3a._dliq_default` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg bool useext: If False (default) then the salt contribution is calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT. :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Density in kg/m3. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :raises RuntimeWarning: If the equilibrium seawater salinity is lower than the total parcel salinity. :Examples: >>> density(0.035,270.,1e5) 993.156434117 """ g_p = seaice_g(0,0,1,sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals, chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext, mathargs=mathargs) rho = g_p**(-1) return rho
40a365ea0d4c79813394790ef3df6c8eb21727b8
1,398
def prod_non_zero_diag(x): """Compute product of nonzero elements from matrix diagonal. input: x -- 2-d numpy array output: product -- integer number Not vectorized implementation. """ n = len(x) m = len(x[0]) res = 1 for i in range(min(n, m)): if (x[i][i] != 0): res *= x[i][i] return res
13e9f6cc9ea22e7901d454b23297a2e9c5da3a3a
1,399
def LoadSparse(inputfile, verbose=False): """Loads a sparse matrix stored as npz file to its dense represent.""" npzfile = np.load(inputfile) mat = sp.csr_matrix((npzfile['data'], npzfile['indices'], npzfile['indptr']), shape=tuple(list(npzfile['shape']))) if verbose: print 'Loaded sparse matrix from %s of shape %s' % (inputfile, mat.shape.__str__()) return mat.todense()
80dfeb5c48ab2c3905b78ba37226eb98fde5de45
1,400
def chain_decomposition(G, root=None): """Return the chain decomposition of a graph. The *chain decomposition* of a graph with respect a depth-first search tree is a set of cycles or paths derived from the set of fundamental cycles of the tree in the following manner. Consider each fundamental cycle with respect to the given tree, represented as a list of edges beginning with the nontree edge oriented away from the root of the tree. For each fundamental cycle, if it overlaps with any previous fundamental cycle, just take the initial non-overlapping segment, which is a path instead of a cycle. Each cycle or path is called a *chain*. For more information, see [1]_. Parameters ---------- G : undirected graph root : node (optional) A node in the graph `G`. If specified, only the chain decomposition for the connected component containing this node will be returned. This node indicates the root of the depth-first search tree. Yields ------ chain : list A list of edges representing a chain. There is no guarantee on the orientation of the edges in each chain (for example, if a chain includes the edge joining nodes 1 and 2, the chain may include either (1, 2) or (2, 1)). Raises ------ NodeNotFound If `root` is not in the graph `G`. Notes ----- The worst-case running time of this implementation is linear in the number of nodes and number of edges [1]_. References ---------- .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex- and 2-edge-connectivity." *Information Processing Letters*, 113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016> """ def _dfs_cycle_forest(G, root=None): """Builds a directed graph composed of cycles from the given graph. `G` is an undirected simple graph. `root` is a node in the graph from which the depth-first search is started. This function returns both the depth-first search cycle graph (as a :class:`~cynetworkx.DiGraph`) and the list of nodes in depth-first preorder. The depth-first search cycle graph is a directed graph whose edges are the edges of `G` oriented toward the root if the edge is a tree edge and away from the root if the edge is a non-tree edge. If `root` is not specified, this performs a depth-first search on each connected component of `G` and returns a directed forest instead. If `root` is not in the graph, this raises :exc:`KeyError`. """ # Create a directed graph from the depth-first search tree with # root node `root` in which tree edges are directed toward the # root and nontree edges are directed away from the root. For # each node with an incident nontree edge, this creates a # directed cycle starting with the nontree edge and returning to # that node. # # The `parent` node attribute stores the parent of each node in # the DFS tree. The `nontree` edge attribute indicates whether # the edge is a tree edge or a nontree edge. # # We also store the order of the nodes found in the depth-first # search in the `nodes` list. H = nx.DiGraph() nodes = [] for u, v, d in nx.dfs_labeled_edges(G, source=root): if d == 'forward': # `dfs_labeled_edges()` yields (root, root, 'forward') # if it is beginning the search on a new connected # component. if u == v: H.add_node(v, parent=None) nodes.append(v) else: H.add_node(v, parent=u) H.add_edge(v, u, nontree=False) nodes.append(v) # `dfs_labeled_edges` considers nontree edges in both # orientations, so we need to not add the edge if it its # other orientation has been added. 
elif d == 'nontree' and v not in H[u]: H.add_edge(v, u, nontree=True) else: # Do nothing on 'reverse' edges; we only care about # forward and nontree edges. pass return H, nodes def _build_chain(G, u, v, visited): """Generate the chain starting from the given nontree edge. `G` is a DFS cycle graph as constructed by :func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge that begins a chain. `visited` is a set representing the nodes in `G` that have already been visited. This function yields the edges in an initial segment of the fundamental cycle of `G` starting with the nontree edge (`u`, `v`) that includes all the edges up until the first node that appears in `visited`. The tree edges are given by the 'parent' node attribute. The `visited` set is updated to add each node in an edge yielded by this function. """ while v not in visited: yield u, v visited.add(v) u, v = v, G.nodes[v]['parent'] yield u, v # Create a directed version of H that has the DFS edges directed # toward the root and the nontree edges directed away from the root # (in each connected component). H, nodes = _dfs_cycle_forest(G, root) # Visit the nodes again in DFS order. For each node, and for each # nontree edge leaving that node, compute the fundamental cycle for # that nontree edge starting with that edge. If the fundamental # cycle overlaps with any visited nodes, just take the prefix of the # cycle up to the point of visited nodes. # # We repeat this process for each connected component (implicitly, # since `nodes` already has a list of the nodes grouped by connected # component). visited = set() for u in nodes: visited.add(u) # For each nontree edge going out of node u... edges = ((u, v) for u, v, d in H.out_edges(u, data='nontree') if d) for u, v in edges: # Create the cycle or cycle prefix starting with the # nontree edge. chain = list(_build_chain(H, u, v, visited)) yield chain
603fffdd2530bbaf296bb628ec59ce0d7a9a8de2
1,402
def dup_lcm(f, g, K): """Computes polynomial LCM of `f` and `g` in `K[x]`. """ if K.has_Field or not K.is_Exact: return dup_ff_lcm(f, g, K) else: return dup_rr_lcm(f, g, K)
f5b6a7f3d0aa7155bfffd03b3bcb3d01e716855c
1,403
import urllib import json def add_tag_translation(request, tag_id, lang, text): """Adds a translation to the given Tag.""" tag = get_object_or_404(Tag, id=tag_id) text = urllib.unquote(text) data = {} langs = tag.site.get_languages(lang) if len(langs) == 0: data['error'] = 'No languages defined' else: TagTranslation.objects.create(tag=tag, language=langs[0], text=text) return HttpResponse(json.dumps(data), content_type='application/json')
2fe49be5bee9b6b13104ed1465ea6c993db9eb51
1,404
def simpson(x, with_replacement=False): """For computing simpson index directly from counts (or frequencies, if with_replacement=True) Parameters ---------- x : with_replacement : (Default value = False) Returns ------- """ total = np.sum(x) if with_replacement: return np.sum([(y / total) * (y / total) for y in x]) else: return np.sum([(y / total) * ((y - 1) / (total - 1)) for y in x])
282de607ee722d95db830ca7185a2d3519dcb78f
1,405
async def setup_automation(hass, device_id, trigger_type): """Set up an automation trigger for testing triggering.""" return await async_setup_component( hass, AUTOMATION_DOMAIN, { AUTOMATION_DOMAIN: [ { "trigger": { CONF_PLATFORM: "device", CONF_DOMAIN: DOMAIN, CONF_DEVICE_ID: device_id, CONF_TYPE: trigger_type, }, "action": { "service": "test.automation", "data": DATA_MESSAGE, }, }, ] }, )
e06d8c38ccfb76b5c89d002407d400bf0135749b
1,406
def generate_html_tutor_constraints(sai): """ Given an SAI, this finds a set of constraints for the SAI, so it don't fire in nonsensical situations. """ constraints = set() args = get_vars(sai) # selection constraints, you can only select something that has an # empty string value. if len(args) == 0: return frozenset() # get action action = sai[2] if action == "ButtonPressed": # Constrain the selection to be of type button # constraints.add(('type', selection, 'MAIN::button')) selection = args[0] constraints.add(('id', selection, 'done')) else: # print("SAI", sai) # print("ARGS", args) selection = args[0] constraints.add(('contentEditable', selection, True)) # constraints.add(('value', selection, '?selection-value')) # constraints.add((is_empty_string, '?selection-value')) # value constraints, don't select empty values for i, arg in enumerate(args[1:]): constraints.add(('value', arg, '?foa%ival' % (i+1))) constraints.add((is_not_empty_string, '?foa%ival' % (i+1))) # constraints.add(('type', a, 'MAIN::cell')) return frozenset(constraints)
d6201e80574f766ae57609707bad8f617d908682
1,407
def info(email): """Information about a specific email.""" with db_session() as db: user = db.query(User).filter(User.email == email).first() if user: return [user.email, user.api_key, user.grabs] else: return None
d9624f33f18dd507c2fceb4e7f917d9cd695dea9
1,410
import networkx def TetrahedralGraph(): """ Returns a tetrahedral graph (with 4 nodes). A tetrahedron is a 4-sided triangular pyramid. The tetrahedral graph corresponds to the connectivity of the vertices of the tetrahedron. This graph is equivalent to a wheel graph with 4 nodes and also a complete graph on four nodes. (See examples below). PLOTTING: The tetrahedral graph should be viewed in 3 dimensions. We chose to use the default spring-layout algorithm here, so that multiple iterations might yield a different point of reference for the user. We hope to add rotatable, 3-dimensional viewing in the future. In such a case, a string argument will be added to select the flat spring-layout over a future implementation. EXAMPLES: Construct and show a Tetrahedral graph :: sage: g = graphs.TetrahedralGraph() sage: g.show() # long time The following example requires networkx:: sage: import networkx as NX Compare this Tetrahedral, Wheel(4), Complete(4), and the Tetrahedral plotted with the spring-layout algorithm below in a Sage graphics array:: sage: tetra_pos = graphs.TetrahedralGraph() sage: tetra_spring = Graph(NX.tetrahedral_graph()) sage: wheel = graphs.WheelGraph(4) sage: complete = graphs.CompleteGraph(4) sage: g = [tetra_pos, tetra_spring, wheel, complete] sage: j = [] sage: for i in range(2): ....: n = [] ....: for m in range(2): ....: n.append(g[i + m].plot(vertex_size=50, vertex_labels=False)) ....: j.append(n) sage: G = graphics_array(j) sage: G.show() # long time """ G = networkx.tetrahedral_graph() return Graph(G, name="Tetrahedron", pos = { 0 : (0, 0), 1 : (0, 1), 2 : (cos(3.5*pi/3), sin(3.5*pi/3)), 3 : (cos(5.5*pi/3), sin(5.5*pi/3))} )
b39014244ae2750b2e118d2b7cfe4b7d7cd55997
1,411
import logging def build_resnet_v1(input_shape, depth, num_classes, pfac, use_frn=False, use_internal_bias=True): """Builds ResNet v1. Args: input_shape: tf.Tensor. depth: ResNet depth. num_classes: Number of output classes. pfac: priorfactory.PriorFactory class. use_frn: if True, then use Filter Response Normalization (FRN) instead of batchnorm. use_internal_bias: if True, use biases in all Conv layers. If False, only use a bias in the final Dense layer. Returns: tf.keras.Model. """ def resnet_layer(inputs, filters, kernel_size=3, strides=1, activation=None, pfac=None, use_frn=False, use_bias=True): """2D Convolution-Batch Normalization-Activation stack builder. Args: inputs: tf.Tensor. filters: Number of filters for Conv2D. kernel_size: Kernel dimensions for Conv2D. strides: Stride dimensinons for Conv2D. activation: tf.keras.activations.Activation. pfac: prior.PriorFactory object. use_frn: if True, use Filter Response Normalization (FRN) layer use_bias: if True, use biases in Conv layers. Returns: tf.Tensor. """ x = inputs logging.info('Applying conv layer.') x = pfac(tf.keras.layers.Conv2D( filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', use_bias=use_bias))(x) if use_frn: x = pfac(frn.FRN())(x) else: x = tf.keras.layers.BatchNormalization()(x) if activation is not None: x = tf.keras.layers.Activation(activation)(x) return x # Main network code num_res_blocks = (depth - 2) // 6 filters = 16 if (depth - 2) % 6 != 0: raise ValueError('depth must be 6n+2 (e.g. 20, 32, 44).') logging.info('Starting ResNet build.') inputs = tf.keras.layers.Input(shape=input_shape) x = resnet_layer(inputs, filters=filters, activation='relu', pfac=pfac, use_frn=use_frn, use_bias=use_internal_bias) for stack in range(3): for res_block in range(num_res_blocks): logging.info('Starting ResNet stack #%d block #%d.', stack, res_block) strides = 1 if stack > 0 and res_block == 0: # first layer but not first stack strides = 2 # downsample y = resnet_layer(x, filters=filters, strides=strides, activation='relu', pfac=pfac, use_frn=use_frn, use_bias=use_internal_bias) y = resnet_layer(y, filters=filters, activation=None, pfac=pfac, use_frn=use_frn, use_bias=use_internal_bias) if stack > 0 and res_block == 0: # first layer but not first stack # linear projection residual shortcut connection to match changed dims x = resnet_layer(x, filters=filters, kernel_size=1, strides=strides, activation=None, pfac=pfac, use_frn=use_frn, use_bias=use_internal_bias) x = tf.keras.layers.add([x, y]) if use_frn: x = pfac(frn.TLU())(x) else: x = tf.keras.layers.Activation('relu')(x) filters *= 2 # v1 does not use BN after last shortcut connection-ReLU x = tf.keras.layers.AveragePooling2D(pool_size=8)(x) x = tf.keras.layers.Flatten()(x) x = pfac(tf.keras.layers.Dense( num_classes, kernel_initializer='he_normal'))(x) logging.info('ResNet successfully built.') return tf.keras.models.Model(inputs=inputs, outputs=x)
f72a48fcd0df9c7b3b2ea0c5c5db9d3dffad55b6
1,412
def lr_recover_l1(invecs, intensities, nonneg=True, **kwargs): """Computes the low-rank matrix reconstruction using l1-minimisation .. math:: \min_Z \sum_i \vert \langle a_i| Z | a_i \rangle - y_i \vert \\ \mathrm{s.t.} Z \ge 0 where :math:`a_i` are the input vectors and :math:`y_i` are the measured intensities. For the arguments not listed see :func:`recover` :param bool nonneg: Enfornce the constraint Z >= 0 (default True) :param kwargs: Additional arguemnts passed to `cvx.Problem.solve` :returns: array of shape (dim, dim); Low-rank matrix approximation for given measurements """ dim = invecs.shape[1] # we have to manually convert convex programm to real form since cvxpy # does not support complex programms z, mat_cons = _semidef_complex_as_real(dim) if nonneg else \ _hermitian_as_real(dim) invecs_real = np.concatenate((invecs.real, invecs.imag), axis=1) obj = cvx.Minimize(sum(cvx.abs(cvx.quad_form(a, z) - y) for a, y in zip(invecs_real, intensities))) prob = cvx.Problem(obj, mat_cons) prob.solve(**kwargs) if prob.status not in ['optimal', 'optimal_inaccurate']: raise RuntimeError("Optimization did not converge: " + prob.status) return z.value[:dim, :dim] + 1.j * z.value[dim:, :dim]
6551f8e3ff146831d51d338c00c5f2e8b3d7acef
1,413
import pickle def start_view_data(trans_id): """ This method is used to execute query using asynchronous connection. Args: trans_id: unique transaction id """ limit = -1 # Check the transaction and connection status status, error_msg, conn, trans_obj, session_obj = \ check_transaction_status(trans_id) if error_msg == ERROR_MSG_TRANS_ID_NOT_FOUND: return make_json_response(success=0, errormsg=error_msg, info='DATAGRID_TRANSACTION_REQUIRED', status=404) # get the default connection as current connection which is attached to # trans id holds the cursor which has query result so we cannot use that # connection to execute another query otherwise we'll lose query result. try: manager = get_driver(PG_DEFAULT_DRIVER).connection_manager( trans_obj.sid) default_conn = manager.connection(did=trans_obj.did) except (ConnectionLost, SSHTunnelConnectionLost) as e: raise except Exception as e: current_app.logger.error(e) return internal_server_error(errormsg=str(e)) # Connect to the Server if not connected. if not default_conn.connected(): status, msg = default_conn.connect() if not status: return make_json_response( data={'status': status, 'result': "{}".format(msg)} ) if status and conn is not None and \ trans_obj is not None and session_obj is not None: # set fetched row count to 0 as we are executing query again. trans_obj.update_fetched_row_cnt(0) # Fetch the sql and primary_keys from the object sql = trans_obj.get_sql(default_conn) pk_names, primary_keys = trans_obj.get_primary_keys(default_conn) session_obj['command_obj'] = pickle.dumps(trans_obj, -1) has_oids = False if trans_obj.object_type == 'table': # Fetch OIDs status has_oids = trans_obj.has_oids(default_conn) # Fetch the applied filter. filter_applied = trans_obj.is_filter_applied() # Fetch the limit for the SQL query limit = trans_obj.get_limit() can_edit = trans_obj.can_edit() can_filter = trans_obj.can_filter() # Store the primary keys to the session object session_obj['primary_keys'] = primary_keys # Store the OIDs status into session object session_obj['has_oids'] = has_oids update_session_grid_transaction(trans_id, session_obj) # Execute sql asynchronously status, result = conn.execute_async(sql) else: status = False result = error_msg filter_applied = False can_edit = False can_filter = False sql = None return make_json_response( data={ 'status': status, 'result': result, 'filter_applied': filter_applied, 'limit': limit, 'can_edit': can_edit, 'can_filter': can_filter, 'sql': sql, 'info_notifier_timeout': blueprint.info_notifier_timeout.get() } )
0c4c5797578abc0623805d269043c6fb3c0512a9
1,415
def fft_resize(images, resize=False, new_size=None): """Function for applying DFT and resizing. This function takes in an array of images, applies the 2-d fourier transform and resizes them according to new_size, keeping the frequencies that overlap between the two sizes. Args: images: a numpy array with shape [batch_size, height, width, num_channels] resize: boolean, whether or not to resize new_size: a tuple (size, size), with height and width the same Returns: im_fft_downsampled: a numpy array with shape [batch_size, (new) height, (new) width, num_channels] """ assert len(images.shape) == 4, ("expecting images to be" "[batch_size, height, width, num_channels]") im_complex = images.astype("complex64") im_fft = np.fft.fft2(im_complex, axes=(1, 2)) # resizing images if resize: # get fourier frequencies to threshold assert (im_fft.shape[1] == im_fft.shape[2]), ("Need images to have same" "height and width") # downsample by threshold width = im_fft.shape[2] new_width = new_size[0] freqs = np.fft.fftfreq(width, d=1.0 / width) idxs = np.flatnonzero((freqs >= -new_width / 2.0) & (freqs < new_width / 2.0)) im_fft_downsampled = im_fft[:, :, idxs, :][:, idxs, :, :] else: im_fft_downsampled = im_fft return im_fft_downsampled
64846e58fa8ad422b4668062b09458110feed7f5
1,416
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Sequential(nn.ReplicationPad2d(1), nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, groups=groups, bias=False, dilation=dilation))
4b23699e4766f341262499608680f5f1f2b6cd26
1,418
def get_inbound_layers_without_params(layer): """Return inbound layers. Parameters ---------- layer: Keras.layers A Keras layer. Returns ------- : list[Keras.layers] List of inbound layers. """ return [layer for layer in get_inbound_layers(layer) if not has_weights(layer)]
9671b9277cb690dc54ffbcb35a4e22672b81748d
1,419
def orders_matchresults(symbol, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None): """ :param symbol: :param types: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell} :param start_date: :param end_date: :param _from: :param direct: allowed values {prev: forward, next: backward} :param size: :return: """ params = {'symbol': symbol} if types: params['types'] = types if start_date: params['start-date'] = start_date if end_date: params['end-date'] = end_date if _from: params['from'] = _from if direct: params['direct'] = direct if size: params['size'] = size url = '/v1/order/matchresults' return api_key_get(params, url)
61121dc12bed75236622e45fb6c96b2595008e64
1,420
def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision
3979236388aecaa32452958bf344632a1c781181
1,421
def _is_grpc_unary_method(attr): """Check if attribute is a grpc method that returns unary.""" return isinstance(attr, (grpc.UnaryUnaryMultiCallable, grpc.StreamUnaryMultiCallable))
cb20221038ea2754d259fca22941ba91944aceb8
1,422
def gridcorner( D, xyz, labels=None, projection="max_slice", max_n_ticks=4, factor=2, whspace=0.05, showDvals=True, lines=None, label_offset=0.4, **kwargs ): """Generate a grid corner plot Parameters ---------- D: array_like N-dimensional data to plot, `D.shape` should be `(n1, n2,..., nN)`, where `N`, is the number of grid points along dimension `i`. xyz: list List of 1-dimensional arrays of coordinates. `xyz[i]` should have length `N` (see help for `D`). labels: list N+1 length list of labels; the first N correspond to the coordinates labels, the final label is for the dependent (D) variable. projection: str or func If a string, one of `{"log_mean", "max_slice"} to use inbuilt functions to calculate either the logged mean or maximum slice projection. Else a function to use for projection, must take an `axis` argument. Default is `gridcorner.max_slice()`, to project out a slice along the maximum. max_n_ticks: int Number of ticks for x and y axis of the `pcolormesh` plots. factor: float Controls the size of one window. showDvals: bool If true (default) show the D values on the right-hand-side of the 1D plots and add a label. lines: array_like N-dimensional list of values to delineate. Returns ------- fig, axes: The figure and NxN set of axes """ ndim = D.ndim fig, axes = _get_fig_and_axes(ndim, factor, whspace) if type(projection) == str: if projection in ["log_mean"]: projection = log_mean elif projection in ["max_slice"]: projection = max_slice else: raise ValueError("Projection {} not understood".format(projection)) for i in range(ndim): projection_1D( axes[i, i], xyz[i], D, i, projection=projection, showDvals=showDvals, lines=lines, **kwargs ) for j in range(ndim): ax = axes[i, j] if j > i: ax.set_frame_on(False) ax.set_xticks([]) ax.set_yticks([]) continue ax.get_shared_x_axes().join(axes[ndim - 1, j], ax) if i < ndim - 1: ax.set_xticklabels([]) if j < i: ax.get_shared_y_axes().join(axes[i, i - 1], ax) if j > 0: ax.set_yticklabels([]) if j == i: continue ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="upper")) ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="upper")) ax, pax = projection_2D( ax, xyz[i], xyz[j], D, i, j, lines=lines, projection=projection, **kwargs ) if labels: for i in range(ndim): axes[-1, i].set_xlabel(labels[i]) if i > 0: axes[i, 0].set_ylabel(labels[i]) if showDvals: axes[i, i].set_ylabel(labels[-1]) for ax in axes[:, 0]: ax.yaxis.set_label_coords(-label_offset, 0.5) for ax in axes[-1, :]: ax.xaxis.set_label_coords(0.5, -label_offset) return fig, axes
37c929a5b323deb1968ba6504113141bfc8ee830
1,423
from ._standard_montage_utils import _str_names, _str def read_dig_hpts(fname, unit='mm'): """Read historical .hpts mne-c files. Parameters ---------- fname : str The filepath of .hpts file. unit : 'm' | 'cm' | 'mm' Unit of the positions. Defaults to 'mm'. Returns ------- montage : instance of DigMontage The montage. See Also -------- DigMontage read_dig_captrak read_dig_dat read_dig_egi read_dig_fif read_dig_polhemus_isotrak make_dig_montage Notes ----- The hpts format digitzer data file may contain comment lines starting with the pound sign (#) and data lines of the form:: <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*> where: ``<*category*>`` defines the type of points. Allowed categories are: ``hpi``, ``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to head-position indicator coil locations, cardinal landmarks, EEG electrode locations, and additional head surface points, respectively. ``<*identifier*>`` identifies the point. The identifiers are usually sequential numbers. For cardinal landmarks, 1 = left auricular point, 2 = nasion, and 3 = right auricular point. For EEG electrodes, identifier = 0 signifies the reference electrode. ``<*x/mm*> , <*y/mm*> , <*z/mm*>`` Location of the point, usually in the head coordinate system in millimeters. If your points are in [m] then unit parameter can be changed. For example:: cardinal 2 -5.6729 -12.3873 -30.3671 cardinal 1 -37.6782 -10.4957 91.5228 cardinal 3 -131.3127 9.3976 -22.2363 hpi 1 -30.4493 -11.8450 83.3601 hpi 2 -122.5353 9.2232 -28.6828 hpi 3 -6.8518 -47.0697 -37.0829 hpi 4 7.3744 -50.6297 -12.1376 hpi 5 -33.4264 -43.7352 -57.7756 eeg FP1 3.8676 -77.0439 -13.0212 eeg FP2 -31.9297 -70.6852 -57.4881 eeg F7 -6.1042 -68.2969 45.4939 ... """ _scale = _check_unit_and_get_scaling(unit) out = np.genfromtxt(fname, comments='#', dtype=(_str, _str, 'f8', 'f8', 'f8')) kind, label = _str_names(out['f0']), _str_names(out['f1']) kind = [k.lower() for k in kind] xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T xyz *= _scale del _scale fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'} fid = {fid_idx_to_label[label[ii]]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'} ch_pos = {label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'} hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == 'hpi']) hpi.shape = (-1, 3) # in case it's empty hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == 'extra']) hsp.shape = (-1, 3) # in case it's empty return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp)
e5159900f5eb6e846f4028a237884fb0f85323f3
1,424
def GetTaskAttr( fname, attrName, defaultVal = None ): """Return the specified attribute of a task, or the specified default value if the task does not have this attribute.""" for line in SlurpFile( fname ).rstrip('\n').split('\n'): arg, val = line.split('\t') if arg == attrName: return coerceVal( val ) return defaultVal
ab4e8ca8286e94895afe78353b98283b7ab7e890
1,425
def print_board(white, black): """Produce GnuGO like output to verify board position. Args: white (np.array): array with 1's for white black (np.array): array with 1's for black Returns: str: gnugo like output (without legend) """ s = '' for x in xrange(19): for y in xrange(19): if white[x][y] == 1: s += '0 ' elif black[x][y] == 1: s += 'X ' else: s += '. ' s += '\n' return s
5d6143ffd1964cbe41d7d9e0a9c9b7696e7d5008
1,426
from typing import List def get_repositories_containing_graph(name: str) -> List[str]: """Returns the repositories containing a graph with the given graph name. Parameters ---------------------------- name: str, The name of the graph to retrieve. Returns ---------------------------- List of repository names. """ return [ repository for repository in get_available_repositories() if name in get_available_graphs_from_repository(repository) ]
16d2135e1d68fe699fa68b7786f7363dd06e5ab5
1,427
def build_query(dct): """Build SQL with '?' and value tuples from clause dictionary""" if (dct is not {}): str_clauses = '' tpl_values = () bln_start = True #print dct for str_field, dct_op_val in dct.iteritems(): if (str_field is not None): if (bln_start): str_open = ' (' bln_start = False else: str_open = ' and (' str_clauses = ''.join([str_clauses, str_open, str_field, ' ', \ dct_op_val['logic'], ' ?)']) var_val = dct_op_val['value'] if (str(var_val).lower() == 'null'): var_val = None tpl_values = tpl_values + (var_val, ) else: # simple 1 or 0 (ALL records or NO records) ... # trumps all other clauses, so lets exit the loop str_clauses = ' ?' tpl_values = (dct_op_val['value'],) break return (tpl_values, str_clauses) else: return ((), " 1")
ac49014c8e629d2fdc12472f2b8b345cbee8ce18
1,428
def load_secrets(fn=".env", prefix="DJANGO_ENV_", **kwargs): """Load a list of configuration variables. Return a dictionary of configuration variables, as loaded from a configuration file or the environment. Values passed in as ``args`` or as the value in ``kwargs`` will be used as the configuration variable's default value if one is not found in the configuration file or environment. Parameters ---------- fn : string, default=".env" Configuration filename, defaults to ``.env``. May be in TOML, JSON, YAML, or BespON formats. Formats will be attempted in this order. prefix : string, default="DJANGO_ENV_" Prefix for environment variables. This prefix will be prepended to all variable names before searching for them in the environment. kwargs : dict, optional Dictionary with configuration variables as keys and default values as values. Returns ------- dict A dictionary of configuration variables and their values. """ return merge(kwargs, load_file(fn), load_environment(prefix))
6f18ba641e4c23383e47c7422efaaa990af4cc6a
1,429
import itertools def dependency_chain(pkgname): """Return an ordered list of dependencies for a package""" depends = recurse_depends(pkgname) return set(list(depends.keys()) + list(itertools.chain.from_iterable(depends.values())))
208226f2d771f4fa278a0295997fc53df55caa8f
1,430
from typing import Callable import click def with_input(func: Callable) -> Callable: """ Attaches a "source" argument to the command. """ return click.argument( "source", type=click.Path(exists=True), required=True )(func)
3117f183ac4e4d459a718b59fc9a3ba00b36e291
1,431
def check_loop_validity(inst_list): """ Given a list of instructions, check whether they can form a valid loop. This means, checking for anything that could create an infinite loop. We are also disallowing double loops right now""" for i, c in enumerate(inst_list): if c in [5, 6, 16, 25]: return False, i return True, -1
a58923e014947d1406165a831a57b73fcb9ab226
1,432
def target_channel_id_name_list( conversations_list: list=None, including_archived: bool=False): """extract targeted channels id list from conversations_list response. Returns: id_list, name_list """ id_list = [] name_list = [] for ch in conversations_list: if including_archived is False: if ch['is_archived'] is True: continue id_list.append(ch['id']) name_list.append(ch['name']) return id_list, name_list
be2ec76242367a170deac2e577ec90c435046ef9
1,433
def NETWORKDAYS(*args) -> Function: """ Returns the number of net working days between two provided days. Learn more: https://support.google.com/docs/answer/3092979 """ return Function("NETWORKDAYS", args)
f93f34ef173a6f3f552062f33b599988ea63cb8a
1,434
def calc_high_outlier(values) -> float: """Calculates the high outlier from a pandas Series""" q1, q3 = [values.quantile(x, 'midpoint') for x in (0.25, 0.75)] return q3 + 1.5 * (q3 - q1)
8ee929aec1cb4af9a90d04893f8f94444d00ad22
1,435
def get_sql_delete_by_ids(table: str, ids_length: int): """ Build the SQL string for deleting rows by their ids :param table: :return: """ # validate arguments if not table: raise ParamError(f"invalid table parameter: table={table}") if not ids_length or not isinstance(ids_length, int): raise ParamError(f"invalid ids_length parameter: ids_length={ids_length}") # prepare placeholders ids = ["%s" for _ in range(ids_length)] ids_str = ", ".join(ids) # build sql s = f"delete from {table} where id in ({ids_str});" return s
f9980c92f5fa1064a99823655be4ea8aed619db3
1,436
def Float(request): """ A simple form with a single float field """ schema = schemaish.Structure() schema.add('myFloatField', schemaish.Float()) form = formish.Form(schema, 'form') return form
18ddcaa697dca96ea321a1aba51d4d2fb0fed47c
1,437
def pca(X): """ Returns the eigenvectors U, the eigenvalues (on diagonal) in S. Args: X: array(# of training examples, n) Returns: U: array(n, n) S: array(n, n) """ # Get some useful values m, n = X.shape # Init U and S. U = np.zeros(n) S = np.zeros(n) # When computing the covariance matrix, we have # to divide by m (the number of examples). sigma = (1. / m) * np.dot(X.T, X) # Compute the eigenvectors and eigenvalues # of the covariance matrix. U, S, V = linalg.svd(sigma) S = linalg.diagsvd(S, len(S), len(S)) return U, S
cf6304615b3d75b730235f238822c347342a4cbd
1,438
import imp def read_py_version(script_name, search_path): """Read the version of a script from a python file""" file, pathname, desc = imp.find_module(script_name, [search_path]) try: new_module = imp.load_module(script_name, file, pathname, desc) if hasattr(new_module.SCRIPT, "version"): return new_module.SCRIPT.version except: pass return None
b69d9ff9f6f718c418fac1b0cd77355d8d4ffd1d
1,439
def post_check_variable(team_id, source_id, check_id): """ .. :quickref: POST; Lorem ipsum.""" if not TeamPermission.is_manager_or_editor(team_id): abort(403) payload = get_payload() payload.update({"team_id": team_id, "source_id": source_id, "check_id": check_id}) variable = VariableController.create(payload) return jsonify(format_variable(variable)), 200
5c187a5cfec19409c155068c6c8212217adb3632
1,440
import re def diff_re(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n'): """ A simple "diff" of two sets of lines when the expected lines are regular expressions. This is a really dumb thing that just compares each line in turn, so it doesn't look for chunks of matching lines and the like--but at least it lets you know exactly which line first didn't compare correctl... """ result = [] diff = len(a) - len(b) if diff < 0: a = a + [''] * (-diff) elif diff > 0: b = b + [''] * diff i = 0 for aline, bline in zip(a, b): s = "^" + aline + "$" try: expr = re.compile(s) except re.error as e: msg = "Regular expression error in %s: %s" raise re.error(msg % (repr(s), e.args[0])) if not expr.search(bline): result.append("%sc%s" % (i + 1, i + 1)) result.append('< ' + repr(a[i])) result.append('---') result.append('> ' + repr(b[i])) i = i + 1 return result
802dd3287502c3d3fe85242ba51043e4b5769cd5
1,441
import pickle def storeAgent(sess, agentObj): """ INPUT : session object OUTPUT : Updated agent Object DESCRIPTION : Updates the agent object in that session """ currAgents = getCurrGen(sess) lock(sess) try: if(sess.mode == 'SAFE'): tpfp = open(GA_UTIL_DIR+"/utilFiles/tmp"+str(agentObj.sessID)+"/dnaPool/dna" +str(agentObj.agentID)+".dna", "wb") pickle.dump(agentObj, tpfp) tpfp.close() currAgents.add(agentObj.agentID) else: sess.agentBasket[agentObj.agentID] = agentObj currAgents.add(agentObj.agentID) except Exception: print("error in store agent, couldn't wb") return(0) setCurrGen( sess, currAgents) unlock(sess) return(agentObj.agentID)
57b74d722141f008332a8fa129018f0b72fcc26d
1,442
def info(device): """ Get filesystem geometry information. CLI Example: .. code-block:: bash salt '*' xfs.info /dev/sda1 """ out = __salt__["cmd.run_all"]("xfs_info {}".format(device)) if out.get("stderr"): raise CommandExecutionError(out["stderr"].replace("xfs_info:", "").strip()) return _parse_xfs_info(out["stdout"])
96b1f91c921f607c0e348b8dd4355699fc12c5f0
1,443
from typing import Union from typing import Dict from typing import Tuple from typing import Any def serialize_framework_build_config(dict_: Union[Dict[str, str], str]) -> Tuple[Any, ...]: """Serialize a dict to a hashable tuple. Parameters ---------- dict_: Dict[str, str] Returns ------- hashable_tuple: Tuple[Any, ...] A hashable tuple. """ if isinstance(dict_, dict): return tuple(sorted(list(dict_.items()))) return (dict_,)
365b413ff21bf4fb7f5d153dbe74801ee125108f
1,444
def _check_columns(data: pd.DataFrame, features: list) -> pd.DataFrame: """ Given a dataframe and a list of expected features, print missing columns and return new dataframe with only valid features Parameters ----------- data: Pandas.DataFrame DataFrame for checking features: list list of features (column names) Returns --------- Pandas.DataFrame new 'valid' DataFrame """ valid_features = [f for f in features if f in data.columns] if len(valid_features) < len(features): print(f'The following features are missing from the training data and will be excluded from the ' f'model {list(set(features) - set(valid_features))}') return data[valid_features]
ad0c0eb17b7afeaad7505d69f77336820607d77b
1,445
def get_confidence(imgfilename): """ 1003_c60.jpg -> c6 """ if not imgfilename: return '' return 'c' + imgfilename.split('/')[-1][0:1]
7c98f2abd2119b41d7e2501823985a894da5a1a1
1,446
def get_connection(hostname: str, port: int, username: str, password: str): """ Get a connection to the DB. Returns: Connection: the connection """ return pymysql.connect( host=hostname, port=port, user=username, password=password, cursorclass=cursors.DictCursor )
c2036f9b5ea2e69e6d0cd94fdcf0aa55e69d5d6f
1,447
def get_alleles_existing_alleleinterpretation( session, allele_filter, user=None, page=None, per_page=None ): """ Returns allele_ids that has connected AlleleInterpretations, given allele_filter from argument. Supports pagination. """ # Apply filter using Allele table as base allele_ids = session.query(allele.Allele.id).filter(allele_filter) # Now get the ones that are actually connected to AlleleInterpretation # (distinct allele_ids sorted by date_last_update) alleleinterpretation_allele_ids = ( session.query(workflow.AlleleInterpretation.allele_id) .filter(workflow.AlleleInterpretation.allele_id.in_(allele_ids)) .group_by(workflow.AlleleInterpretation.allele_id) .order_by(func.max(workflow.AlleleInterpretation.date_last_update).desc()) ) count = alleleinterpretation_allele_ids.count() if page and per_page: start = (page - 1) * per_page end = page * per_page alleleinterpretation_allele_ids = alleleinterpretation_allele_ids.slice(start, end) alleleinterpretation_allele_ids = alleleinterpretation_allele_ids.all() return alleleinterpretation_allele_ids, count
d7b42ac327f284d5905c5dc5b6893cbf0c18714e
1,448
import logging def _get_session(db_uri, use_batch_mode=True, echo=False): """Helper to get an SQLAlchemy DB session""" # `use_batch_mode` is experimental currently, but needed for `executemany` #engine = create_engine(db_uri, use_batch_mode=use_batch_mode, echo=echo) engine = create_engine(db_uri, echo=echo) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() try: connection = session.connection() logging.info('Successfully connected to database.') except: raise RuntimeError(f'Couldn\'t connect to db: {db_uri}') return session
d9b3455b601c86face0683ac0d8f3d8763180093
1,449
def has_extension(experiment: Experiment, name: str) -> bool: """ Check if an extension is declared in this experiment. """ return get_extension(experiment, name) is not None
6bf7630634be8802364e1a2fa38e58df523f82d9
1,450
def min_max_median(lst): """ a function that takes a simple list of numbers lst as a parameter and returns a list with the min, max, and the median of lst. """ s = sorted(lst) n = len(s) return [ s[0], s[-1], s[n//2] if n % 2 == 1 else (s[n//2 - 1] + s[n//2]) / 2]
59b1ceef5796d77cc039a42593ddb3d1d2244bd7
1,452
import copy def assign_read_kmers_to_contigs_new(kmer_ii, ambiguous_kmer_counts, unambiguous_contig_counts, contig_abundances): """ Assign ambiguous read k-mers based on contig averages counts. """ contig_counts = copy.deepcopy(unambiguous_contig_counts) contig_location_tuples = [] total_abundance = 0 # Cycle through all ambiguous k-mers and assign them. for kmer in ambiguous_kmer_counts.keys(): # and randomly assign the count to one of the items. contig_location_tuples = kmer_ii[kmer] #print 'Kmer:\t' + kmer #print 'Count:\t' + str(ambiguous_kmer_counts[kmer]) #print 'Contig_locations:' #pprint.pprint(contig_location_tuples) # and randomly assign the count to one of the items. #contigs_containing_kmer = accumulate(kmer_ii[kmer]) #print kmer +'\t', contigs_containing_kmer = list(accumulate(contig_location_tuples)) #print contigs_containing_kmer # Calculate total abundance for contig in contigs_containing_kmer: total_abundance += contig_abundances[contig[0]] # Assign fractional counts based on total abundances. for contig in contigs_containing_kmer: #total_abundance += contig_abundances[contig[0]] #print 'Assigning\t' + str(contig_abundances[contig[0]] * ambiguous_kmer_counts[kmer] / total_abundance) + '\tto\t' + contig[0] contig_counts[contig[0]] += (contig_abundances[contig[0]] * ambiguous_kmer_counts[kmer] / total_abundance) total_abundance = 0 #for i in xrange(0, ambiguous_kmer_counts[kmer]): # contig = random.choice(contig_location_tuples)[0] # #print "Selecting contig:\t" + contig # contig_counts[contig] += 1 return contig_counts
a2d7a133183b7d020f461989065c132fb87bf336
1,454
def cremi_scores(seg, gt, border_threshold=None, return_all=True):
    """
    Compute the cremi scores (average of adapted rand error, vi-split, vi-merge)

    Parameters
    ----------
    seg: np.ndarray - the candidate segmentation
    gt: np.ndarray - the groundtruth
    border_threshold: value by which the border is eroded (default: None = no erosion)

    Returns
    -------
    cremi-score: average of rand error, vi-split, vi-merge
    vi-split: variation of information, split score
    vi-merge: variation of information, merge score
    adapted rand: adapted rand error
    """
    assert seg.shape == gt.shape, "%s, %s" % (str(seg.shape), str(gt.shape))

    # compute border threshold if specified
    if border_threshold is not None:
        xy_resolution = 4.
        gt_ = create_border_mask(gt, border_threshold / xy_resolution, np.uint64(-1))
        # add 1 to map back to 0 as lowest label
        gt_ += 1
    else:
        gt_ = gt

    # Try/except because sometimes both have nothing in them.
    try:
        vi_s, vi_m = voi(seg, gt_)
        are = adapted_rand(seg, gt_)
        cs = (vi_s + vi_m + are) / 3
    except Exception:
        cs = np.nan
        vi_s = np.nan
        vi_m = np.nan
        are = np.nan

    if return_all:
        return {'cremi-score': cs, 'vi-split': vi_s, 'vi-merge': vi_m, 'adapted_rand': are}
    else:
        return cs
9781eeb38885fe5efc3e052a15e418e39acdcc3c
1,455
def sign_transaction(transaction_dict, private_key) -> SignedTransaction:
    """
    Sign a (non-staking) transaction dictionary with the specified private key

    Parameters
    ----------
    transaction_dict: :obj:`dict` with the following keys
        nonce: :obj:`int` Transaction nonce
        gasPrice: :obj:`int` Transaction gas price in Atto
        gas: :obj:`int` Gas limit in Atto
        to: :obj:`str` Destination address
        value: :obj:`int` Amount to be transferred in Atto
        data: :obj:`str` Transaction data, used for smart contracts
        from: :obj:`str` From address, optional (if passed, must match the
            public key address generated from private_key)
        chainId: :obj:`int` One of util.chainIds.keys(), optional
            If you want to replay your transaction across networks, do not pass it
        shardID: :obj:`int` Originating shard ID, optional (needed for cx shard transaction)
        toShardID: :obj:`int` Destination shard ID, optional (needed for cx shard transaction)
        r: :obj:`int` First 32 bytes of the signature, optional
        s: :obj:`int` Next 32 bytes of the signature, optional
        v: :obj:`int` Recovery value, optional
    private_key: :obj:`str` The private key

    Returns
    -------
    A SignedTransaction object, which is a named tuple
        rawTransaction: :obj:`str` Hex bytes of the raw transaction
        hash: :obj:`str` Hex bytes of the transaction hash
        r: :obj:`int` First 32 bytes of the signature
        s: :obj:`int` Next 32 bytes of the signature
        v: :obj:`int` Recovery value

    Raises
    ------
    TypeError, if the from address specified is not the same
        one as derived from the private key
    AssertionError, if the fields for the transaction are missing,
        or if the chainId supplied is not a string,
        or if the chainId is not a key in util.py

    API Reference
    -------------
    https://readthedocs.org/projects/eth-account/downloads/pdf/stable/
    """
    account, sanitized_transaction = sanitize_transaction(transaction_dict, private_key)
    if 'to' in sanitized_transaction and sanitized_transaction['to'] is not None:
        sanitized_transaction['to'] = convert_one_to_hex(sanitized_transaction['to'])

    filled_transaction = pipe(
        # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/transactions.py#L39
        sanitized_transaction,
        dict,
        partial(merge, TRANSACTION_DEFAULTS),
        chain_id_to_v,
        apply_formatters_to_dict(HARMONY_FORMATTERS)
    )
    unsigned_transaction = serialize_transaction(filled_transaction)
    transaction_hash = unsigned_transaction.hash()

    if isinstance(unsigned_transaction, (UnsignedEthereumTxData, UnsignedHarmonyTxData)):
        chain_id = None
        # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/signing.py#L26
    else:
        chain_id = unsigned_transaction.v
    (v, r, s) = sign_transaction_hash(account._key_obj, transaction_hash, chain_id)

    encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))
    signed_transaction_hash = keccak(encoded_transaction)

    return SignedTransaction(
        rawTransaction=HexBytes(encoded_transaction),
        hash=HexBytes(signed_transaction_hash),
        r=r,
        s=s,
        v=v,
    )
735de56f1a2b9557cc09b9e589586eb92196936c
1,456
def integralHesapla(denklem):
    """
    Computes the integral using a polynomial.

    :param denklem: the polynomial whose integral will be computed.
    """
    a, b = 5, len(anaVeriler)
    deltax = 0.1
    integral = 0
    n = int((b - a) / deltax)
    for i in range(n):
        integral += deltax * (denklem.subs({x: a}) + denklem.subs({x: a + deltax})) / 2
        a += deltax
    return integral
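# --- hedged sketch (editor addition, not part of the original row) ---
# The function above depends on two globals from its original module: `x`
# (a sympy symbol) and `anaVeriler` (the data list whose length sets the
# upper bound). A self-contained version of the same trapezoidal rule, with
# those dependencies made explicit, might look like this:

import sympy

def trapezoid_integral(expr, sym, a, b, deltax=0.25):
    """Approximate the integral of `expr` in `sym` over [a, b] with the trapezoidal rule."""
    integral = 0.0
    n = int((b - a) / deltax)
    for _ in range(n):
        integral += deltax * (expr.subs({sym: a}) + expr.subs({sym: a + deltax})) / 2
        a += deltax
    return integral

# Example: the integral of t**2 over [0, 1] is 1/3; the 4-interval trapezoid
# approximation gives 0.34375.
t = sympy.Symbol('t')
print(trapezoid_integral(t**2, t, 0, 1))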
01bb7ebc5b678dc255e311c03c76415b0ac6f2db
1,457
def fmt(text, *args, **kwargs):
    """
    String formatting made easy
        text - pattern

    Examples
        fmt("The is one = %ld", 1)
        fmt("The is text = %s", 1.3)
        fmt("Using keywords: one=%(one)d, two=%(two)d", two=2, one=1)
    """
    return _fmt(text, args, kwargs)
a03a367d116bcde83bd0ff41ca8eb181af4c8aed
1,458
def value_to_class(v):
    """
    Return the label of the pixel patch, by comparing the ratio of
    foreground to FOREGROUND_THRESHOLD

    Input:
        v (numpy.ndarray): patch of a groundtruth image, size (PATCH_SIZE, PATCH_SIZE)
    Output:
        the label of the patch:
            1: foreground
            0: background
    """
    df = np.sum(v)
    if df > FOREGROUND_THRESHOLD:
        return 1
    else:
        return 0
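# --- hedged usage sketch (editor addition, not part of the original row) ---
# `np` and `FOREGROUND_THRESHOLD` are module-level names in the original
# project; the threshold value below is only an illustrative assumption.
import numpy as np

FOREGROUND_THRESHOLD = 0.25  # assumed value for illustration only

print(value_to_class(np.array([0.1, 0.05])))  # sum 0.15 <= threshold -> 0
print(value_to_class(np.array([0.2, 0.2])))   # sum 0.4  >  threshold -> 1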
fe50615d7ed3567bb3e7de8987ce07f5736a0a5c
1,459
def mc(cfg):
    """
    Return the MC (multi-corpus) AAI model, trained on the dysarthric and cross corpora.
    3 BLSTM layers, and two single linear regression output layers to obtain
    articulatory trajectories corresponding to each corpus.

    Parameters
    ----------
    cfg: main.Configuration
        user configuration file

    Returns
    -------
    Model
    """
    mdninput_Lstm = keras.Input(shape=(None, cfg.mfcc_dim))
    lstm_1 = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'], return_sequences=True))(mdninput_Lstm)
    lstm_2a = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'], return_sequences=True))(lstm_1)
    lstm_2 = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'], return_sequences=True))(lstm_2a)
    output_1 = TimeDistributed(Dense(cfg.ema_dim, activation='linear'))(lstm_2)
    output_2 = TimeDistributed(Dense(cfg.ema_dim, activation='linear'))(lstm_2)
    model = keras.models.Model(mdninput_Lstm, [output_1, output_2])
    return model
e93ca764185b9a2bd60929e9cf50574432bcf97f
1,461
from typing import Callable

import tensorflow as tf  # added: `tf` is referenced below but was not imported


def gradient_dxyz(fxyz: tf.Tensor, fn: Callable) -> tf.Tensor:
    """
    Function to calculate gradients on x,y,z-axis of a tensor using central finite difference.
    It calculates the gradient along x, y, z separately then stacks them together.

    :param fxyz: shape = (..., 3)
    :param fn: function to call
    :return: shape = (..., 3)
    """
    return tf.stack([fn(fxyz[..., i]) for i in [0, 1, 2]], axis=4)
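# --- hedged usage sketch (editor addition, not part of the original row) ---
# `fn` is expected to be a per-channel gradient op from the original project
# (e.g. a central finite difference along one spatial axis). The helper below
# is only an illustrative stand-in. With a 5-D input of shape
# (batch, x, y, z, 3), stacking on axis=4 restores the channel axis.

def central_diff_x(t: tf.Tensor) -> tf.Tensor:
    # Minimal central difference along the x axis (axis=1); shrinks x by 2.
    return (t[:, 2:, ...] - t[:, :-2, ...]) / 2.0

fxyz = tf.random.normal((2, 8, 8, 8, 3))
grad = gradient_dxyz(fxyz, central_diff_x)
print(grad.shape)  # (2, 6, 8, 8, 3)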
cc6e4660a2bfff22d04a2d05e3ff6b1dd7e5846a
1,462
import multiprocessing


def get_chunk_range():
    """
    Get the range of partitions to try.
    """
    n_chunks = multiprocessing.cpu_count()
    if n_chunks > 128:
        raise NotImplementedError('Currently we consider the num. procs in machine to '
                                  'be < 128')
    chunk_range = [n_chunks]
    while n_chunks < 128:
        n_chunks *= 2
        chunk_range += [n_chunks]
    return chunk_range
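# --- hedged usage sketch (editor addition, not part of the original row) ---
# The candidate partition counts start at the CPU count and double until they
# reach at least 128. On an 8-core machine the function returns:
#   [8, 16, 32, 64, 128]
# Note the doubling can overshoot 128 when the CPU count is not a power of
# two, e.g. 12 cores -> [12, 24, 48, 96, 192].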
4de86cbeba03550d5bcd7bae68c054495136a398
1,463
def train(data_base_path, output_dir, label_vocab_path, hparams_set_name,
          train_fold, eval_fold):
    """Constructs, trains, and evaluates a model on the given input data.

    Args:
      data_base_path: str. Directory path containing tfrecords named like
        "train", "dev" and "test"
      output_dir: str. Path to save checkpoints.
      label_vocab_path: str. Path to tsv file containing columns
        _VOCAB_ITEM_COLUMN_NAME and _VOCAB_INDEX_COLUMN_NAME. See
        testdata/label_vocab.tsv for an example.
      hparams_set_name: name of a function in the hparams module which returns
        a tf.contrib.training.HParams object.
      train_fold: fold to use for training data (one of
        protein_dataset.DATA_FOLD_VALUES)
      eval_fold: fold to use for evaluation data (one of
        protein_dataset.DATA_FOLD_VALUES)

    Returns:
      A tuple of the evaluation metrics, and the exported objects from
      Estimator.
    """
    hparams = get_hparams(hparams_set_name)
    label_vocab = parse_label_vocab(label_vocab_path)
    (estimator, train_spec, eval_spec) = _make_estimator_and_inputs(
        hparams=hparams,
        label_vocab=label_vocab,
        data_base_path=data_base_path,
        output_dir=output_dir,
        train_fold=train_fold,
        eval_fold=eval_fold)
    return tf.estimator.train_and_evaluate(
        estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
7df17924a7be5ec07009658a7aaf25e79f8f4663
1,466
def _enzyme_path_to_sequence(path, graph, enzymes_sites):
    """Converts a path of successive enzymes into a sequence."""
    return "".join(
        [enzymes_sites[path[0]]]
        + [graph[(n1, n2)]["diff"] for n1, n2 in zip(path, path[1:])]
    )
a3de9de5dc37df641e36d09d07b49c402fa17fd1
1,468
def profile_to_section(profile_name):
    """Converts a profile name to a section header to be used in the config."""
    if any(c in _WHITESPACE for c in profile_name):
        profile_name = shlex_quote(profile_name)
    return 'profile %s' % profile_name
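# --- hedged usage sketch (editor addition, not part of the original row) ---
# `_WHITESPACE` and `shlex_quote` come from the original module; assuming
# `_WHITESPACE` covers spaces and `shlex_quote` behaves like `shlex.quote`,
# names containing whitespace are quoted before being prefixed:
#
#   profile_to_section("dev")          -> "profile dev"
#   profile_to_section("my profile")   -> "profile 'my profile'"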
c9c50556409c4840c7f530e8645b52b60b3f8fa7
1,469
def BytesFromFile(filename: str) -> ByteList:
    """Read the EDID from binary blob form into list form.

    Args:
      filename: The name of the binary blob.

    Returns:
      The list of bytes that make up the EDID.
    """
    with open(filename, "rb") as f:
        chunk = f.read()
        return [int(x) for x in bytes(chunk)]
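# --- hedged usage sketch (editor addition, not part of the original row) ---
# Iterating over `bytes` in Python 3 already yields ints, so the result is a
# plain list of 0-255 values; "edid.bin" below is just a placeholder path.
#
#   edid = BytesFromFile("edid.bin")
#   assert all(0 <= b <= 255 for b in edid)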
bfb899c2d1114f43ccbe4b317496111372e4bf2c
1,470
def array_to_patches(arr, patch_shape=(3, 3, 3), extraction_step=1, normalization=False):
    # Make use of sklearn function extract_patches
    # https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/feature_extraction/image.py
    """Extracts patches of any n-dimensional array in place using strides.

    Given an n-dimensional array it will return a 2n-dimensional array with
    the first n dimensions indexing patch position and the last n indexing
    the patch content.

    Parameters
    ----------
    arr : 3darray
        3-dimensional array of which patches are to be extracted
    patch_shape : integer or tuple of length arr.ndim
        Indicates the shape of the patches to be extracted. If an
        integer is given, the shape will be a hypercube of sidelength
        given by its value.
    extraction_step : integer or tuple of length arr.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    patches : strided ndarray
        2n-dimensional array indexing patches on first n dimensions and
        containing patches on the last n dimensions. These dimensions
        are fake, but this way no data is copied. A simple reshape invokes
        a copying operation to obtain a list of patches:
        result.reshape([-1] + list(patch_shape))
    """
    patches = extract_patches(arr, patch_shape, extraction_step)
    patches = patches.reshape(-1, patch_shape[0], patch_shape[1], patch_shape[2])
    # patches = patches.reshape(patches.shape[0], -1)
    if normalization == True:
        patches -= np.mean(patches, axis=0)
        patches /= np.std(patches, axis=0)
    print('%.2d patches have been extracted' % patches.shape[0])
    return patches
2c8e3660c0a9d67794e3d0e869ee87ea690bedc2
1,471
from typing import Optional
from typing import Tuple


def directory_select(message: str, default: Optional[str] = None,
                     cli_flag: Optional[str] = None,
                     force_interactive: bool = False) -> Tuple[int, str]:
    """Display a directory selection screen.

    :param str message: prompt to give the user
    :param default: default value to return (if one exists)
    :param str cli_flag: option used to set this value with the CLI
    :param bool force_interactive: True if it's safe to prompt the user
        because it won't cause any workflow regressions

    :returns: tuple of the form (`code`, `string`) where
        `code` - display exit code
        `string` - input entered by the user
    """
    return obj.get_display().directory_select(message, default=default, cli_flag=cli_flag,
                                              force_interactive=force_interactive)
093a29a03c00c21b5612f78ac4548b0c6db6974c
1,472
def trade_from_kraken(kraken_trade):
    """Turn a kraken trade returned from kraken trade history to our common trade
    history format"""
    currency_pair = kraken_to_world_pair(kraken_trade['pair'])
    quote_currency = get_pair_position(currency_pair, 'second')
    return Trade(
        # Kraken timestamps have floating point ...
        timestamp=convert_to_int(kraken_trade['time'], accept_only_exact=False),
        pair=currency_pair,
        type=kraken_trade['type'],
        rate=FVal(kraken_trade['price']),
        cost=FVal(kraken_trade['cost']),
        cost_currency=quote_currency,
        fee=FVal(kraken_trade['fee']),
        fee_currency=quote_currency,
        amount=FVal(kraken_trade['vol']),
        location='kraken'
    )
6b18a1d396605450f1af6fc1cfb2231852c964a9
1,473
def _create_instancer_mesh(positions: np.ndarray, name="mesh_points", *, bpy):
    """Create a mesh where each point is a pseudo face (three vertices at the
    same position)."""
    assert positions.ndim == 2
    assert positions.shape[1] == 3

    if name in bpy.data.meshes:
        raise RuntimeError("Mesh '{}' already exists.".format(name))

    mesh = bpy.data.meshes.new(name=name)
    num_vertices = len(positions)
    mesh.vertices.add(num_vertices * 3)
    mesh.vertices.foreach_set("co", np.repeat(positions, 3, axis=0).reshape((-1)))

    mesh.loops.add(num_vertices * 3)
    mesh.loops.foreach_set("vertex_index", np.arange(0, 3 * num_vertices))

    loop_start = np.arange(0, 3 * num_vertices, 3, np.int32)
    loop_total = np.full(fill_value=3, shape=(num_vertices,), dtype=np.int32)
    num_loops = loop_start.shape[0]
    mesh.polygons.add(num_loops)
    mesh.polygons.foreach_set("loop_start", loop_start)
    mesh.polygons.foreach_set("loop_total", loop_total)

    mesh.update()
    mesh.validate()
    logger.info("Created instancer mesh with {} vertices.".format(len(positions)))
    return mesh
d60cd53cd6b00e0c17df8ee33be38f48aafebe8e
1,474
def bound_to_nitorch(bound, as_type='str'):
    """Convert boundary type to niTorch's convention.

    Parameters
    ----------
    bound : [list of] str or bound_like
        Boundary condition in any convention
    as_type : {'str', 'enum', 'int'}, default='str'
        Return BoundType or int rather than str

    Returns
    -------
    bound : [list of] str or BoundType
        Boundary condition in NITorch's convention
    """
    intype = type(bound)
    if not isinstance(bound, (list, tuple)):
        bound = [bound]
    obound = []
    for b in bound:
        b = b.lower() if isinstance(b, str) else b
        if b in ('replicate', 'repeat', 'border', 'nearest', BoundType.replicate):
            obound.append('replicate')
        elif b in ('zero', 'zeros', 'constant', BoundType.zero):
            obound.append('zero')
        elif b in ('dct2', 'reflect', 'reflection', 'neumann', BoundType.dct2):
            obound.append('dct2')
        elif b in ('dct1', 'mirror', BoundType.dct1):
            obound.append('dct1')
        elif b in ('dft', 'wrap', 'circular', BoundType.dft):
            obound.append('dft')
        elif b in ('dst2', 'antireflect', 'dirichlet', BoundType.dst2):
            obound.append('dst2')
        elif b in ('dst1', 'antimirror', BoundType.dst1):
            obound.append('dst1')
        else:
            raise ValueError(f'Unknown boundary condition {b}')
    if as_type in ('enum', 'int', int):
        obound = list(map(lambda b: getattr(BoundType, b), obound))
    if as_type in ('int', int):
        obound = [b.value for b in obound]
    if issubclass(intype, (list, tuple)):
        obound = intype(obound)
    else:
        obound = obound[0]
    return obound
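# --- hedged usage sketch (editor addition, not part of the original row) ---
# Assuming a BoundType enum is in scope (as in the original module), aliases
# map onto NITorch's canonical names and the input container type is kept:
#
#   bound_to_nitorch('reflect')                -> 'dct2'
#   bound_to_nitorch(['wrap', 'zeros'])        -> ['dft', 'zero']
#   bound_to_nitorch('mirror', as_type='int')  # -> BoundType.dct1.value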
9767dbc3693fd105fed9d89b15340d2ba4d1c5dd
1,475
def Amp(f: jnp.ndarray, theta: jnp.ndarray) -> jnp.ndarray:
    """
    Computes the Taylor F2 Frequency domain strain waveform with non-standard
    spin induced quadrupole moment for object two.

    Note that this waveform assumes object 1 is a BH and therefore uses the
    chi * M_total relation to find C.
    Note that this waveform also assumes that object one is the more massive.
    Therefore the more massive object is always considered a BH.

    Returns:
        Strain (array):
    """
    # (
    #     th0,
    #     th3,
    #     _,
    #     _,
    #     _,
    #     _,
    # ) = theta
    # M_chirp = (
    #     1 / (16 * pi * f[0]) * (125 / (2 * th0 ** 3)) ** (1 / 5) * C ** 3 / G
    # ) / MSUN
    # eta = (16 * pi ** 5 / 25 * th0 ** 2 / th3 ** 5) ** (1 / 3)
    # Mt = M_chirp / eta ** (3 / 5)
    # (
    #     Mt,
    #     eta,
    #     _,
    #     _,
    # ) = theta
    m1, m2, _, _ = theta
    Mt = m1 + m2
    eta = m1 * m2 / (m1 + m2) ** 2

    distance = 1.0
    pre = 3.6686934875530996e-19  # (GN*Msun/c^3)^(5/6)/Hz^(7/6)*c/Mpc/sec
    Mchirp = Mt * eta ** 0.6
    A0 = (
        Mchirp ** (5.0 / 6.0)
        / (f + 1e-100) ** (7.0 / 6.0)
        / distance
        / pi ** (2.0 / 3.0)
        * jnp.sqrt(5.0 / 24.0)
    )

    return pre * A0
d618b760d8be0fe9e597eb3a2deefec455489349
1,477
def spol(f, g):
    """
    Compute the S-polynomial of f and g.

    INPUT:

    - ``f, g`` -- polynomials

    OUTPUT: the S-polynomial of f and g

    EXAMPLES::

        sage: R.<x,y,z> = PolynomialRing(QQ)
        sage: from sage.rings.polynomial.toy_buchberger import spol
        sage: spol(x^2 - z - 1, z^2 - y - 1)
        x^2*y - z^3 + x^2 - z^2
    """
    fg_lcm = LCM(LM(f), LM(g))
    return fg_lcm//LT(f)*f - fg_lcm//LT(g)*g
fff84c00b85fda2f4ebfc3e8bbf1caa68b206490
1,479
import functools


def evaluate_baselines(experiment, seed, num_pairs, samples_per_pair, loop_size=None):
    """Helper function to evaluate the set of baselines."""
    gumbel_max_joint_fn = functools.partial(
        coupling_util.joint_from_samples,
        coupling_util.gumbel_max_sampler,
        num_samples=samples_per_pair,
        loop_size=loop_size)
    return {
        "Independent": evaluate_joint(
            lambda p, q, _: coupling_util.independent_coupling(p, q),
            experiment, seed, num_pairs),
        "ICDF": evaluate_joint(
            lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q),
            experiment, seed, num_pairs),
        "ICDF (permuted)": evaluate_joint(
            lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q),
            experiment, seed, num_pairs),
        "Gumbel-max": evaluate_joint(
            gumbel_max_joint_fn,
            experiment, seed, num_pairs,
            joint_correction_num_samples=samples_per_pair),
    }
555ea777ff1f694fd2ed2846f1e8cb1ca01cccd7
1,480
def generate_template_mask(protein):
    """Generate template mask."""
    protein['template_mask'] = np.ones(
        shape_list(protein['template_domain_names']), dtype=np.float32)
    return protein
f92304249db66b4d7a28336c60c0fd4ce803da0f
1,481
from scipy.interpolate import InterpolatedUnivariateSpline as spline

import numpy as np  # added: used below but not imported in the snippet
import quaternion   # added: numpy-quaternion, used below but not imported in the snippet


def minimal_rotation(R, t, iterations=2):
    """Adjust frame so that there is no rotation about z' axis

    The output of this function is a frame that rotates the z axis onto the
    same z' axis as the input frame, but with minimal rotation about that
    axis. This is done by pre-composing the input rotation with a rotation
    about the z axis through an angle gamma, where

        dgamma/dt = 2*(dR/dt * z * R.conjugate()).w

    This ensures that the angular velocity has no component along the z' axis.

    Note that this condition becomes easier to impose the closer the input
    rotation is to a minimally rotating frame, which means that repeated
    application of this function improves its accuracy. By default, this
    function is iterated twice, though a few more iterations may be called for.

    Parameters
    ==========
    R: quaternion array
        Time series describing rotation
    t: float array
        Corresponding times at which R is measured
    iterations: int [defaults to 2]
        Repeat the minimization to refine the result
    """
    if iterations == 0:
        return R
    R = quaternion.as_float_array(R)
    Rdot = np.empty_like(R)
    for i in range(4):
        Rdot[:, i] = spline(t, R[:, i]).derivative()(t)
    R = quaternion.from_float_array(R)
    Rdot = quaternion.from_float_array(Rdot)
    halfgammadot = quaternion.as_float_array(Rdot * quaternion.z * R.conjugate())[:, 0]
    halfgamma = spline(t, halfgammadot).antiderivative()(t)
    Rgamma = np.exp(quaternion.z * halfgamma)
    return minimal_rotation(R * Rgamma, t, iterations=iterations - 1)
a1bd333ec9a01825a5355f47a80e14e37d510fae
1,482
def get_info(api_key: hug.types.text, hug_timer=20):
    """Return 'getinfo' data from the Gridcoin Research client!"""
    if api_key == api_auth_key:
        # Valid API Key!
        response = request_json("getinfo", None)
        if response is None:
            return {'success': False, 'api_key': True}
        else:
            return {'success': True, 'api_key': True, 'result': response, 'time_taken': hug_timer}
    else:
        # Invalid API Key!
        return {'success': False, 'api_key': False}
ff4eb5df57cf9faa0464040d9f51040742a8f549
1,483
def get_output_col_names(perils, factors):
    """Column names of the output data frame that contains `perils` and `factors`"""
    return (
        PCon.RAW_STRUCT['stem']['col_names']
        + [per + PCon.OUTPUT_DEFAULTS['pf_sep'] + fac
           for per, fac in pd.MultiIndex.from_product(
               [perils, [PCon.RAW_STRUCT['bp_name']] + factors]
           )]
    )
ce455a87aeba1f7d4f02b7f1b0e25c4d3eafdd0f
1,484
def _get_real_path(workspace_path):
    """Converts the given workspace path into an absolute path.

    A tuple of a real path and an error is returned. In this tuple, either
    the real path or error is present. The error is present in the returned
    tuple either if no workspace dir is given or the generated real path is
    not under the working directory.
    """
    if not workspace_path:
        return (None, 'No path is given')
    root_dir = _get_root_dir(trailing_separator=False)
    path = _to_real_path(root_dir, workspace_path)
    return (path, None) if path.startswith(root_dir) else (None, 'Not authorized')
75e0d08b288cc947987b96787f368daeef586612
1,485
import string


def simple_caesar(txt, rot=7):
    """Caesar cipher through ASCII manipulation, lowercase only."""
    alphabet = string.ascii_lowercase                    # pick alphabet
    shifted_alphabet = alphabet[rot:] + alphabet[:rot]   # shift it
    table = str.maketrans(alphabet, shifted_alphabet)    # create mapping table
    return txt.lower().translate(table)                  # apply
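# --- hedged usage sketch (editor addition, not part of the original row) ---
# Each lowercase letter is shifted forward by `rot` positions (wrapping at
# 'z'); uppercase input is lowercased first and non-letters pass through.
#
#   simple_caesar("abc xyz")           -> "hij efg"
#   simple_caesar("Attack", rot=13)    -> "nggnpx"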
eb8d86d37d8a8902663ff68e095b3b822225859c
1,486