content : string, lengths 35 to 762k
sha1 : string, length 40
id : int64, 0 to 3.66M
import os


def merge_image_data(dir_dict, output_image_file, logg):
    """
    Merge image data in dir_dict.

    Parameters
    ----------
    dir_dict : dict
        Dictionary containing pairs of directories and associated files.
    output_image_file : str
        Output image file string.
    logg : logger
        Logger passed through to run_cmd.

    Returns
    -------
    (err_code, file_list)
        err_code : int
            Non-zero value indicates error code, or zero on success.
        file_list : list of files merged
    """
    file_list = []
    for direct in dir_dict:
        file_name = dir_dict[direct]
        if file_name != "":
            file_list.append(os.path.join(direct, file_name))
    if len(file_list) > 0:
        command_list = ["montage"] + file_list + ["-mode"] + ["Concatenate"] + [output_image_file]
        ret = run_cmd(command_list, logg)
        return (ret, file_list)
    else:
        return (-1, file_list)
77a090d08cdbfa2c4a5daf75f2393557d27105ec
1,300
def case_mc2us(x):
    """ mixed case to underscore notation """
    return case_cw2us(x)
13cd638311bea75699789a2f13b7a7d854f856bd
1,301
import argparse import sys def _parse_arguments(): """ Constructs and parses the command line arguments for eg. Returns an args object as returned by parser.parse_args(). """ parser = argparse.ArgumentParser( description='eg provides examples of common command usage.' ) parser.add_argument( '-v', '--version', action='store_true', help='Display version information about eg' ) parser.add_argument( '-f', '--config-file', help='Path to the .egrc file, if it is not in the default location.' ) parser.add_argument( '-e', '--edit', action='store_true', help="""Edit the custom examples for the given command. If editor-cmd is not set in your .egrc and $VISUAL and $EDITOR are not set, prints a message and does nothing.""" ) parser.add_argument( '--examples-dir', help='The location to the examples/ dir that ships with eg' ) parser.add_argument( '-c', '--custom-dir', help='Path to a directory containing user-defined examples.' ) parser.add_argument( '-p', '--pager-cmd', help='String literal that will be invoked to page output.' ) parser.add_argument( '-l', '--list', action='store_true', help='Show all the programs with eg entries.' ) parser.add_argument( '--color', action='store_true', dest='use_color', default=None, help='Colorize output.' ) parser.add_argument( '-s', '--squeeze', action='store_true', default=None, help='Show fewer blank lines in output.' ) parser.add_argument( '--no-color', action='store_false', dest='use_color', help='Do not colorize output.' ) parser.add_argument( 'program', nargs='?', help='The program for which to display examples.' ) args = parser.parse_args() if len(sys.argv) < 2: # Too few arguments. We can't specify this using argparse alone, so we # have to manually check. parser.print_help() parser.exit() elif not args.version and not args.list and not args.program: parser.error(_MSG_BAD_ARGS) else: return args
233dd4d9a61529aaba7c02b32d56839acc203799
1,302
def detail_url(reteta_id):
    """Return reteta detail URL"""
    return reverse('reteta:reteta-detail', args=[reteta_id])
4b7219b5e0d7ae32656766a08c34f54a02d1634e
1,303
def load_metadata_txt(file_path):
    """
    Load distortion coefficients from a text file.

    Parameters
    ----------
    file_path : str
        Path to a file.

    Returns
    -------
    tuple of floats and list
        Tuple of (xcenter, ycenter, list_fact).
    """
    if ("\\" in file_path):
        raise ValueError(
            "Please use a file path following the Unix convention")
    with open(file_path, 'r') as f:
        x = f.read().splitlines()
    list_data = []
    for i in x:
        list_data.append(float(i.split()[-1]))
    xcenter = list_data[0]
    ycenter = list_data[1]
    list_fact = list_data[2:]
    return xcenter, ycenter, list_fact
44e6319aec6d77910e15e8890bcd78ffcdca3aa4
1,304
import torch


def _output_gradient(f, loss_function, dataset, labels, out0, batch_indices, chunk):
    """
    internal function
    """
    x = _getitems(dataset, batch_indices)
    y = _getitems(labels, batch_indices)

    if out0 is not None:
        out0 = out0[batch_indices]

    out = []
    grad = 0
    loss_value = 0
    for i in [slice(i, i + chunk) for i in range(0, len(x), chunk)]:
        o = f(x[i])
        if out0 is not None:
            o = o - out0[i]
        l = loss_function(o, y[i])
        assert l.shape == (len(o),)
        l = l.sum() / len(x)
        grad += gradient(l, f.parameters())
        out.append(o)
        loss_value += l.item()
    return torch.cat(out), grad, loss_value
252f79065ce953eb99df17842d62786cebadee67
1,305
def __material_desc_dict(m, d):
    """ Unpack positions 18-34 into material specific dict. """
    return dict(zip(MD_FIELDS[m],
                    {"BK": __material_bk, "CF": __material_cf,
                     "MP": __material_mp, "MU": __material_mu,
                     "CR": __material_cr, "VM": __material_vm,
                     "MX": __material_mx}[m](d)))
9f87ce915bd5d226fa1d1ffd5991779c9a4fbdba
1,306
def toint(x):
    """Try to convert x to an integer number without raising an exception."""
    try:
        return int(x)
    except (TypeError, ValueError):
        return x
bd1a675cb3f8f5c48e36f8f405a89dc637f3f558
1,307
def obtain_time_image(x, y, centroid_x, centroid_y, psi, time_gradient, time_intercept): """Create a pulse time image for a toymodel shower. Assumes the time development occurs only along the longitudinal (major) axis of the shower, and scales linearly with distance along the axis. Parameters ---------- x : u.Quantity[length] X camera coordinate to evaluate the time at. Usually the array of pixel X positions y : u.Quantity[length] Y camera coordinate to evaluate the time at. Usually the array of pixel Y positions centroid_x : u.Quantity[length] X camera coordinate for the centroid of the shower centroid_y : u.Quantity[length] Y camera coordinate for the centroid of the shower psi : convertible to `astropy.coordinates.Angle` rotation angle about the centroid (0=x-axis) time_gradient : u.Quantity[time/length] Rate at which the time changes with distance along the shower axis time_intercept : u.Quantity[time] Pulse time at the shower centroid Returns ------- float or ndarray Pulse time in nanoseconds at (x, y) """ longitudinal, _ = camera_to_shower_coordinates(x, y, centroid_x, centroid_y, psi) longitudinal_m = longitudinal.to_value(u.m) time_gradient_ns_m = time_gradient.to_value(u.ns / u.m) time_intercept_ns = time_intercept.to_value(u.ns) return longitudinal_m * time_gradient_ns_m + time_intercept_ns
4a57399e041c0fd487fe039e5091986438d4b8b8
1,308
import re


def remove_comment(to_remove, infile):
    """Removes trailing block comments from the end of a string.

    Parameters:
        to_remove: The string to remove the comment from.
        infile: The file being read from.

    Returns:
        The parameter string with the block comment removed (if a comment was
        present in the string).
    """
    start_comment = re.search(r'\s*(\/\*|//)', to_remove)
    # Remove comments if they are in the matched group.
    if start_comment:
        end_comment = re.search(r'.*\*\/', to_remove)
        if end_comment or ('//' in to_remove and not '/*' in to_remove):
            removed = to_remove[:start_comment.start(0)] + '\n'
            return removed
        while not end_comment:
            to_remove = next(infile)
            end_comment = re.search(r'.*\*\/', to_remove)
        return ''
    else:
        removed = to_remove
    return removed
0172b295c9a023eb96fbad7a6c3a388874e106bc
1,309
def generate_notification_header(obj):
    """
    Generates notification header information based upon the object -- this is
    used to preface the notification's context.

    Could possibly be used for "Favorites" descriptions as well.

    :param obj: The top-level object instantiated class.
    :type obj: class which inherits from
               :class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
    :returns: str with a human readable identification of the object
    """
    generate_notification_header_handler = NotificationHeaderManager.get_header_handler(obj._meta['crits_type'])

    if generate_notification_header_handler is not None:
        return generate_notification_header_handler(obj)
    else:
        return "%s: %s" % (obj._meta['crits_type'], str(obj.id))
e02c2bdd9827077a49236ed7aa813458659f453c
1,310
def promptyn(msg, default=None):
    """ Display a blocking prompt until the user confirms """
    while True:
        yes = "Y" if default else "y"
        if default or default is None:
            no = "n"
        else:
            no = "N"
        confirm = raw_input("%s [%s/%s]" % (msg, yes, no))
        confirm = confirm.lower().strip()
        if confirm == "y" or confirm == "yes":
            return True
        elif confirm == "n" or confirm == "no":
            return False
        elif len(confirm) == 0 and default is not None:
            return default
1bec535462b8e859bac32c424e8500c432eb7751
1,311
def plan_launch_spec(state): """ Read current job params, and prescribe the next training job to launch """ last_run_spec = state['run_spec'] last_warmup_rate = last_run_spec['warmup_learning_rate'] add_batch_norm = last_run_spec['add_batch_norm'] learning_rate = last_run_spec['learning_rate'] if last_warmup_rate / 5 >= 1e-3: logger.info('Reducing warmup rate by 1/5') state['history']['num_warmup_adjustments'] += 1 state['run_spec']['warmup_learning_rate'] = last_warmup_rate * 0.5 state['next_action'] = 'launch_new' elif add_batch_norm == 0: logger.info('Adding batch normalization layer') state['history']['num_batch_layer_adjustments'] += 1 state['run_spec']['add_batch_norm'] = 1 # we are only changing the model by adding batch layers # prior to ELU. But can make more tweaks here. state['next_action'] = 'launch_new' elif learning_rate * 0.9 > 0.001: state['run_spec']['learning_rate'] = learning_rate * 0.9 state['history']['num_learning_rate_adjustments'] += 1 state['next_action'] = 'launch_new' else: state['next_action'] = 'end' return state
5fee797f24db05eccb49a5b10a9d88917987f905
1,312
def ssgenTxOut0(): """ ssgenTxOut0 is the 0th position output in a valid SSGen tx used to test out the IsSSGen function """ # fmt: off return msgtx.TxOut( value=0x00000000, # 0 version=0x0000, pkScript=ByteArray( [ 0x6a, # OP_RETURN 0x24, # 36 bytes to be pushed 0x94, 0x8c, 0x76, 0x5a, # 32 byte hash 0x69, 0x14, 0xd4, 0x3f, 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0xda, 0x2c, 0x2f, 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x52, 0xde, 0x3d, 0x7c, 0x00, 0xe3, 0x23, 0x21, # 4 byte height ] ), ) # fmt: on
3bee03ef9bc3a326fff381b6d2594c3ea4c909e7
1,313
def sexag_to_dec(sexag_unit):
    """ Converts Latitude and Longitude Coordinates from the Sexagesimal
    Notation to the Decimal/Degree Notation"""
    add_to_degree = (sexag_unit[1] + (sexag_unit[2]/60))/60
    return sexag_unit[0]+add_to_degree
c9c4394920d2b483332eb4a81c0f0d9010179339
1,314
import apysc as ap from typing import Any from typing import Tuple def is_immutable_type(value: Any) -> bool: """ Get a boolean value whether specified value is immutable type or not. Notes ----- apysc's value types, such as the `Int`, are checked as immutable since these js types are immutable. Parameters ---------- value : Any Target value to check. Returns ------- result : bool If a specified value is immutable, then True will be set. """ immutable_types: Tuple = ( int, float, bool, str, complex, tuple, range, bytes, ap.Int, ap.Number, ap.String, ap.Boolean, ) if isinstance(value, immutable_types): return True return False
79538477528df2e13eaf806231e2f43c756abacd
1,315
import numpy as np
import pandas as pd


def add_column_node_type(df: pd.DataFrame) -> pd.DataFrame:
    """Add column `node_type` indicating whether a post is a parent or a leaf node

    Args:
        df: The posts DataFrame with the columns `id_post` and `id_parent_post`.

    Returns:
        df: A copy of df, extended by `node_type`.
    """
    if "node_type" not in df.columns:
        df_parent_posts = pd.DataFrame(
            {"id_post": df.query("id_parent_post == id_parent_post").id_parent_post.unique()}
        )
        df_parent_posts["node_type"] = "parent"
        return df.merge(df_parent_posts, how="left", on="id_post").replace({"node_type": np.nan}, "leaf")
    else:
        return df.copy()
3ad8a12f1a872d36a14257bdaa38229768714fa5
1,316
import random


def read_motifs(fmotif):
    """
    create a random pool of motifs to choose from for the monte-carlo
    simulations
    """
    motif_pool = []
    for line in open(fmotif):
        if not line.strip():
            continue
        if line[0] == "#":
            continue
        motif, count = line.rstrip().split()
        motif_pool.extend(motif * int(count))
    random.shuffle(motif_pool)
    return motif_pool
168a7f82727917aa5ca1a30b9aa9df1699261585
1,317
from App import Proxys
import math


def createCone( axis=1, basePos=-1, tipPos=1, radius=1, colour=(0.6,0.6,0.6), moiScale = 1, withMesh = True, **kwargs ):
    """
    Create a rigid body for a cone with the specified attributes (axis is 0:x, 1:y, 2:z).
    Other rigid body parameters can be specified with keyword arguments, look at
    App.Proxys.RigidBody for more details on available arguments. The following
    arguments will not be used: meshes, moi, cdps. If a negative mass parameter is
    specified, it will be scaled by the box volume and made positive.
    """
    _fixMass( kwargs, math.pi*radius*radius*math.fabs(tipPos-basePos) )
    proxy = Proxys.RigidBody( **kwargs )
    return _createCone( proxy, axis, basePos, tipPos, radius, colour, moiScale, withMesh )
43a7e0134627ed8069359c29bc53f354d70498d9
1,318
from typing import Union def ef(candles: np.ndarray, lp_per: int = 10, hp_per: int = 30, f_type: str = "Ehlers", normalize: bool = False, source_type: str = "close", sequential: bool = False) -> Union[ float, np.ndarray]: # added to definition : use_comp: bool = False, comp_intensity: float = 90.0, """ https://www.tradingview.com/script/kPe86Nbc-Roofing-Filter-DW/ compression function not working """ candles = slice_candles(candles, sequential) source = get_candle_source(candles, source_type=source_type) if f_type == "Ehlers": roof = erf( source, hp_per, lp_per) elif f_type == "Gaussian": roof = grf( source, hp_per, lp_per) elif f_type == "Butterworth": roof = brf( source, hp_per, lp_per) rms = RMS(source, roof, np.round((hp_per + lp_per)/2)) if roof[-1] > 0: norm_roof = roof/rms elif roof[-1] < 0: norm_roof = -np.abs(roof)/rms else: norm_roof = 0 if normalize: filt = norm_roof else: filt = roof if sequential: return filt else: return filt[-1]
6dd19e9a1cb5a8f293f4ec3eebef625e2b05bcfe
1,319
from typing import Dict


def parse_displays(config: Dict) -> Dict[str, QueryDisplay]:
    """Parse display options from configuration."""
    display_configs = config.get("displays")
    if not display_configs:
        return {}

    displays = {}
    for name, display_config in display_configs.items():
        displays[name] = QueryDisplay(
            name=name,
            nrql=display_config.get("nrql"),
            visualization=WidgetVisualization.from_str(display_config["visualization"]),
        )
    return displays
a7f3c32d3ceaf6c39ea16ee7e2f7ec843036487e
1,320
async def update_result(user: dict, form: dict) -> str:
    """Extract form data and update one result and corresponding start event."""
    informasjon = await create_finish_time_events(user, "finish_bib", form)  # type: ignore
    return informasjon
b9b97f3b08f08dc35a0744f38323d76ecb0c3fba
1,321
from typing import List import torch import copy def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask
98a35b477338f0f472d34b49f4be9f9cd0303654
1,322
def has_ao_num(trexio_file) -> bool: """Check that ao_num variable exists in the TREXIO file. Parameter is a ~TREXIO File~ object that has been created by a call to ~open~ function. Returns: True if the variable exists, False otherwise Raises: - Exception from trexio.Error class if TREXIO return code ~rc~ is TREXIO_FAILURE and prints the error message using string_of_error. - Exception from some other error (e.g. RuntimeError). """ try: rc = pytr.trexio_has_ao_num(trexio_file.pytrexio_s) if rc == TREXIO_FAILURE: raise Error(rc) except: raise if rc == TREXIO_SUCCESS: return True else: return False
6a10204cc5d64a71e991fed1e43fd9ff81a250b9
1,323
def teapot(size=1.0): """ Z-axis aligned Utah teapot Parameters ---------- size : float Relative size of the teapot. """ vertices, indices = data.get("teapot.obj") xmin = vertices["position"][:,0].min() xmax = vertices["position"][:,0].max() ymin = vertices["position"][:,1].min() ymax = vertices["position"][:,1].max() zmin = vertices["position"][:,2].min() zmax = vertices["position"][:,2].max() # Centering vertices["position"][:,0] -= xmin + (xmax-xmin)/2 vertices["position"][:,1] -= ymin + (ymax-ymin)/2 vertices["position"][:,2] -= zmin + (zmax-zmin)/2 # Rotation to align on Z-axis X = vertices["position"][:,0].copy() Y = vertices["position"][:,1].copy() Z = vertices["position"][:,2].copy() NX = vertices["normal"][:,0].copy() NY = vertices["normal"][:,1].copy() NZ = vertices["normal"][:,2].copy() vertices["position"][:,0] = X vertices["position"][:,1] = Z vertices["position"][:,2] = Y vertices["normal"][:,0] = NX vertices["normal"][:,1] = NZ vertices["normal"][:,2] = NY # Scaling according to height vertices["position"] *= 2.0*size/(zmax-zmin) return vertices, indices
94cef5111384599f74bfe59fb97ba417c738ca50
1,324
def f30(x, rotations=None, shifts=None, shuffles=None): """ Composition Function 10 (N=3) Args: x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100. rotations (matrix): Optional rotation matrices (NxDxD). If None (default), the official matrices from the benchmark suite will be used. shifts (array): Optional shift vectors (NxD). If None (default), the official vectors from the benchmark suite will be used. shuffles (array): Optional shuffle vectors (NxD). If None (default), the official permutation vectors from the benchmark suite will be used. """ nx = len(x) if rotations is None: rotations = transforms.rotations_cf[nx][9] if shifts is None: shifts = transforms.shifts_cf[9] if shuffles is None: shuffles = transforms.shuffles_cf[nx][1] N = 3 funcs = [hybrid.f15, hybrid.f18, hybrid.f19] sigmas = np.array([10.0, 30.0, 50.0]) biases = np.array([0.0, 100.0, 200.0]) offsets = np.array( [1500, 1800, 1900] ) # subtract F* added at the end of the functions vals = np.zeros(N) w = np.zeros(N) w_sm = 0.0 for i in range(0, N): x_shifted = x - shifts[i][:nx] vals[i] = funcs[i]( x, rotation=rotations[i], shift=shifts[i][:nx], shuffle=shuffles[i] ) vals[i] -= offsets[i] w[i] = _calc_w(x_shifted, sigmas[i]) w_sm += w[i] if w_sm != 0.0: w /= w_sm else: w = np.full(N, 1 / N) return np.sum(w * (vals + biases)) + 3000
d2bfe7a0bba501e1d7d5bcf29475ecc36f73913b
1,325
def loadNode( collada, node, localscope ): """Generic scene node loading from a xml `node` and a `collada` object. Knowing the supported nodes, create the appropiate class for the given node and return it. """ if node.tag == tag('node'): return Node.load(collada, node, localscope) elif node.tag == tag('translate'): return TranslateTransform.load(collada, node) elif node.tag == tag('rotate'): return RotateTransform.load(collada, node) elif node.tag == tag('scale'): return ScaleTransform.load(collada, node) elif node.tag == tag('matrix'): return MatrixTransform.load(collada, node) elif node.tag == tag('lookat'): return LookAtTransform.load(collada, node) elif node.tag == tag('instance_geometry'): return GeometryNode.load(collada, node) elif node.tag == tag('instance_camera'): return CameraNode.load(collada, node) elif node.tag == tag('instance_light'): return LightNode.load(collada, node) elif node.tag == tag('instance_controller'): return ControllerNode.load(collada, node) elif node.tag == tag('instance_node'): return NodeNode.load(collada, node, localscope) elif node.tag == tag('extra'): return ExtraNode.load(collada, node) elif node.tag == tag('asset'): return None else: raise DaeUnsupportedError('Unknown scene node %s' % str(node.tag))
68083c4490e44e71f33d1221776837f2c1d59b69
1,326
def create_xla_tff_computation(xla_computation, type_spec):
    """Creates an XLA TFF computation.

    Args:
        xla_computation: An instance of `xla_client.XlaComputation`.
        type_spec: The TFF type of the computation to be constructed.

    Returns:
        An instance of `pb.Computation`.
    """
    py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
    py_typecheck.check_type(type_spec, computation_types.FunctionType)
    return pb.Computation(
        type=type_serialization.serialize_type(type_spec),
        xla=pb.Xla(
            hlo_module=pack_xla_computation(xla_computation),
            parameter=_make_xla_binding_for_type(type_spec.parameter),
            result=_make_xla_binding_for_type(type_spec.result)))
5a02051913026029cab95d12199eb321fa511654
1,327
def render_contact_form(context):
    """
    Renders the contact form which must be in the template context.

    The most common use case for this template tag is to call it in the
    template rendered by :class:`~envelope.views.ContactView`. The template
    tag will then render a sub-template ``envelope/contact_form.html``.

    .. versionadded:: 0.7.0
    """
    try:
        form = context['form']
    except KeyError:
        raise template.TemplateSyntaxError("There is no 'form' variable in the template context.")
    return {
        'form': form,
    }
e243502fadbf094ed7277ec5db770a3b209174e2
1,328
from typing import List from typing import Dict def get_basic_project(reviews: int = 0) -> List[Dict]: """Get basic project config with reviews.""" reviews = max(reviews, MIN_REVIEW) reviews = min(reviews, MAX_REVIEW) middle_stages, entry_point = _get_middle_stages(reviews, OUTPUT_NAME) input_stage = { "brickName": "labelset-input", "routing": { "nextStageName": entry_point, }, "stageName": "Input", "stageConfig": {}, } output_stage = { "brickName": "labelset-output", "stageName": OUTPUT_NAME, "routing": { "nextStageName": "END", }, "stageConfig": {}, } temp = [input_stage] + middle_stages + [output_stage] return temp
14c2252dec69ebbcec04fbd00de0fa5ac6d1cdf7
1,329
import re def choose_quality(link, name=None, selected_link=None): """ choose quality for scraping Keyword Arguments: link -- Jenitem link with sublinks name -- Name to display in dialog (default None) """ if name is None: name = xbmc.getInfoLabel('listitem.label') if link.startswith("http") or link.startswith("plugin"): sublinks = [link] else: jen_link = JenItem(link) sublinks = jen_link.getAll("sublink") if not sublinks: sublinks = [jen_link] links = [] message = get_link_message() if selected_link is None: default_link = ADDON.getSetting("default_link") else: default_link = selected_link link_dialog = ADDON.getSetting("use_link_dialog") == "true" direct_links = False for sublink in sublinks: if link_dialog and "search" in sublink: continue if "searchsd" in sublink: if default_link == "SD": return sublink label = 'SD' if message['SD'] != '': label += ' (%s)' % message['SD'] new_item = (label, sublink) elif "search" in sublink: if default_link == "HD": return sublink label = 'HD' if message['HD'] != '': label += ' (%s)' % message['HD'] new_item = (label, sublink) else: direct_links = True match = re.findall("(.*?)\((.*?)\)", sublink) if match: new_item = ('%s' % match[0][1], match[0][0]) else: new_item = ('Link %s' % (int(sublinks.index(sublink)) + 1), sublink) links.append(new_item) if link_dialog and (not direct_links or len(sublinks) > 1): links.append(("Search", "search")) if len(links) == 1: url = links[0][1] return url select = xbmcgui.Dialog().select(name, [i[0] for i in links]) if select == -1: return False else: url = links[select][1] return url
a75214cd0acd1c0e3ede34241baeb07342aadb1b
1,330
def picp_loss(target, predictions, total=True):
    """
    Calculate 1 - PICP (see eval_metrics.picp for more details)

    Parameters
    ----------
    target : torch.Tensor
        The true values of the target variable
    predictions : list
        - predictions[0] = y_pred_upper, predicted upper limit of the target variable (torch.Tensor)
        - predictions[1] = y_pred_lower, predicted lower limit of the target variable (torch.Tensor)
    total : bool, default = True
        - When total is set to True, return a scalar value for 1 - PICP
        - When total is set to False, return 1 - PICP along the horizon

    Returns
    -------
    torch.Tensor
        Returns 1 - PICP, either as a scalar or over the horizon
    """
    return 1 - picp(target, predictions, total)
a6d8d150241b1a2f8dda00c9c182ba7196c65585
1,331
import os


def find_files_to_upload(upload_dir):
    """ Find the files which are named correctly and have a .asc file """
    files = []
    for name in os.listdir(upload_dir):
        asc_file = os.path.join(upload_dir, "{}.asc".format(name))
        if valid_format(name) and os.path.isfile(asc_file):
            files.extend([name, "{}.asc".format(name)])
    return files
f5647e7c30af16abe21ace1a9b488a947e4ecdb8
1,332
def index_wrap(data, index):
    """
    Description: Select an index from an array data
    :param data: array data
    :param index: index (e.g. 1,2,3, account_data,..)
    :return: Data inside the position index
    """
    return data[index]
42b53f1d9edf237b904f822c15ad1f1b930aa69c
1,333
def import_operand_definition( defdict, yaml, key, base_module, regs, force=False ): """ :param defdict: :param yaml: :param key: :param base_module: :param regs: """ try: entry = defdict[key] except KeyError: raise MicroprobeArchitectureDefinitionError( "'%s' key in %s " "file missing or not defined " "correctly." % (key, yaml) ) filenames = [yaml] + entry["YAML"] cache_filename = cache_file("%s.Operand" % (yaml)) result = update_cache_needed(filenames, cachefile=cache_filename) result = result or force entry["YAML_inherits"] = entry.get("YAML_inherits", []) if not result: LOG.debug("Reading cache contents for Operand") try: return read_cache_data(cache_filename), result except ImportError: LOG.exception("Unable to read cache contents for Operand") except MicroprobeCacheError: LOG.debug("Cache error when reading cache contents for Operand") try: data = base_module.import_definition( entry["YAML"], entry["YAML_inherits"], regs ) except KeyError: raise MicroprobeArchitectureDefinitionError( "'%s' key in %s " "file missing or not defined " "correctly." % (key, yaml) ) try: write_cache_data(cache_filename, data) except MicroprobeCacheError: LOG.debug("Cache error when writing cache contents for Operand") return data, result
79acc3d03ec3758cfe03be0f3cf3cd0bc4679c25
1,334
import pandas as pd
from pyteomics import mzml


def mzml_to_pandas_df(filename):
    """
    Reads mzML file and returns a pandas.DataFrame.
    """
    cols = ["retentionTime", "m/z array", "intensity array"]
    slices = []
    file = mzml.MzML(filename)
    while True:
        try:
            data = next(file)
            data["retentionTime"] = data["scanList"]["scan"][0]["scan time"] / 60
            del data["scanList"]
            slices.append(pd.DataFrame(data))
        except Exception:
            break
    df = pd.concat(slices)[cols]
    df_to_numeric(df)
    return df
2c6f1956d7c499c9f22bc85665bd6b5ce9ed51c3
1,335
def metadata_volumes(response: Response, request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST), sourcetype: str=Query(None, title=opasConfig.TITLE_SOURCETYPE, description=opasConfig.DESCRIPTION_PARAM_SOURCETYPE), sourcecode: str=Query(None, title=opasConfig.TITLE_SOURCECODE, description=opasConfig.DESCRIPTION_SOURCECODE), limit: int=Query(200, title=opasConfig.TITLE_LIMIT, description=opasConfig.DESCRIPTION_LIMIT), offset: int=Query(0, title=opasConfig.TITLE_OFFSET, description=opasConfig.DESCRIPTION_OFFSET), ): """ ## Function <b>Return a list of volumes for a SourceCode (aka, PEPCode (e.g., IJP)) per the limit and offset parameters</b> ## Return Type models.JournalInfoList ## Status This endpoint is working. ## Sample Call http://localhost:9100/v1/Metadata/Volumes/CPS/ ## Notes ## Potential Errors """ ocd, session_info = opasAPISupportLib.get_session_info(request, response) # Solr is case sensitive, make sure arg is upper try: source_code = sourcecode.upper() except: source_code = None src_exists = ocd.get_sources(source_code=source_code) if not src_exists[0] and source_code != "*" and source_code != "ZBK" and source_code is not None: # ZBK not in productbase table without booknum response.status_code = httpCodes.HTTP_400_BAD_REQUEST status_message = f"Failure: Bad SourceCode {source_code}" ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX, session_info=session_info, params=request.url._url, item_of_interest=f"{source_code}", return_status_code=response.status_code, status_message=status_message ) raise HTTPException( status_code=response.status_code, detail=status_message ) else: try: ret_val = opasAPISupportLib.metadata_get_volumes(source_code, source_type=sourcetype, req_url=request.url, limit=limit, offset=offset) except Exception as e: response.status_code = httpCodes.HTTP_400_BAD_REQUEST, status_message = "Error: {}".format(e) ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX, session_info=session_info, params=request.url._url, item_of_interest=f"{source_code}", return_status_code=response.status_code, status_message=status_message ) raise HTTPException( status_code=response.status_code, detail=status_message ) else: response.status_code = httpCodes.HTTP_200_OK status_message = opasCentralDBLib.API_STATUS_SUCCESS # 2020-07-23 No need to log success for these, can be excessive. #ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX, #session_info=session_info, #params=request.url._url, #item_of_interest=f"{source_code}", #return_status_code=response.status_code, #status_message=status_message #) return ret_val
e8e4a686eaac21b20f2d758b8bc7de74d38571ab
1,336
def do_step_right(pos: int, step: int, width: int) -> int:
    """Take the current position and move `step` places to the right.
    Wraps around when the board limit on the right is reached."""
    new_pos = (pos + step) % width
    return new_pos
530f3760bab00a7b943314ca735c3a11343b87f5
1,337
def log_agm(x, prec): """ Fixed-point computation of -log(x) = log(1/x), suitable for large precision. It is required that 0 < x < 1. The algorithm used is the Sasaki-Kanada formula -log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1] For faster convergence in the theta functions, x should be chosen closer to 0. Guard bits must be added by the caller. HYPOTHESIS: if x = 2^(-n), n bits need to be added to account for the truncation to a fixed-point number, and this is the only significant cancellation error. The number of bits lost to roundoff is small and can be considered constant. [1] Richard P. Brent, "Fast Algorithms for High-Precision Computation of Elementary Functions (extended abstract)", http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf """ x2 = (x*x) >> prec # Compute jtheta2(x)**2 s = a = b = x2 while a: b = (b*x2) >> prec a = (a*b) >> prec s += a s += (MPZ_ONE<<prec) s = (s*s)>>(prec-2) s = (s*isqrt_fast(x<<prec))>>prec # Compute jtheta3(x)**2 t = a = b = x while a: b = (b*x2) >> prec a = (a*b) >> prec t += a t = (MPZ_ONE<<prec) + (t<<1) t = (t*t)>>prec # Final formula p = agm_fixed(s, t, prec) return (pi_fixed(prec) << prec) // p
e873db3a45270eb077d9dc17f2951e2e791ad601
1,338
import unicodedata


def simplify_name(name):
    """Converts the `name` to lower-case ASCII for fuzzy comparisons."""
    return unicodedata.normalize('NFKD', name.lower()).encode('ascii', 'ignore')
a7c01471245e738fce8ab441e3a23cc0a67c71be
1,339
async def parse_regex(opsdroid, skills, message): """Parse a message against all regex skills.""" matched_skills = [] for skill in skills: for matcher in skill.matchers: if "regex" in matcher: opts = matcher["regex"] matched_regex = await match_regex(message.text, opts) if matched_regex: message.regex = matched_regex for regroup, value in matched_regex.groupdict().items(): message.update_entity(regroup, value, None) matched_skills.append( { "score": await calculate_score( opts["expression"], opts["score_factor"] ), "skill": skill, "config": skill.config, "message": message, } ) return matched_skills
aa3ad8ff48854b974ba90135b510074644e10028
1,340
import numpy as np
from scipy.interpolate import interp1d


def interpolate_minusones(y):
    """
    Replace -1 in the array by the interpolation between their neighbor non zeros points

    y is a [t] x [n] array
    """
    x = np.arange(y.shape[0])
    ynew = np.zeros(y.shape)
    for ni in range(y.shape[1]):
        idx = np.where(y[:, ni] != -1)[0]
        if len(idx) > 1:
            last_value = y[idx[-1], ni]
            interp = interp1d(x[idx], y[idx, ni], kind='previous',
                              fill_value=(0, last_value), bounds_error=False)
            ynew[:, ni] = interp(x)
        elif len(idx) == 1:
            last_value = y[idx[-1], ni]
            ynew[:, ni] = last_value
    return ynew
db3e347ba75a39f40cd3ee90481efe8392ce08ed
1,341
def precision(y, yhat, positive=True):
    """Returns the precision (higher is better).

    :param y: true function values
    :param yhat: predicted function values
    :param positive: the positive label
    :returns: number of true positive predictions / number of positive predictions
    """
    table = contingency_table(y, yhat, positive)
    return _precision(table)
f643631781565ddb049c1c4d22c6e5ea64ce4a22
1,342
def add_posibility_for_red_cross(svg):
    """add a symbol which represents a red cross in a white circle

    Arguments:
        svg {Svg} -- root element
    """
    symbol = Svg(etree.SubElement(svg.root, 'symbol', {
        'id': 'red_cross',
        'view_box': '0 0 20 20'
    }))
    symbol.create_circle(
        [10, 10], 9, "red_cross_circle",
        fill_colour="white",
        additional_arguments={'stroke': 'black'}
    )
    symbol.create_rectangle(
        [4, 8], [12, 4], "red_cross_rect_1", fill_colour="red")
    symbol.create_rectangle(
        [8, 4], [4, 12], "red_cross_rect_2", fill_colour="red")
    return symbol
df621fb907187a36cb3f7387047a8cda6cb42992
1,343
import functools
import sys


def exceptions2exit(exception_list):
    """ Decorator to convert given exceptions to exit messages

    This avoids displaying nasty stack traces to end-users

    :param exception_list: list of exceptions to convert
    """
    def exceptions2exit_decorator(func):
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except tuple(exception_list) as e:
                print("ERROR: {}".format(e))
                sys.exit(1)
        return func_wrapper
    return exceptions2exit_decorator
8a73c334f03b81f8c96fd1d4e6691031f26c9896
1,344
def getTestSuite(select="unit"): """ Get test suite select is one of the following: "unit" return suite of unit tests only "component" return suite of unit and component tests "all" return suite of unit, component and integration tests "pending" return suite of pending tests name a single named test to be run """ testdict = { "unit": [ "testDummy" ], "zzcomponent": [ "testDummy" ], "integration": [ "testLoad" , "testStartStop" , "testVolume" ], "pending": [ "testDummy" ] } return TestUtils.getTestSuite(testITunes, testdict, select=select)
529cb8d6312eaa129a52f1679294d85c1d9bfbd0
1,345
import ctypes


def dasopw(fname):
    """
    Open a DAS file for writing.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopw_c.html

    :param fname: Name of a DAS file to be opened.
    :type fname: str
    :return: Handle assigned to the opened DAS file.
    """
    fname = stypes.stringToCharP(fname)
    handle = ctypes.c_int(0)
    libspice.dasopw_c(fname, ctypes.byref(handle))
    return handle.value
63f164ba82e6e135763969c8823d7eb46dd52c0e
1,346
import re def is_ncname(value): """ BNode identifiers must be valid NCNames. From the `W3C RDF Syntax doc <http://www.w3.org/TR/REC-rdf-syntax/#section-blank-nodeid-event>`_ "The value is a function of the value of the ``identifier`` accessor. The string value begins with "_:" and the entire value MUST match the `N-Triples nodeID <http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#nodeID>`_ production". The nodeID production is specified to be a `name <http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#name>`_ name ::= [A-Za-z][A-Za-z0-9]* >>> assert is_ncname('') == False >>> assert is_ncname('999') == False >>> assert is_ncname('x') == True >>> assert is_ncname(u'x') == True >>> assert is_ncname(u'Michèle') == True However, vanilla uuid4s are not necessarily NCNames: >>> assert is_ncname('6fa459ea-ee8a-3ca4-894e-db77e160355e') == False So this has to be finessed with an appropriate prefix ... >>> assert is_ncname("urn:uuid:"+str(uuid4())) == True >>> from rdflib import BNode >>> assert is_ncname(BNode(_sn_gen=bnode_uuid, _prefix="urn:uuid:")) == True """ ncnameexp = re.compile("[A-Za-z][A-Za-z0-9]*") if ncnameexp.match(value): return True else: return False
78cbfe9209b9f39cd6bc90c0ed5c8e5291bc1562
1,347
def health_func() -> str:
    """Give the user the API health."""
    return "ok"
5c14795d9d0560ddb34b193575917ac184dbe8a3
1,348
def queue_worker(decoy: Decoy) -> QueueWorker:
    """Get a mock QueueWorker."""
    return decoy.mock(cls=QueueWorker)
aec88b037e393b195abd0c2704e8f2784e9a9f8d
1,349
def astra_fp_3d(volume, proj_geom): """ :param proj_geom: :param volume: :return:3D sinogram """ detector_size = volume.shape[1] slices_number = volume.shape[0] rec_size = detector_size vol_geom = build_volume_geometry_3d(rec_size, slices_number) sinogram_id = astra.data3d.create('-sino', proj_geom) # Create a data object for the reconstruction rec_id = astra.data3d.create('-vol', vol_geom, data=volume) # Set up the parameters for a reconstruction algorithm using the GPU cfg = astra.astra_dict('FP3D_CUDA') cfg['VolumeDataId'] = rec_id cfg['ProjectionDataId'] = sinogram_id cfg['option'] = {} alg_id = astra.algorithm.create(cfg) astra.algorithm.run(alg_id, 1) res_sino = astra.data3d.get(sinogram_id) # Clean up. Note that GPU memory is tied up in the algorithm object, # and main RAM in the data objects. astra.algorithm.delete(alg_id) astra.data3d.delete(rec_id) astra.data3d.delete(sinogram_id) astra.clear() return res_sino
7066bb61dc29fac331ffb13c6fe1432349eac185
1,350
def get_wf_neb_from_images( parent, images, user_incar_settings, additional_spec=None, user_kpoints_settings=None, additional_cust_args=None, ): """ Get a CI-NEB workflow from given images. Workflow: NEB_1 -- NEB_2 - ... - NEB_n Args: parent (Structure): parent structure. images ([Structure]): All images and two endpoints. user_incar_settings([dict]): Additional user_incar_settings. Note that the order of the list is set as: "parent", "ep_relax", "neb1", "neb2" etc., which contains at least three elements. The first dict is for parent structure relaxation, the second dict is for endpoints relaxation, and the rest are for NEB calculations. For example, [{}, {}, {"IOPT": 7}, {"IOPT": 1}]. Besides, user_incar_settings is used to determine how many NEB rounds will be. Default is [{}, {}, {}]. additional_spec (dict): User spec settings to overwrite default_spec. user_kpoints_settings ([dict]): Additional user_kpoints_settings, which contains at at least three elements, which is similar to user_incar_settings. For example, [{}, {}, {"grid_density": 100}] for the workflow from the parent structure relaxation, then the endpoint relaxation followed by one-round NEB simulation. Default values depend on the selected VaspInputSet. additional_cust_args ([dict]): Optional parameters for RunVaspCustodian, same structure with user_incar_settings and user_kpoints_settings. Returns: Workflow """ spec = _update_spec(additional_spec) spec["parent"] = parent.as_dict() assert isinstance(images, list) and len(images) >= 3 spec["neb"] = [[s.as_dict() for s in images]] spec["_queueadapter"] = { "nnodes": str(len(images) - 2), "nodes": str(len(images) - 2), } if spec["neb_walltime"] is not None: spec["_queueadapter"].update({"walltime": spec.get("neb_walltime")}) wf_name = spec["wf_name"] # Assume one round NEB if user_incar_settings not provided. user_incar_settings = user_incar_settings or [{}, {}, {}] neb_round = len(user_incar_settings[2:]) user_kpoints_settings = user_kpoints_settings or [{"grid_density": 1000}] * ( neb_round + 2 ) additional_cust_args = additional_cust_args or [{}] * (neb_round + 2) fws = [] # Get neb fireworks. for n in range(neb_round): fw = NEBFW( spec=spec, neb_label=str(n + 1), from_images=True, user_incar_settings=user_incar_settings[n + 2], user_kpoints_settings=user_kpoints_settings[n + 2], additional_cust_args=additional_cust_args[n + 2], ) fws.append(fw) # Build fireworks link links_dict = {} if neb_round >= 2: for i in range(neb_round - 1): links_dict[fws[i]] = [fws[i + 1]] workflow = Workflow(fws, name=wf_name, links_dict=links_dict) return workflow
15ed110d3685c9d8de216733e8d87f6c07580529
1,351
def categorize_folder_items(folder_items):
    """
    Categorize submission items into three lists: CDM, PII, UNKNOWN

    :param folder_items: list of filenames in a submission folder (name of folder excluded)
    :return: a tuple with three separate lists - (cdm files, pii files, unknown files)
    """
    found_cdm_files = []
    unknown_files = []
    found_pii_files = []
    for item in folder_items:
        if _is_cdm_file(item):
            found_cdm_files.append(item)
        elif _is_pii_file(item):
            found_pii_files.append(item)
        else:
            if not (_is_known_file(item) or _is_string_excluded_file(item)):
                unknown_files.append(item)
    return found_cdm_files, found_pii_files, unknown_files
14e840817cce4cc91ed50d6d9dcfa1c19a2bcbeb
1,352
def _broadcast_all(indexArrays, cshape):
    """returns a tuple of views of 'indexArrays' broadcast to shape 'cshape'"""
    result = []
    for i in indexArrays:
        if isinstance(i, NDArray) and i._strides is not None:
            result.append(_broadcast(i, cshape))
        else:
            result.append(i)
    return tuple(result)
b7b98245bc534074e408d5c9592bf68ae53f580e
1,353
import numpy as np


def _none_tozero_array(inarray, refarray):
    """Repair an array which is None with one which is not
    by just building zeros

    Attributes
        inarray: numpy array
        refarray: numpy array
    """
    if inarray is None:
        if _check_ifarrays([refarray]):
            inarray = np.zeros_like(refarray)
    else:
        if not _check_ifarrays([inarray]):
            inarray = None

    return inarray
9b0852655a13b572106acc809d842ca38d24e707
1,354
def dpuGetExceptionMode():
    """
    Get the exception handling mode for runtime N2Cube

    Returns:
        Current exception handling mode for N2Cube APIs.
        Available values include:
        - N2CUBE_EXCEPTION_MODE_PRINT_AND_EXIT
        - N2CUBE_EXCEPTION_MODE_RET_ERR_CODE
    """
    return pyc_libn2cube.pyc_dpuGetExceptionMode()
fd33aba868a05f3cc196c89e3c2d428b0cce108a
1,355
import re def clean_links(links, category): """ clean up query fields for display as category buttons to browse by :param links: list of query outputs :param category: category of search from route :return: list of cleansed links """ cleansedlinks = [] for item in links: # remove blanks if item == "" or item == "-": continue else: #crop chromosome location output to eg 13p (check if substrate) if category[:3] == 'Sub': item = re.search("[\d|X|Y]+[pq]", item).group(0) # remove forward slashes item = item.replace("/", "&F&") if item not in cleansedlinks: cleansedlinks.append(item) # sort the links cleansedlinks.sort() return cleansedlinks
f43af81a8ef8e5520726e886dd74d991c999a32d
1,356
from typing import Any
from typing import Optional


def as_bool(value: Any, schema: Optional[BooleanType] = None) -> bool:
    """Parses value as boolean"""
    schema = schema or BooleanType()
    value = value.decode() if isinstance(value, bytes) else value
    if value:
        value = str(value).lower()
        value = BOOLEANS.get(value)
    validation.validate(schema.as_dict(), value)
    return value
7085b7bc7eccb2db95f5645b358e4940914f68f9
1,357
from typing import Dict from typing import List from typing import Tuple def get_raw_feature( column: Text, value: slicer_lib.FeatureValueType, boundaries: Dict[Text, List[float]] ) -> Tuple[Text, slicer_lib.FeatureValueType]: """Get raw feature name and value. Args: column: Raw or transformed column name. value: Raw or transformed column value. boundaries: Dictionary containing quantile boundaries of features keyed by column name. Returns: Tuple of raw column name and raw column value. """ if column.startswith(auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX): raw_feature = column[len(auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX ):] (start, end) = auto_slice_key_extractor.get_bucket_boundary( value, boundaries[raw_feature]) return (raw_feature, _format_boundary(start, end)) return (column, value)
29323b8e1a7ef32f19ff94f31efca20567780aa4
1,358
from typing import Union

import numpy as np
import xarray as xr


def ndmi(nir: Union[xr.DataArray, np.ndarray, float, int],
         swir1: Union[xr.DataArray, np.ndarray, float, int]) -> Union[xr.DataArray, np.ndarray, float, int]:
    """
    Normalized difference moisture index.
    Sentinel-2: B8A, B11

    Parameters
    ----------
    nir : xr.DataArray or np.ndarray or float or int
        Near infrared band acquisition.
    swir1 : xr.DataArray or np.ndarray or float or int
        Short wave infrared band acquisition.

    Returns
    -------
    same as input:
        Normalised difference moisture index.
    """
    return utils.normalized_difference(nir, swir1)
f66a68cd75d9c030c0257e1d543c2caf9efcf652
1,359
def _collect_data_and_enum_definitions(parsed_models: dict) -> dict[str, dict]: """ Collect all data and enum definitions that are referenced as interface messages or as a nested type within an interface message. Args: parsed_models: A dict containing models parsed from an AaC yaml file. Returns: A dict of data message type keys to data message parsed model values """ def collect_nested_types(interface_data_message_types: list[str]): nested_types = [] for message_type in interface_data_message_types: data_model = parsed_models[message_type]["data"] for field in data_model.get("fields"): field_type = field.get("type") if field_type in parsed_models: nested_types.append(field_type) return list(set(nested_types)) def collect_behaviors(model_with_behaviors): return util.search(model_with_behaviors, ["model", "behavior"]) def convert_behavior_io_to_data_type(behavior_io_model): return behavior_io_model.get("type") def collect_data_message_types(behavior_model): inputs = behavior_model.get("input") or [] outputs = behavior_model.get("output") or [] return list(map(convert_behavior_io_to_data_type, inputs + outputs)) model_definitions = util.get_models_by_type(parsed_models, "model") behaviors = list(flatten(map(collect_behaviors, model_definitions.values()))) interface_data_message_types = list(set(flatten(map(collect_data_message_types, behaviors)))) all_definitions_types_to_generate = interface_data_message_types + collect_nested_types(interface_data_message_types) return {data_message_type: parsed_models[data_message_type] for data_message_type in all_definitions_types_to_generate}
0d561003c8cdbe7d2eb7df2f03d5939f70d81467
1,360
def _list_goals(context, message):
    """Show all installed goals."""
    context.log.error(message)
    # Execute as if the user had run "./pants goals".
    return Phase.execute(context, 'goals')
5e823770528e97b4254e426a2d99113d119368b0
1,361
def values(df, varname):
    """Values and counts in index order.

    df: DataFrame
    varname: string column name

    returns: Series that maps from value to frequency
    """
    return df[varname].value_counts().sort_index()
ea548afc8e0b030e441baa54abad32318c9c007f
1,362
def get_or_none(l, n):
    """Get value or return 'None'"""
    try:
        return l[n]
    except (TypeError, IndexError):
        return 'None'
c46a0f4c8edc9286b0122f1643e24a04113a5bfc
1,363
def pfam_clan_to_pdb(clan):
    """get a list of associated PDB ids for given pfam clan access key.

    :param clan: pfam accession key of clan
    :type clan: str
    :return: List of associated PDB ids
    :rettype: list"""
    url = 'http://pfam.xfam.org/clan/' + clan + '/structures'
    pattern = '/structure/[A-Z, 0-9]{4}'
    return _xfam_to(url, pattern)
820e8a058edfeee256ab01281020c6e38e2d7c6d
1,364
def fib(n):
    """Compute the nth Fibonacci number.

    >>> fib(8)
    21
    """
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return fib(n-2) + fib(n-1)
0db631be60754376e1a9287a4486ceb5ad7e392f
1,365
import os


def _is_cache_dir_appropriate(cache_dir, cache_file):
    """
    Determine if a directory is acceptable for building.

    A directory is suitable if any of the following are true:
      - it doesn't exist
      - it is empty
      - it contains an existing build cache
    """
    if os.path.exists(cache_dir):
        files = os.listdir(cache_dir)
        if cache_file in files:
            return True
        return not bool(files)
    return True
b7a94540b8e97c4628224c05bfff44b798e449c9
1,366
from typing import List
from typing import Union

import numpy as np


def score_tours_absolute(problems: List[N_TSP], tours: List[Union[int, NDArray]]) -> NDArray:
    """Calculate tour lengths for a batch of tours.

    Args:
        problems (List[N_TSP]): list of TSPs
        tours (List[Union[int, NDArray]]): list of tours (in either index or segment format)

    Returns:
        NDArray: tour lengths
    """
    result = np.ndarray((len(problems),), dtype=float)
    for i, (p, t) in enumerate(zip(problems, tours)):
        result[i] = p.score(t)
    return result
b13ad2df2bfaf58f2b6989f2f2e67d917475b5bb
1,367
def has(pred: Pred, seq: Seq) -> bool:
    """
    Return True if sequence has at least one item that satisfies the predicate.
    """
    for x in seq:
        if pred(x):
            return True
    return False
bc41ceb21804cd273d0c2a71327f63f2269763d9
1,368
from typing import Optional from typing import Union from typing import List from typing import Dict from typing import Any import re from datetime import datetime def _get_dataset_domain( dataset_folder: str, is_periodic: bool, spotlight_id: Optional[Union[str, List]] = None, time_unit: Optional[str] = "day", ): """ Returns a domain for a given dataset as identified by a folder. If a time_unit is passed as a function parameter, the function will assume that the domain is periodic and with only return the min/max dates, otherwise ALL dates available for that dataset/spotlight will be returned. Params: ------ dataset_folder (str): dataset folder to search within time_unit (Optional[str]): time_unit from the dataset's metadata json file spotlight_id (Optional[str]): a dictionary containing the `spotlight_id` of a spotlight to restrict the domain search to. time_unit (Optional[str] - one of ["day", "month"]): Wether the {date} object in the S3 filenames should be matched to YYYY_MM_DD (day) or YYYYMM (month) Return: ------ List[datetime] """ s3_keys_args: Dict[str, Any] = {"prefix": dataset_folder} if spotlight_id: s3_keys_args["spotlight_id"] = spotlight_id keys = _gather_s3_keys(**s3_keys_args) if not keys: raise NoKeysFoundForSpotlight dates = [] for key in keys: # matches either dates like: YYYYMM or YYYY_MM_DD pattern = re.compile( r"[^a-zA-Z0-9]((?P<YEAR>\d{4})_(?P<MONTH>\d{2})_(?P<DAY>\d{2}))[^a-zA-Z0-9]" ) if time_unit == "month": pattern = re.compile( r"[^a-zA-Z0-9](?P<YEAR>(\d{4}))(?P<MONTH>(\d{2}))[^a-zA-Z0-9]" ) result = pattern.search(key, re.IGNORECASE,) if not result: continue date = None try: date = datetime.datetime( int(result.group("YEAR")), int(result.group("MONTH")), int(result.groupdict().get("DAY", 1)), ) except ValueError: # Invalid date value matched - skip date continue # Some files happen to have 6 consecutive digits (likely an ID of sorts) # that sometimes gets matched as a date. This further restriction of # matched timestamps will reduce the number of "false" positives (although # ID's between 201011 and 203011 will slip by) if not datetime.datetime(2010, 1, 1) < date < datetime.datetime(2030, 1, 1): continue dates.append(date.strftime("%Y-%m-%dT%H:%M:%SZ")) if is_periodic and len(dates): return [min(dates), max(dates)] return sorted(set(dates))
bc230145eee3f60491b4c42453fcbf5145ac7761
1,369
def randomize_quaternion_along_z(
    mujoco_simulation: RearrangeSimulationInterface, random_state: RandomState
):
    """
    Rotate goal along z axis and return the rotated quat of the goal
    """
    quat = _random_quat_along_z(mujoco_simulation.num_objects, random_state)
    return rotation.quat_mul(quat, mujoco_simulation.get_target_quat(pad=False))
9bc29520ca8f00debf819bf4dee55cce43bb8483
1,370
from io import StringIO import json def init(model): """ Initialize the server. Loads pyfunc model from the path. """ app = flask.Flask(__name__) @app.route("/ping", methods=["GET"]) def ping(): # pylint: disable=unused-variable """ Determine if the container is working and healthy. We declare it healthy if we can load the model successfully. """ health = model is not None status = 200 if health else 404 return flask.Response(response="\n", status=status, mimetype="application/json") @app.route("/invocations", methods=["POST"]) def transformation(): # pylint: disable=unused-variable """ Do an inference on a single batch of data. In this sample server, we take data as CSV or json, convert it to a Pandas DataFrame, generate predictions and convert them back to CSV. """ # Convert from CSV to pandas if flask.request.content_type == CONTENT_TYPE_CSV: data = flask.request.data.decode("utf-8") csv_input = StringIO(data) data = parse_csv_input(csv_input=csv_input) elif flask.request.content_type == CONTENT_TYPE_JSON: global logged_pandas_records_format_warning if not logged_pandas_records_format_warning: _logger.warning( "**IMPORTANT UPDATE**: Starting in MLflow 0.9.0, requests received with a" " `Content-Type` header value of `%s` will be interpreted" " as JSON-serialized Pandas DataFrames with the `split` orientation, instead" " of the `records` orientation. The `records` orientation is unsafe because" " it may not preserve column ordering. Client code should be updated to" " either send serialized DataFrames with the `split` orientation and the" " `%s` content type (recommended) or use the `%s` content type with the" " `records` orientation. For more information, see" " https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.\n", CONTENT_TYPE_JSON, CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_JSON_RECORDS_ORIENTED, ) logged_pandas_records_format_warning = True data = parse_json_input( json_input=flask.request.data.decode("utf-8"), orientation="records" ) elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED: data = parse_json_input( json_input=flask.request.data.decode("utf-8"), orientation="records" ) elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED: data = parse_json_input( json_input=flask.request.data.decode("utf-8"), orientation="split" ) else: return flask.Response( response=( "This predictor only supports the following content types," " {supported_content_types}. Got '{received_content_type}'.".format( supported_content_types=CONTENT_TYPES, received_content_type=flask.request.content_type, ) ), status=415, mimetype="text/plain", ) # Do the prediction # pylint: disable=broad-except try: raw_predictions = model.predict(data) except Exception: _handle_serving_error( error_message=( "Encountered an unexpected error while evaluating the model. Verify" " that the serialized input Dataframe is compatible with the model for" " inference." ) ) predictions = get_jsonable_obj(raw_predictions, pandas_orientation="records") result = json.dumps(predictions, cls=NumpyEncoder) return flask.Response(response=result, status=200, mimetype="application/json") return app
525ffd789be94c1e70d987c4b64c8876d9260d72
1,371
def sample_tag(user, name='Comedy'):
    """Creates a sample Tag"""
    return Tag.objects.create(user=user, name=name)
2288d8344fcf931a9e932b46db9a65275425b427
1,372
from typing import Tuple

import numpy as np


def _middle_point(p1: np.ndarray, p2: np.ndarray) -> Tuple[int, int]:
    """Returns the middle point (x,y) between two points

    Arguments:
        p1 (np.ndarray): First point
        p2 (np.ndarray): Second point
    """
    return tuple((p1 + p2) // 2)
dcca1c1eeb0fea8c9adebfc9cccca94cb7ab7a43
1,373
def filter_with_prefixes(value, prefixes):
    """
    Returns False if the value starts with at least one of the prefixes,
    True otherwise.

    Arguments:
        value -- string to validate
        prefixes -- list of string prefixes to validate at the beginning of the value
    """
    for prefix in prefixes:
        if value.startswith(prefix):
            return False
    return True
56b9bacedaa7aa06023e29d45809f6e9661ee483
1,374
import os


def get_idl_parser(*, allow_cache=True):
    """Get the global IdlParser object."""

    # Singleton pattern
    global _parser
    if _parser and allow_cache:
        return _parser

    # Get source
    with open(os.path.join(lib_dir, "resources", "webgpu.idl"), "rb") as f:
        source = f.read().decode()

    # Create parser
    idl = IdlParser(source)
    idl.parse()
    _parser = idl
    return idl
345b1290d461ee064ae9aeb635f7f2ba902f2b1a
1,375
def is_seq(x, step=1):
    """Checks if the elements in a list-like object are increasing by step

    Parameters
    ----------
    x: list-like
    step

    Returns
    -------
    True if elements increase by step, else false and the index at which the condition is violated.
    """
    for i in range(1, len(x)):
        if not x[i] == (x[i - 1] + step):
            print('Not seq at: ', i)
            return False
    return True
032e12b86aa7e50dfba2ddccd244475f58d70b29
1,376
def create_patient(record: dict) -> tuple: """ Returns a FHIR Patient resource entry and reference. """ gender = map_sex(record["sex_new"] or record["sex"]) patient_id = generate_patient_hash( names = participant_names(record), gender = gender, birth_date = record['birthday'], postal_code = participant_zipcode(record)) if not patient_id: # Some piece of information was missing, so we couldn't generate a # hash. Fallback to treating this individual as always unique by using # the REDCap record id. patient_id = generate_hash(f"{REDCAP_URL}{PROJECT_ID}/{record['record_id']}") LOG.debug(f"Generated individual identifier {patient_id}") patient_identifier = create_identifier(f"{SFS}/individual",patient_id) patient_resource = create_patient_resource([patient_identifier], gender) return create_entry_and_reference(patient_resource, "Patient")
cf211e9452b3a9f82ea1bd32bffeb9ba2146bc9e
1,377
def delete_editor(userid): """ :param userid: a string representing the user's UW NetID :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned. """ url = _make_del_account_url(userid) return _process_resp(url, get_sea_resource(url), _is_editor_deleted )
494211289faa4b16206b9687d6f7f94a8adc992a
1,378
def ecio_quality_rating(value, unit): """ ECIO (Ec/Io) - Energy to Interference Ratio (3G, CDMA/UMTS/EV-DO) """ if unit != "dBm": raise ValueError("Unsupported unit '{:}'".format(unit)) rating = 0 if value > -2: rating = 4 elif -2 >= value > -5: rating = 3 elif -5 >= value > -10: rating = 2 elif value <= -10: rating = 1 return rating
4cc21012464b8476d026f9dfbc35b8b1ea3c2d85
1,379
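A few illustrative calls to `ecio_quality_rating` above, showing how the thresholds map to ratings:

assert ecio_quality_rating(-1.5, "dBm") == 4   # better than -2 -> best rating
assert ecio_quality_rating(-7.0, "dBm") == 2   # between -5 and -10
assert ecio_quality_rating(-12.0, "dBm") == 1  # at or below -10 -> worst rating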
def normalizeFilename(filename):
    """normalizeFilename(filename)
    Replace characters that are illegal in the Windows environment"""
    res = filename
    rep = {
        "*": "_",
        "\"": "\'",
        "/": " per ",
        "\\": "_",
        ",": "_",
        "|": "_",
        ":": ";"
    }
    for frm, to in rep.items():
        res = res.replace(frm, to)
    return res.strip()
84239d7d4fd982b27a4e0f5d20f615f3f288af85
1,380
def __virtual__(): """ Check if macOS and PyObjC is available """ if not salt.utils.platform.is_darwin(): return (False, 'module: mac_wifi only available on macOS.') if not PYOBJC: return (False, 'PyObjC not available.') return __virtualname__
46d21f3546234984890ff147a300ee8241b69ae6
1,381
import numpy as np


def rearrange_kernel(kernel, data_shape=None):
    """Rearrange kernel

    This method rearranges the input kernel elements for vector
    multiplication. The input kernel is padded with zeroes to match the
    image size.

    Parameters
    ----------
    kernel : np.ndarray
        Input kernel array
    data_shape : tuple
        Shape of the data

    Returns
    -------
    numpy.ndarray
        Rearranged matrix of kernel elements

    """
    # Define kernel shape.
    kernel_shape = np.array(kernel.shape)

    # Set data shape if not provided.
    if isinstance(data_shape, type(None)):
        data_shape = kernel_shape
    else:
        data_shape = np.array(data_shape)

    # Set the length of the output matrix rows.
    vec_length = np.prod(data_shape)

    # Find the difference between the shape of the data and the kernel.
    shape_diff = data_shape - kernel_shape

    if np.any(shape_diff < 0):
        raise ValueError('Kernel shape must be less than or equal to the '
                         'data shape')

    # Set the kernel radius.
    kernel_rad = kernel_shape // 2

    # Rotate, pad and roll the input kernel.
    kernel_rot = np.pad(np.rot90(kernel, 2),
                        ((0, shape_diff[0]), (0, shape_diff[1])), 'constant')
    kernel_rot = np.roll(np.roll(kernel_rot, -kernel_rad[1], axis=1),
                         -kernel_rad[0], axis=0)

    return np.array([np.roll(np.roll(kernel_rot, i, axis=0), j,
                     axis=1).reshape(vec_length)
                     for i in range(data_shape[0])
                     for j in range(data_shape[1])])
a5117d56f520c3a8f79ba2baea68d0b4d516158c
1,382
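A small usage sketch for `rearrange_kernel` above: each row of the returned matrix is a shifted copy of the rotated, zero-padded kernel, so multiplying it with a flattened image should apply the kernel with the wrap-around boundary handling implied by `np.roll`. The shapes below are illustrative only.

import numpy as np

image = np.arange(16.0).reshape(4, 4)
kernel = np.ones((3, 3)) / 9.0           # simple mean filter

mat = rearrange_kernel(kernel, data_shape=image.shape)
print(mat.shape)                         # (16, 16): one row per image pixel

filtered = (mat @ image.ravel()).reshape(image.shape)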
def exportTable(request_id, params): """Starts a table export task running. This is a low-level method. The higher-level ee.batch.Export.table object is generally preferred for initiating table exports. Args: request_id (string): A unique ID for the task, from newTaskId. If you are using the cloud API, this does not need to be from newTaskId, (though that's a good idea, as it's a good source of unique strings). It can also be empty, but in that case the request is more likely to fail as it cannot be safely retried. params: The object that describes the export task. If you are using the cloud API, this should be an ExportTableRequest. However, the "expression" parameter can be the actual FeatureCollection to be exported, not its serialized form. Returns: A dict with information about the created task. If you are using the cloud API, this will be an Operation. """ params = params.copy() return _prepare_and_run_export( request_id, params, _get_cloud_api_resource().projects().table().export)
e4dd22264c070315351bc3dd51061bc4948c9bda
1,383
def add_parents_to_frame(qs):
    """
    There seems to be a bug in the django-pandas API where self-referencing
    foreign keys are not returned properly. This is a workaround.
    :param qs:
    :return:
    """
    tn_parent_ids = qs.values_list("tn_parent_id", flat=True).all()
    df = read_frame(qs.all(), fieldnames=get_standard_field_names(), verbose=True)
    df['tn_parent_id'] = tn_parent_ids
    df['tn_parent_id'] = df['tn_parent_id'].astype('Int64')
    df['in_reply_to_user_id'] = df['in_reply_to_user_id'].astype('Int64')
    return df
cf3873822800620a6e52621846ed36dcfaee6d7b
1,384
def template_check(value): """Check if a rendered template string equals true. If value is not a string, return value as is. """ if isinstance(value, str): return value.lower() == "true" return value
3733db5c107068e815bac079fdef1a450f7acdc9
1,385
def return_npc(mcc, mnc): """ Format MCC and MNC into a NPC. :param mcc: Country code. :type mcc: int :param mnc: Network code. :type mnc: int """ return "{0}{1}30".format(str(mcc).zfill(3), str(mnc).zfill(3))
0ae5952fd7b026c2c90c72046f63ca4d08dacf06
1,386
import math


def class_to_bps(bw_cls):
    """
    Convert a SIBRA bandwidth class to bps (Bits Per Second).
    Class 0 is a special case, and is mapped to 0bps.

    :param float bw_cls: SIBRA bandwidth class.
    :returns: Kbps of bandwidth class
    :rtype: float
    """
    if bw_cls == 0:
        return 0
    bw_base = math.sqrt(pow(2, bw_cls - 1))
    return SIBRA_BW_FACTOR * bw_base
02d0b4fbf5655318e6807bdd8c41fdfb59010ba4
1,387
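A worked sketch of the growth pattern implemented by `class_to_bps` above: bandwidth doubles every two classes, since class `c` maps to `SIBRA_BW_FACTOR * sqrt(2**(c - 1))`. The constant below is a stand-in for illustration; the real `SIBRA_BW_FACTOR` comes from the surrounding SIBRA module.

SIBRA_BW_FACTOR = 16 * 1024  # placeholder value, illustration only

print(class_to_bps(1))  # SIBRA_BW_FACTOR * 1
print(class_to_bps(3))  # SIBRA_BW_FACTOR * 2
print(class_to_bps(5))  # SIBRA_BW_FACTOR * 4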
def _get_capacity():
    """Return constant values for dam level capacities.

    Storage capacity values are measured in megalitres (Ml),
    i.e. thousands of cubic metres.
    Source: https://en.wikipedia.org/wiki/Western_Cape_Water_Supply_System

    @return capacity: Dict object containing maximum capacities of Western
    Cape dams. Includes aggregate values for small dams, big six dams and
    all dams.
    """
    big_six_capacity = {
        'Theewaterskloof': 480188,
        'Wemmershoek': 58644,
        'Steensbras Lower': 33517,
        'Steenbras Upper': 31757,
        'Voëlvlei': 164095,
        'Berg River': 130010,
    }
    small_capacity = {
        'Hely-Hutchinson': 925,
        'Woodhead': 954,
        'Victoria': 128,
        'De Villiers': 243,
        'Kleinplaats': 1368,
        'Lewis Gay': 182,
        'Land-en-Zeezicht': 451,
    }
    capacity = {**big_six_capacity, **small_capacity}
    capacity['Big Six Dams'] = sum(big_six_capacity.values())
    capacity['Small Dams'] = sum(small_capacity.values())
    capacity['All Dams'] = capacity['Small Dams'] + capacity['Big Six Dams']
    return capacity
01d1a5e7470d578296e285e2e00cd44eaf00d15c
1,388
def login_required(f): """ Decorator to use if a view needs to be protected by a login. """ @wraps(f) def decorated_function(*args, **kwargs): if not 'username' in session: return redirect(url_for('login')) return f(*args, **kwargs) return decorated_function
d09069e65c64c06885708b10ca70f5f319389c7a
1,389
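A typical way the `login_required` decorator above would be applied to a Flask view, assuming the usual Flask names (`app`, `render_template`, `session`, `redirect`, `url_for`) are in scope; the route and template names are placeholders.

@app.route('/dashboard')
@login_required
def dashboard():
    return render_template('dashboard.html')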
def test_bare_except() -> None: """Bare `except` to handle any uncaught exceptions.""" def reciprocal_of(value: float) -> float: try: return 1 / value except ZeroDivisionError: raise except: raise pytest.raises(TypeError, reciprocal_of, "a")
8cce2efcd850ca546f092ec0988d69e8d576e500
1,390
def _get_image_blob(im): """Converts an image into a network input. Arguments: im (ndarray): a color image in BGR order Returns: blob (ndarray): a data blob holding an image pyramid im_scale_factors (list): list of image scales (relative to im) used in the image pyramid """ im_orig = im.astype(np.float32, copy=True) im_orig -= cfg.PIXEL_MEANS im_shape = im_orig.shape im_size_min = np.min(im_shape[0:2]) im_size_max = np.max(im_shape[0:2]) processed_ims = [] im_scale_factors = [] # print('cfg.TEST.SCALES: {}'.format(cfg.TEST.SCALES)), for target_size in cfg.TEST.SCALES: im_scale = float(target_size) / float(im_size_min) # Prevent the biggest axis from being more than MAX_SIZE if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE: im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max) im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR) im_scale_factors.append(im_scale) processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) # blob /= 255.0 return blob, np.array(im_scale_factors)
61a10be02f258680b0c103398deee9d72870035b
1,391
import sys


def check_args():
    """Checks the arguments passed by the command line

    By passing one or more parameters, you can disable a single module source.
    Actual parameters allowed are:

    * `-no-instagram`: disables Instagram source
    * `-no-youtube`: disables YouTube source
    * `-no-spotify`: disables Spotify source
    * `-no-birthday`: disables birthdays events source
    * `-no-twitter`: disables Twitter source (used for reposting)

    Remember that `-no-twitter` is different than `-no-tweet`:
    `-no-tweet` actually prevents the bot from tweeting any update from the
    enabled sources. The output will still be visible on the console.
    This is really useful for **testing**.

    Returns:
        A dictionary that contains all the sources and their state
        (enabled or disabled, True or False)
    """
    source = {"instagram": True, "youtube": True, "spotify": True,
              "birthday": True, "twitter": True, "billboard": True}
    if len(sys.argv) > 1:
        for arg in sys.argv:
            if arg == "-no-tweet":
                print("-no-tweet parameter passed!\nTest mode enabled: the bot won't tweet anything")
                set_test_mode()
            if arg == "-no-instagram":
                print("-no-instagram parameter passed!")
                source["instagram"] = False
            if arg == "-no-spotify":
                print("-no-spotify parameter passed!")
                source["spotify"] = False
            if arg == "-no-youtube":
                print("-no-youtube parameter passed!")
                source["youtube"] = False
            if arg == "-no-birthday":
                print("-no-birthday parameter passed!")
                source["birthday"] = False
            if arg == "-no-billboard":
                print("-no-billboard parameter passed!")
                source["billboard"] = False
            if arg == "-no-twitter":
                print("-no-twitter parameter passed!")
                source["twitter"] = False
    print()
    return source
8458772b1a173bb5ca99fefd3d98ccd1aca32dc3
1,392
def toiter(x): """Convert to iterable. If input is iterable, returns it. Otherwise returns it in a list. Useful when you want to iterate over something (like in a for loop), and you don't want to have to do type checking or handle exceptions when it isn't a sequence""" if iterable(x): return x else: return [x]
ef9716b65893ca614dd53cc6fa7ae17b6cce2a35
1,393
def ap_date(value):
    """
    Converts a date string in m/d/yyyy format into AP style.
    """
    if not value:
        return ''
    bits = str(value).split('/')
    month, day, year = bits
    output = AP_MONTHS[int(month) - 1]
    output += ' ' + str(int(day))
    output += ', ' + year
    return output
4ca1dab0775141f548946072e0208502d54bc784
1,394
def dump_sql(fp, query: str, encoding="utf8"): """ Write a given query into a file path. """ query = ljustify_sql(query) for line in query: fp.write(bytes(line, encoding=encoding)) return fp
b6a847dbfccb17c0cf7bd3590eee69783d83030c
1,395
def make_design_matrix(stim, d=25): """Create time-lag design matrix from stimulus intensity vector. Args: stim (1D array): Stimulus intensity at each time point. d (number): Number of time lags to use. Returns X (2D array): GLM design matrix with shape T, d """ padded_stim = np.concatenate([np.zeros(d - 1), stim]) T = len(stim) X = np.zeros((T, d)) for t in range(T): X[t] = padded_stim[t:t + d] return X
5b1759076b9e0f44ea338a4e72d2f1a76d3ccc3b
1,396
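A quick usage sketch for `make_design_matrix` above: row `t` of the result holds the `d` most recent stimulus values ending at time `t`, zero-padded before the start of the recording.

import numpy as np

stim = np.random.rand(100)          # synthetic stimulus trace
X = make_design_matrix(stim, d=25)

print(X.shape)                      # (100, 25)
# X[t] == [stim[t-24], ..., stim[t]], with zeros wherever the lag falls before t=0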
def _fit_subpixel_2d(image, coord, radius, voxel_size_yx, psf_yx): """Fit a gaussian in a 2-d image. Parameters ---------- image : np.ndarray Image with shape (y, x). coord : np.ndarray, np.int64 Coordinate of the spot detected, with shape (2,). One coordinate per dimension (yx coordinates). radius : Tuple[float] Radius in pixels of the detected spots, one element per dimension. voxel_size_yx : int or float Size of a voxel on the yx plan, in nanometer. psf_yx : int or float Theoretical size of the PSF emitted by a spot in the yx plan, in nanometer. Returns ------- new_coord : List[float] Coordinates of the spot centroid with a subpixel accuracy (one element per dimension). """ # extract spot image image_spot, bbox_low = _get_spot_surface( image, coord[0], coord[1], radius[0]) # fit gaussian try: parameters = modelize_spot(image_spot, voxel_size_z=None, voxel_size_yx=voxel_size_yx, psf_z=None, psf_yx=psf_yx, return_coord=True) # format coordinates and ensure it is fitted within the spot image y_max, x_max = image_spot.shape coord_y = parameters[0] / voxel_size_yx if coord_y < 0 or coord_y > y_max: coord_y = coord[0] else: coord_y += bbox_low[0] coord_x = parameters[1] / voxel_size_yx if coord_x < 0 or coord_x > x_max: coord_x = coord[1] else: coord_x += bbox_low[1] new_coord = [coord_y, coord_x] # if a spot is ill-conditioned, we simply keep its original coordinates except RuntimeError: new_coord = list(coord) return new_coord
6e05b395ae93319de599283fe041f681b5ee039c
1,397
def density(sisal, temp, pres, salt=None, dliq=None, chkvals=False,
            chktol=_CHKTOL, salt0=None, dliq0=None, chkbnd=False,
            useext=False, mathargs=None):
    """Calculate sea-ice total density.

    Calculate the total density of a sea-ice parcel.

    :arg float sisal: Total sea-ice salinity in kg/kg.
    :arg float temp: Temperature in K.
    :arg float pres: Pressure in Pa.
    :arg salt: Seawater salinity in kg/kg. If unknown, pass None (default)
        and it will be calculated.
    :type salt: float or None
    :arg dliq: Seawater liquid water density in kg/m3. If unknown, pass None
        (default) and it will be calculated.
    :type dliq: float or None
    :arg bool chkvals: If True (default False) and all values are given, this
        function will calculate the disequilibrium and raise a warning if the
        results are not within a given tolerance.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg salt0: Initial guess for the seawater salinity in kg/kg. If None
        (default) then `_approx_tp` is used.
    :type salt0: float or None
    :arg dliq0: Initial guess for the seawater liquid water density in kg/m3.
        If None (default) then `flu3a._dliq_default` is used.
    :type dliq0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given values
        are valid but outside the recommended bounds (default False).
    :arg bool useext: If False (default) then the salt contribution is
        calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
        (default) then no arguments are passed and default parameters will
        be used.
    :returns: Density in kg/m3.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    :raises RuntimeWarning: If the equilibrium seawater salinity is lower
        than the total parcel salinity.

    :Examples:

    >>> density(0.035,270.,1e5)
    993.156434117
    """
    g_p = seaice_g(0, 0, 1, sisal, temp, pres, salt=salt, dliq=dliq,
                   chkvals=chkvals, chktol=chktol, salt0=salt0, dliq0=dliq0,
                   chkbnd=chkbnd, useext=useext, mathargs=mathargs)
    rho = g_p**(-1)
    return rho
40a365ea0d4c79813394790ef3df6c8eb21727b8
1,398
def prod_non_zero_diag(x):
    """Compute product of nonzero elements from matrix diagonal.

    input:
    x -- 2-d numpy array
    output:
    product -- integer number

    Not vectorized implementation.
    """
    n = len(x)
    m = len(x[0])
    res = 1
    for i in range(min(n, m)):
        if (x[i][i] != 0):
            res *= x[i][i]
    return res
13e9f6cc9ea22e7901d454b23297a2e9c5da3a3a
1,399
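The docstring above notes the loop is not vectorized; below is a NumPy-based sketch of the same computation. Like the loop version, it returns 1 when the diagonal contains no nonzero entries (the product of an empty array is 1).

import numpy as np

def prod_non_zero_diag_vectorized(x):
    """Vectorized variant: product of the nonzero elements on the main diagonal."""
    diag = np.diagonal(np.asarray(x))
    return diag[diag != 0].prod()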