Dataset columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M).
from typing import Any, Dict


async def get_by_name(username: str) -> Dict[str, Any]:
    """Retrieve one row by its username.

    The return object is a dict. Raises if the record was not found.
    """
    username = username.lower()
    for user in Database:
        if user["username"] == username:
            return user
    raise RecordNotFoundError(f"Could not find row with username '{username}'")
eed73179c2e84e9f6b02c09215657d2b221ec601
2,400
import requests


def semantics(address: str) -> "show_semantics_page":
    """Semantics of address."""
    response = requests.get(
        f"{request.url_root}api/semantics/{EthConfig.DEFAULT_CHAIN}/{address}",
        headers={"x-api-key": current_app.config["API_KEY"]},
    )
    return show_semantics_page(response)
df7feaf64bdd40fe5369c49b464990071b87750f
2,401
def update_trail(clt, trail_name, log_group_arn, role_arn):
    """Update Trail to integrate with CloudWatch Logs."""
    try:
        result = clt.update_trail(
            Name=trail_name,
            CloudWatchLogsLogGroupArn=log_group_arn,
            CloudWatchLogsRoleArn=role_arn,
        )
    except ClientError as e:
        print(e.response['Error']['Message'])
        return 'fail'
    return result
b9274f6d012d74d584e5d22f520ecd76cd9301e2
2,402
from typing import Any, Optional


def block(**arguments: Any) -> Optional[Blocks]:
    """Python application interface for creating an initial block file from
    command line or python code.

    This method creates an HDF5 file associated with the desired initial flow
    specification (for each needed computational field), suitable for input by
    the FLASH application at runtime.

    Keyword Arguments:
        ndim (int): Number of simulation dimensions (i.e., 2 or 3).
        nxb (int): Number of grid points per block in the i direction.
        nyb (int): Number of grid points per block in the j direction.
        nzb (int): Number of grid points per block in the k direction.
        iprocs (int): Number of blocks in the i direction.
        jprocs (int): Number of blocks in the j direction.
        kprocs (int): Number of blocks in the k direction.
        fields (dict): Key/value pairs for fields (e.g., {'temp': 'center', ...}).
        fmethod (dict): Key/value pairs for flow initialization
            (e.g., {'temp': 'constant', ...}).
        fparam (dict): Key/value pairs for parameters
            (e.g., {'temp': {'const': 0.5, ...}, ...}) used for each field method.
        path (str): Path to source files used in some initialization methods
            (e.g., python).
        dest (str): Path to initial block hdf5 file.
        ignore (bool): Ignore configuration file provided arguments, options, and flags.
        result (bool): Return the calculated fields by block on root.
        nofile (bool): Do not write the calculated fields by block to file.

    Note:
        By default this function reads the grid data from the hdf5 file (i.e., must
        run create.grid() first); optionally you can provide the result from grid
        creation directly by using an optional keyword -- coords: (ndarray, ...).
    """
    args = process_arguments(**arguments)
    path = args.pop('dest')
    ndim = args.pop('ndim')
    procs = args.pop('procs')
    sizes = args.pop('sizes')
    result = args.pop('result')
    nofile = args.pop('nofile')
    cmdline = args.pop('cmdline', False)
    coords = args.pop('coords', None)

    with args.pop('context')() as progress:
        if coords is None:
            coords = read_coords(path=path, ndim=ndim)
        shapes = get_shapes(ndim=ndim, procs=procs, sizes=sizes)
        grids = get_grids(coords=coords, ndim=ndim, procs=procs, sizes=sizes)
        blocks, index = calc_blocks(grids=grids, procs=procs, shapes=shapes, **args)
        if not nofile:
            write_blocks(blocks=blocks, index=index, path=path, shapes=shapes)

    if not result:
        return None
    if cmdline:
        screen_out(blocks=blocks)
    return blocks
fe3576aea6eff240d9a46dccc12a73f5423fd705
2,403
import numpy as np


def _low_discrepancy(dim, n, seed=0.5):
    """Generate a 1d, 2d, or 3d low discrepancy sequence of coordinates.

    Parameters
    ----------
    dim : one of {1, 2, 3}
        The dimensionality of the sequence.
    n : int
        How many points to generate.
    seed : float or array of float, shape (dim,)
        The seed from which to start the quasirandom sequence.

    Returns
    -------
    pts : array of float, shape (n, dim)
        The sampled points.

    References
    ----------
    .. [1] http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
    """
    phi1 = 1.6180339887498948482
    phi2 = 1.32471795724474602596
    phi3 = 1.22074408460575947536
    seed = np.broadcast_to(seed, (1, dim))
    phi = np.array([phi1, phi2, phi3])
    g = 1 / phi
    n = np.reshape(np.arange(n), (n, 1))
    pts = (seed + (n * g[:dim])) % 1
    return pts
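A quick usage sketch for the generator above, assuming it is in scope: five 2-D quasirandom points, all inside the unit square by construction of the final modulo.

pts = _low_discrepancy(dim=2, n=5)
print(pts.shape)  # (5, 2); every coordinate lies in [0, 1)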
b8aa5a54e1c3fddbe5f4cbcd2527ed63499b6447
2,404
from typing import Dict

import requests


def get(url: str) -> Dict[str, object]:
    """Returns the sdk GET response

    :param url: A string url endpoint.
    :type: str
    :return: Dict[str, object]
    """
    try:
        res = requests.get(url, headers=get_headers())
    except Exception as e:
        handle_request_error(e)
    return handle_response(res)
2135477211298935a95a62e064653f6713942659
2,405
def register(request):
    """Create an account for a new user"""
    if request.method == 'POST':
        data = request.POST.copy()
        form = tcdUserCreationForm(data)
        next = request.POST['next']
        if form.is_valid():
            new_user = User.objects.create_user(username=data['username'],
                                                password=data['password1'],
                                                email=data['email'])
            new_user.is_staff = False
            new_user.is_superuser = False
            new_user.is_active = True
            new_user.save()
            new_user = auth.authenticate(username=new_user.username,
                                         password=data['password1'])
            auth.login(request, new_user)
            new_user_profile = Profile(user=new_user, score=0)
            new_user_profile.save()
            return HttpResponseRedirect(next)
    else:
        form = tcdUserCreationForm()
        if 'next' in request.GET:
            next = request.GET['next']
        else:
            next = "/"
    return render_to_response("registration/register.html",
                              {'form': form, 'redirect': next},
                              context_instance=RequestContext(request))
c01cafc378a18127f861d17643527caf95ab87ee
2,406
def set_index_da_ct(da):
    """Stacks all coordinates into one multi-index and automatically generates a long_name"""
    coordnames = list(da.coords)
    da_stacked = da.set_index(ct=coordnames)
    if len(coordnames) == 1:
        # only one coordinate: just rename ct to the coordinate name
        da_unstacked = da_stacked.rename(ct=coordnames[0])
    else:
        # generate multi-index
        long_name_string = 'Test Case ('
        for coord in da.coords:
            if 'long_name' in da.coords[coord].attrs:
                long_name_string = long_name_string + da.coords[coord].attrs['long_name'] + ', '
            else:
                long_name_string = long_name_string + coord + ', '
        # remove last comma and close parentheses
        long_name_string = long_name_string[0:-2] + ')'
        da_stacked.coords['ct'].attrs = dict(long_name=long_name_string)
        da_unstacked = da_stacked.unstack()
        for coord in da.coords:
            da_unstacked.coords[coord].attrs = da.coords[coord].attrs
    return da_unstacked, da_stacked
396b1c629352c3843617588071295684e1f2bf79
2,407
def LikeView(request, pk):
    """Function view that manages the likes and dislikes of a post"""
    post = get_object_or_404(Post, id=request.POST.get('post_id'))
    liked = False
    if post.likes.filter(id=request.user.id).exists():
        post.likes.remove(request.user)
        liked = False
    else:
        post.likes.add(request.user)
        liked = True
    return HttpResponseRedirect(reverse('post-detail', args=[str(pk)]))
0031b93e1c58d897e9d56ab786c560c76dc3b962
2,408
def set_edge_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c',
                           default_color=None, style_name=None, network=None, base_url=DEFAULT_BASE_URL):
    """Map table column values to colors to set the edge color.

    Args:
        table_column (str): Name of Cytoscape table column to map values from
        table_column_values (list): List of values from Cytoscape table to be used in mapping
        colors (list): list of hex colors to map to ``table_column_values``
        mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous
        default_color (str): Hex color to set as default
        style_name (str): name for style
        network (SUID or str or None): Name or SUID of a network or view. Default is the
            "current" network active in Cytoscape.
        base_url (str): Ignore unless you need to specify a custom domain, port or version to
            connect to the CyREST API. Default is http://127.0.0.1:1234 and the latest version
            of the CyREST API supported by this version of py4cytoscape.

    Returns:
        str: ''

    Raises:
        CyError: if invalid color, table column doesn't exist, table column values don't match
            the values list, or invalid style name, network or mapping type
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape
            returns an error

    Examples:
        >>> set_edge_color_mapping('EdgeBetweenness', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style')
        ''
        >>> set_edge_color_mapping('EdgeBetweenness', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style')
        ''
        >>> set_edge_color_mapping(**gen_node_color_map('Degree', mapping_type='d'))
        ''
        >>> set_edge_color_mapping(**gen_edge_color_map('interaction', palette_color_brewer_q_Accent(), mapping_type='d'))
        ''
        >>> set_edge_color_mapping(**gen_edge_color_map('EdgeBetweenness'))
        ''
        >>> set_edge_color_mapping(**gen_edge_color_map('EdgeBetweenness', palette_color_brewer_s_Blues()))
        ''
        >>> set_edge_color_mapping(**gen_edge_color_map('EdgeBetweenness', (palette_color_brewer_s_Blues(), palette_color_brewer_d_BrBG())))
        ''
        >>> set_edge_color_mapping('EdgeBetweennessColor', mapping_type='p', default_color='#654321', style_name='galFiltered Style')
        ''

    See Also:
        :meth:`gen_edge_color_map`

    See Also:
        `Value Generators <https://py4cytoscape.readthedocs.io/en/0.0.9/concepts.html#value-generators>`_
        in the Concepts section in the py4cytoscape User Manual.
    """
    verify_hex_colors(colors)

    # set default
    if default_color is not None:
        style_defaults.set_edge_color_default(default_color, style_name, base_url=base_url)
    # TODO: An error here will be missed ... shouldn't this throw an exception?

    # perform mapping for COLOR (i.e., when arrowColorMatchesEdge=T)
    # TODO: This code checks table_column, but the R code does not
    res = _update_visual_property('EDGE_UNSELECTED_PAINT', table_column,
                                  table_column_values=table_column_values, range_map=colors,
                                  mapping_type=mapping_type, style_name=style_name,
                                  network=network, base_url=base_url, table='edge')
    if res is not None:
        # perform mapping for STROKE (i.e., when arrowColorMatchesEdge=F)
        res = _update_visual_property('EDGE_STROKE_UNSELECTED_PAINT', table_column,
                                      table_column_values=table_column_values, range_map=colors,
                                      mapping_type=mapping_type, style_name=style_name,
                                      network=network, base_url=base_url, table='edge')
    return res
447b1e63adf3ec44912cdd1b9795d4839dea7912
2,409
def weld_describe(array, weld_type, aggregations):
    """Aggregate during the same evaluation as opposed to separately as in Series.agg

    Parameters
    ----------
    array : np.ndarray or WeldObject
        to aggregate on
    weld_type : WeldType
        of the array
    aggregations : list of str
        supported are = {'min', 'max', 'sum', 'prod', 'mean', 'std'}

    Returns
    -------
    WeldObject
    """
    assert isinstance(aggregations, list)
    assert len(aggregations) > 0

    weld_obj = WeldObject(_encoder, _decoder)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    merger_chunk = """
    let agg_%(name)s = f64(
        result(
            for(
                %(array)s,
                merger[%(type)s, %(operation)s],
                |b, i, e| merge(b, e)
            )
        )
    );"""
    mean_chunk_solo = """
    let agg_mean = f64(
        result(
            for(
                %(array)s,
                merger[%(type)s, +],
                |b, i, n| merge(b, n)
            )
        )
    ) / f64(len(%(array)s));"""
    mean_chunk_with_sum = """
    let agg_mean = agg_sum / f64(len(%(array)s));"""
    std_chunk_solo = """
    %(mean)s
    let agg_std = sqrt(
        result(
            for(
                %(array)s,
                merger[f64, +],
                |b, i, n| merge(b, pow(f64(n) - agg_mean, 2.0))
            )
        ) / f64(len(%(array)s) - 1L)
    );""".replace('%(mean)s', mean_chunk_with_sum if 'sum' in aggregations else mean_chunk_solo)
    std_chunk_with_mean = """
    let agg_std = sqrt(
        result(
            for(
                %(array)s,
                merger[f64, +],
                |b, i, n| merge(b, pow(f64(n) - agg_mean, 2.0))
            )
        ) / f64(len(%(array)s) - 1L)
    );"""

    aggregations_dict = {'min': merger_chunk.replace('%(operation)s', 'min').replace('%(name)s', 'min'),
                         'max': merger_chunk.replace('%(operation)s', 'max').replace('%(name)s', 'max'),
                         'sum': merger_chunk.replace('%(operation)s', '+').replace('%(name)s', 'sum'),
                         'prod': merger_chunk.replace('%(operation)s', '*').replace('%(name)s', 'prod'),
                         'mean': mean_chunk_with_sum if 'sum' in aggregations else mean_chunk_solo,
                         'std': std_chunk_with_mean if 'mean' in aggregations else std_chunk_solo}

    weld_template = """
    %(chunks)s
    let agg_result = appender[f64];
    %(merges)s
    result(agg_result)
    """

    chunks = ''.join([aggregations_dict[agg] for agg in aggregations])
    merges = ''.join(['let agg_result = merge(agg_result, %s);\n\t' % ('agg_' + agg)
                      for agg in aggregations])

    weld_obj.weld_code = weld_template % {'chunks': chunks, 'merges': merges} \
        % {'array': array_var, 'type': weld_type}

    return weld_obj
12e8b074b588be955be44fd8f697c2a34af187d9
2,410
def roles_required(*roles):
    """Decorator which specifies that a user must have all the specified roles.

    Example::

        @app.route('/dashboard')
        @roles_required('admin', 'editor')
        def dashboard():
            return 'Dashboard'

    The current user must have both the `admin` role and `editor` role in order
    to view the page.

    :param roles: The required roles.
    """
    def wrapper(fn):
        @wraps(fn)
        def decorated_view(*args, **kwargs):
            perms = [Permission(RoleNeed(role)) for role in roles]
            for perm in perms:
                if not perm.can():
                    if _security._unauthorized_callback:
                        return _security._unauthorized_callback()
                    else:
                        return _get_unauthorized_view()
            return fn(*args, **kwargs)
        return decorated_view
    return wrapper
9b3f3103b7ada875ec1e20f90afa953f2073c2c1
2,411
import requests
from bs4 import BeautifulSoup as bs


def get_all_forms(url):
    """Given a `url`, it returns all forms from the HTML content"""
    soup = bs(requests.get(url).content, "html.parser")
    return soup.find_all("form")
2ffa9b9165ae14de4e410ca638db0bc9db432642
2,412
def cal_max_len(ids, curdepth, maxdepth):
    """calculate max sequence length"""
    assert curdepth <= maxdepth
    if isinstance(ids[0], list):
        res = max([cal_max_len(k, curdepth + 1, maxdepth) for k in ids])
    else:
        res = len(ids)
    return res
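A small check of the recursive helper above on a nested batch of token ids:

batch = [[1, 2, 3], [4, 5, 6, 7], [8]]
print(cal_max_len(batch, curdepth=0, maxdepth=2))  # 4 (longest inner list)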
0a6c4c96d7518b98d69141711272a97426a623b2
2,413
from urllib.parse import quote_plus


def urlencode(query, doseq=True, quote_via=quote_plus):
    """
    An alternate implementation of Python's stdlib
    :func:`urllib.parse.urlencode` function which accepts unicode keys and
    values within the ``query`` dict/sequence; all Unicode keys and values are
    first converted to UTF-8 before being used to compose the query string.

    The value of ``query`` must be a sequence of two-tuples representing
    key/value pairs *or* an object (often a dictionary) with an ``.items()``
    method that returns a sequence of two-tuples representing key/value pairs.

    For minimal calling convention backwards compatibility, this version of
    urlencode accepts *but ignores* a second argument conventionally named
    ``doseq``. The Python stdlib version behaves differently when ``doseq`` is
    False and when a sequence is presented as one of the values. This version
    always behaves in the ``doseq=True`` mode, no matter what the value of the
    second argument.

    Both the key and value are encoded using the ``quote_via`` function which
    by default is using a similar algorithm to :func:`urllib.parse.quote_plus`
    which converts spaces into '+' characters and '/' into '%2F'.

    .. versionchanged:: 1.5
       In a key/value pair, if the value is ``None`` then it will be dropped
       from the resulting output.

    .. versionchanged:: 1.9
       Added the ``quote_via`` argument to allow alternate quoting algorithms
       to be used.
    """
    try:
        # presumed to be a dictionary
        query = query.items()
    except AttributeError:
        pass

    result = ''
    prefix = ''

    for (k, v) in query:
        k = quote_via(k)

        if is_nonstr_iter(v):
            for x in v:
                x = quote_via(x)
                result += '%s%s=%s' % (prefix, k, x)
                prefix = '&'
        elif v is None:
            result += '%s%s=' % (prefix, k)
        else:
            v = quote_via(v)
            result += '%s%s=%s' % (prefix, k, v)

        prefix = '&'

    return result
de023be6a81b5ce27a552ede4e7e4e0da4f9e9b5
2,414
import numpy as np


def clip_action(action, action_min, action_max):
    """Truncates the entries in action to the range defined between
    action_min and action_max.
    """
    return np.clip(action, action_min, action_max)
f332b1274ecdd8e104de294568d59e7d0ea056ca
2,415
def isGray(image):
    """Return True if the image has one channel per pixel."""
    return image.ndim < 3
0fdee0d1b6a99ab91354f585cf648583e57d5645
2,416
import logging

import torch


def mat2img(input_matrix, index_matrix):
    """Transforms a batch of features of matrix images into a batch of features of vector images.

    Args:
        input_matrix (torch.Tensor): The images with shape (batch, features, matrix.size).
        index_matrix (torch.Tensor): The index matrix for the images, shape (1, 1, matrix.size).
    """
    logger = logging.getLogger(__name__ + '.mat2img')
    logger.debug('input matrix shape : {}'.format(input_matrix.size()))
    image_length = index_matrix[0, 0, torch.ge(index_matrix[0, 0], 0)].size(0)
    logger.debug('new image length : {}'.format(image_length))
    images = input_matrix.new_zeros((input_matrix.size(0), input_matrix.size(1), image_length),
                                    dtype=torch.float)
    logger.debug('new images shape : {}'.format(images.size()))
    for i in range(index_matrix.size(-2)):  # iterate over the rows of index matrix
        for j in range(index_matrix.size(-1)):  # iterate over the cols of index matrix
            if index_matrix[0, 0, i, j] != -1:
                images[:, :, int(index_matrix[0, 0, i, j])] = input_matrix[:, :, i, j]
    return images
d822635157c1aeb6c0b52d37082eec5f2a6833b4
2,417
def _check_molecule_format(val):
    """If it seems to be zmatrix rather than xyz format we convert before returning"""
    atoms = [x.strip() for x in val.split(";")]
    if atoms is None or len(atoms) < 1:  # pylint: disable=len-as-condition
        raise QiskitNatureError("Molecule format error: " + val)

    # An xyz format has 4 parts in each atom, if not then do zmatrix convert
    # Allows dummy atoms, using symbol 'X' in zmatrix format for coord computation to xyz
    parts = [x.strip() for x in atoms[0].split(" ")]
    if len(parts) != 4:
        try:
            zmat = []
            for atom in atoms:
                parts = [x.strip() for x in atom.split(" ")]
                z = [parts[0]]
                for i in range(1, len(parts), 2):
                    z.append(int(parts[i]))
                    z.append(float(parts[i + 1]))
                zmat.append(z)
            xyz = z2xyz(zmat)
            new_val = ""
            for atm in xyz:
                if atm[0].upper() == "X":
                    continue
                if new_val:
                    new_val += "; "
                new_val += f"{atm[0]} {atm[1]} {atm[2]} {atm[3]}"
            return new_val
        except Exception as exc:
            raise QiskitNatureError("Failed to convert atom string: " + val) from exc

    return val
d546251f02c6ee3bfe44256edff439ba4d3b4c31
2,418
import numpy as np
from skimage import measure


def measure_area_perimeter(mask):
    """A function that takes either a segmented image or perimeter image as input,
    and calculates the area and the length of the perimeter of a lesion."""
    # Measure area: the sum of all white pixels in the mask image
    area = np.sum(mask)

    # Measure perimeter: first find which pixels belong to the perimeter.
    perimeter = measure.perimeter(mask)

    return area, perimeter
f443b7208c8c452480f0f207153afc5aa1f11d41
2,419
import os

import requests


def _click_up_params(user_email: str) -> dict:
    """Load the Click Up parameters for this user.

    Args:
        user_email (str): Email of user making the request.

    Returns:
        (dict): A dict containing the elements:
            'success': (Boolean) True if successful, otherwise False
            'message': (str) Message to display to the user if not successful
    """
    # Make sure the server's environment is set up properly.
    param_names = ['CLICK_UP_BASE_URL', 'CLICK_UP_REDIRECT_PATH', 'CLICK_UP_AUTH_URL',
                   'CLICK_UP_CLIENT_ID', 'CLICK_UP_CLIENT_SECRET']
    missing_params = []
    for param in param_names:
        if os.environ.get(param, None) is None:
            missing_params.append(param)
    if missing_params:
        LOGGER.error(f"Missing Click Up environment variables: {missing_params}")
        return {'success': False,
                'message': 'Click Up environment is not configured. Check log file.'}

    # See if the user is logged in to Click Up.
    access_token = session.get('click_up_access_token')
    LOGGER.debug(f"Click Up Access Token: {access_token}")
    if access_token is None:
        LOGGER.debug('User is not logged in to Click Up')
        return _make_click_up_login()

    url = os.environ.get('CLICK_UP_BASE_URL', None)
    headers = {'Authorization': access_token}

    # Get Team ID
    result = requests.get(url + '/team', headers=headers)
    data = result.json()

    # See if we need to login in again.
    ecode = data.get('ECODE', '')
    if ecode in ['OAUTH_019', 'OAUTH_021', 'OAUTH_025', 'OAUTH_077']:
        LOGGER.debug(f"User needs to login to Click Up. Again. ECODE={ecode}")
        session['click_up_access_token'] = None
        return _make_click_up_login()

    target_team_name = _get_click_up_team_name(user_email)
    team_id = None
    for team in data.get('teams', []):
        if team.get('name', '') == target_team_name:
            team_id = team.get('id', None)
            break
    if team_id is None:
        message = f"Could not find target team '{target_team_name}'"
        LOGGER.debug(message)
        return {'success': False, 'message': message}
    else:
        LOGGER.debug(f"Found target team '{target_team_name}' having ID {team_id}")
        session['click_up_team_id'] = team_id

    # Get Workspace ID
    result = requests.get(url + '/team/' + team_id + '/space?archived=false', headers=headers)
    data = result.json()
    target_workspace_name = _get_click_up_workspace_name(user_email)
    workspace_id = None
    for workspace in data.get('spaces', []):
        if workspace.get('name', '') == target_workspace_name:
            workspace_id = workspace.get('id', None)
            break
    if workspace_id is None:
        message = f"Could not find workspace team '{target_workspace_name}'"
        LOGGER.debug(message)
        return {'success': False, 'message': message}
    else:
        LOGGER.debug(f"Found target workspace '{target_workspace_name}' having ID {workspace_id}")
        session['click_up_workspace_id'] = workspace_id

    # LOGGER.debug(json.dumps(data, indent=4))
    return {'success': True,
            'message': 'Team ID and Workspace ID have been located and bookmarked'}
4a4a3aedb7f1331acdbc9f36416ae14594c4fca0
2,420
from typing import Any
from unittest.mock import Mock


def mock_object(**params: Any) -> "Mock":  # type: ignore # noqa
    """creates an object using params to set attributes

    >>> option = mock_object(verbose=False, index=list(range(5)))
    >>> option.verbose
    False
    >>> option.index
    [0, 1, 2, 3, 4]
    """
    return type("Mock", (), params)()
52140b52d29a424b3f16f0e26b03c19f4afbb0b4
2,421
def get_words(message):
    """Get the normalized list of words from a message string.

    This function should split a message into words, normalize them, and return
    the resulting list. For splitting, you should split on spaces. For
    normalization, you should convert everything to lowercase.

    Args:
        message: A string containing an SMS message

    Returns:
        The list of normalized words from the message.
    """
    words = message.strip().split()
    norm_words = [word.lower() for word in words]
    # apply stop words
    nonstop_words = [word for word in norm_words if word not in stop_words]
    # apply stemming
    stem_words = [ps.stem(word) for word in nonstop_words]
    return stem_words
dec592d3574da70c27368c4642f5fa47d23b5225
2,422
def get_atomic_forces_card(name, **kwargs):
    """Convert XML data to ATOMIC_FORCES card

    :param name: Card name
    :param kwargs: Dictionary with converted data from XML file
    :return: List of strings
    """
    try:
        external_atomic_forces = kwargs['external_atomic_forces']
    except KeyError:
        logger.debug("Missing required arguments when building ATOMIC_FORCES card!")
        return []

    if len(external_atomic_forces) == 0:
        return []

    # Warning if number of atoms in atomic positions differs from forces
    atomic_positions = kwargs.get('atomic_positions', kwargs.get('crystal_positions', {}))
    atoms = atomic_positions.get('atom', [])
    if atoms and len(atoms) != len(external_atomic_forces):
        logger.error("incorrect number of atomic forces")

    # Build input card text lines
    lines = [name]
    for forces in external_atomic_forces:
        lines.append(' {0}'.format(' '.join([str(value) for value in forces])))
    return lines
5ded8abd9b3ce4ba41b14f7edbe5974063cdf08d
2,423
def hasPathSum(self, root, sum):
    """
    :type root: TreeNode
    :type sum: int
    :rtype: bool
    """
    if root is None:
        return False
    if sum - root.val == 0 and root.left is None and root.right is None:
        return True
    else:
        return (self.hasPathSum(root.left, sum - root.val) or
                self.hasPathSum(root.right, sum - root.val))
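A minimal check of the method above; the tiny TreeNode and Solution wrappers here are hypothetical stand-ins, since the record only shows the method itself.

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right


class Solution:
    hasPathSum = hasPathSum  # bind the function above as a method


root = TreeNode(5, TreeNode(4, TreeNode(11)), TreeNode(8))
print(Solution().hasPathSum(root, 20))  # True: 5 -> 4 -> 11
print(Solution().hasPathSum(root, 10))  # False: no root-to-leaf path sums to 10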
ffab5b8205aa9785c86ac365bd6e854319138627
2,424
def _(node: IntJoin, ctx: AnnotateContext) -> BoxType:
    """All references available on either side of the Join nodes are available."""
    lt = box_type(node.over)
    rt = box_type(node.joinee)
    t = union(lt, rt)
    node.typ = t
    return t
c5d2c94f58c019399ebfcc431994ab339f317b0c
2,425
import json


def detect_global_table_updates(record):
    """This will detect DDB Global Table updates that are not relevant to application data
    updates. These need to be skipped over as they are pure noise.

    :param record:
    :return:
    """
    # This only affects MODIFY events.
    if record['eventName'] == 'MODIFY':
        # Need to compare the old and new images to check for GT specific changes only
        # (just pop off the GT fields)
        old_image = remove_global_dynamo_specific_fields(record['dynamodb']['OldImage'])
        new_image = remove_global_dynamo_specific_fields(record['dynamodb']['NewImage'])
        if json.dumps(old_image, sort_keys=True) == json.dumps(new_image, sort_keys=True):
            return True
    return False
43d9cd6558b0e935a4e195e80932104699564230
2,426
import warnings
from typing import List


def fix_telecined_fades(clip: vs.VideoNode, tff: bool | int | None = None,
                        thr: float = 2.2) -> vs.VideoNode:
    """
    A filter that gives a mathematically perfect solution to fades made *after*
    telecining (which made perfect IVTC impossible). This is an improved version
    of the Fix-Telecined-Fades plugin that deals with overshoot/undershoot by
    adding a check.

    Make sure to run this *after* IVTC/deinterlacing!

    If the value surpasses thr * original value, it will not affect any pixels
    in that frame to avoid it damaging frames it shouldn't need to. This helps
    a lot with orphan fields as well, which would otherwise create massive
    swings in values, sometimes messing up the fade fixing.

    If you pass your own float clip, you'll want to make sure to properly
    dither it down after. If you don't do this, you'll run into some serious
    issues!

    Taken from this gist and modified by LightArrowsEXE.
    <https://gist.github.com/blackpilling/bf22846bfaa870a57ad77925c3524eb1>

    :param clip:    Input clip
    :param tff:     Top-field-first. `False` sets it to Bottom-Field-First.
                    If None, get the field order from the _FieldBased prop.
    :param thr:     Threshold for when a field should be adjusted.
                    Default is 2.2, which appears to be a safe value that
                    doesn't cause it to do weird stuff with orphan fields.

    :return:        Clip with only fades fixed
    """
    def _ftf(n: int, f: List[vs.VideoFrame]) -> vs.VideoNode:
        avg = (get_prop(f[0], 'PlaneStatsAverage', float),
               get_prop(f[1], 'PlaneStatsAverage', float))

        if avg[0] != avg[1]:
            mean = sum(avg) / 2
            fixed = (sep[0].std.Expr(f"x {mean} {avg[0]} / dup {thr} <= swap 1 ? *"),
                     sep[1].std.Expr(f"x {mean} {avg[1]} / *"))
        else:
            fixed = sep  # type: ignore

        return core.std.Interleave(fixed).std.DoubleWeave()[::2]

    # I want to catch this before it reaches SeparateFields and give newer users a more useful error
    if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
        raise vs.Error("fix_telecined_fades: 'You must set `tff` for this clip!'")
    elif isinstance(tff, (bool, int)):
        clip = clip.std.SetFieldBased(int(tff) + 1)

    clip32 = depth(clip, 32).std.Limiter()
    bits = get_depth(clip)

    sep = clip32.std.SeparateFields().std.PlaneStats()
    sep = sep[::2], sep[1::2]  # type: ignore  # I know this isn't good, but frameeval breaks otherwise
    ftf = core.std.FrameEval(clip32, _ftf, sep)  # and I don't know how or why

    if bits == 32:
        warnings.warn("fix_telecined_fades: 'Make sure to dither down BEFORE setting the "
                      "FieldBased prop to 0! Not doing this MAY return some of the combing!'")
    else:
        ftf = depth(ftf, bits, dither_type=Dither.ERROR_DIFFUSION)
        ftf = ftf.std.SetFieldBased(0)

    return ftf
d28b78bdb65ffc0c1d354fbbde25391d7ce389b1
2,427
import copy
import json
import os
from datetime import datetime
from typing import Any, Dict, List, Union


def compute_statistics(provider_slug, tile_grid=get_default_tile_grid(), filename=None):
    """
    :param provider_slug: Slug of the provider whose completed ExportTaskRecords are sampled
    :param tile_grid: Calculate statistics for each tile in the tile grid
    :param filename: Serializes the intermediate data-sample data so it can be shared btw different deployments
    :return: A dict with statistics including area, duration, and package size per sq. kilometer
    """
    max_estimate_export_task_records = os.getenv("MAX_ESTIMATE_EXPORT_TASK_RECORDS", 10000)
    # Order by time descending to ensure more recent samples are collected first
    export_task_records: QuerySet[ExportTaskRecord] = (
        ExportTaskRecord.objects.filter(
            export_provider_task__provider__slug=provider_slug,
            status=TaskState.SUCCESS.value,
            export_provider_task__status=TaskState.COMPLETED.value,
            result__isnull=False,
            # Only use results larger than a MB,
            # anything less is likely a failure or a test.
            result__size__gt=1,
        )
        .order_by("-finished_at")
        .select_related("result", "export_provider_task__run__job", "export_provider_task__provider")
        .all()[: int(max_estimate_export_task_records)]
    )

    processed_runs: Dict[str, Any] = {}
    processed_dptr: Dict[str, Any] = {}
    export_task_count = 0
    total_count = len(export_task_records)  # This should be the first and only DB hit.

    all_stats: Dict[str, Any] = {}
    logger.debug("Prefetching geometry data from all Jobs")
    logger.info(f"Beginning collection of statistics for {total_count} {provider_slug} ExportTaskRecords")

    runs: List[ExportRun] = list(
        set([export_task_record.export_provider_task.run for export_task_record in export_task_records])
    )
    data_provider_task_records: List[DataProviderTaskRecord] = list(
        set([export_task_record.export_provider_task for export_task_record in export_task_records])
    )
    default_stat = get_default_stat()
    accessors = get_accessors()
    global_stats = get_child_entry(all_stats, global_key, default_stat)
    for run in runs:
        area = get_geometry_description(run.job.the_geom)["area"]
        collect_samples(run, [global_stats], ["duration", "area"], accessors, area)
    for data_provider_task_record in data_provider_task_records:
        area = get_geometry_description(data_provider_task_record.run.job.the_geom)["area"]
        provider_stats = get_child_entry(all_stats, data_provider_task_record.provider.slug, default_stat)
        collect_samples(data_provider_task_record, [provider_stats], ["duration", "area"], accessors, area)

    collected_stats = collect_samples_for_export_task_records(
        export_task_records, copy.deepcopy(all_stats), tile_grid
    )
    [all_stats.update(stat) for stat in collected_stats]

    logger.info(
        f"Computing statistics across {export_task_count} completed "
        f"{provider_slug} ExportTaskRecords (geom_cache_misses={_dbg_geom_cache_misses})"
    )
    # TODO: Merge in any auxiliary sample data?

    if filename is not None:
        all_stats["timestamp"] = str(datetime.now())
        with open(filename, "w") as file:
            json.dump(all_stats, file)

    totals: Dict[str, Union[int, dict]] = {
        "run_count": len(processed_runs),
        "data_provider_task_count": len(processed_dptr),
        "export_task_count": export_task_count,
    }

    returned_totals = process_totals_concurrently(list(all_stats.keys()), copy.deepcopy(all_stats))
    [totals.update(total) for total in returned_totals]

    tile_count = sum([provider.get("tile_count", 0) for slug, provider in totals.items()
                      if isinstance(provider, dict)])
    logger.info("Generated statistics for %d tiles for group %s", tile_count, provider_slug)

    return totals
13f6a51492235d0f9b9b6211494c105bbd547300
2,428
from contextlib import redirect_stdout
from io import StringIO
from typing import Optional, Union


def compare_rdf(expected: Union[Graph, str], actual: Union[Graph, str],
                fmt: Optional[str] = "turtle") -> Optional[str]:
    """Compare expected to actual, returning a string if there is a difference

    :param expected: expected RDF. Can be Graph, file name, uri or text
    :param actual: actual RDF. Can be Graph, file name, uri or text
    :param fmt: RDF format
    :return: None if they match else summary of difference
    """
    def rem_metadata(g: Graph) -> IsomorphicGraph:
        # Remove list declarations from target
        for s in g.subjects(RDF.type, RDF.List):
            g.remove((s, RDF.type, RDF.List))
        for t in g:
            if t[1] in (META.generation_date, META.source_file_date, META.source_file_size,
                        TYPE.generation_date, TYPE.source_file_date, TYPE.source_file_size):
                g.remove(t)
        g_iso = to_isomorphic(g)
        return g_iso

    # Bypass compare if settings have turned it off
    if SKIP_RDF_COMPARE:
        print(f"tests/utils/compare_rdf.py: {SKIP_RDF_COMPARE_REASON}")
        return None

    expected_graph = to_graph(expected, fmt)
    expected_isomorphic = rem_metadata(expected_graph)
    actual_graph = to_graph(actual, fmt)
    actual_isomorphic = rem_metadata(actual_graph)

    # Graph compare takes a Looong time
    in_both, in_old, in_new = graph_diff(expected_isomorphic, actual_isomorphic)
    # if old_iso != new_iso:
    #     in_both, in_old, in_new = graph_diff(old_iso, new_iso)
    old_len = len(list(in_old))
    new_len = len(list(in_new))
    if old_len or new_len:
        txt = StringIO()
        with redirect_stdout(txt):
            print("----- Missing Triples -----")
            if old_len:
                print_triples(in_old)
            print("----- Added Triples -----")
            if new_len:
                print_triples(in_new)
        return txt.getvalue()
    return None
f2e128e1c43c5c207e99c30bb32c14f4c4b71798
2,429
def start_engine(engine_name, tk, context):
    """
    Creates an engine and makes it the current engine.
    Returns the newly created engine object. Example::

        >>> import sgtk
        >>> tk = sgtk.sgtk_from_path("/studio/project_root")
        >>> ctx = tk.context_empty()
        >>> engine = sgtk.platform.start_engine('tk-maya', tk, ctx)
        >>> engine
        <Sgtk Engine 0x10451b690: tk-maya, env: shotgun>

    :param engine_name: Name of the engine to launch, e.g. tk-maya
    :param tk: :class:`~sgtk.Sgtk` instance to associate the engine with
    :param context: :class:`~sgtk.Context` object of the context to launch
        the engine for.
    :returns: :class:`Engine` instance
    :raises: :class:`TankEngineInitError` if an engine could not be started
        for the passed context.
    """
    return _start_engine(engine_name, tk, None, context)
bb755d359f5a950aa182545803de0a1ca4d6aaee
2,430
def parse_vaulttext(b_vaulttext):
    """Parse the vaulttext.

    Args:
        b_vaulttext: A byte str containing the vaulttext (ciphertext, salt,
            crypted_hmac).

    Returns:
        A tuple of byte str of the ciphertext suitable for passing to a Cipher
        class's decrypt() function, a byte str of the salt, and a byte str of
        the crypted_hmac.

    Raises:
        AnsibleVaultFormatError: If the vaulttext format is invalid.
    """
    # SPLIT SALT, DIGEST, AND DATA
    try:
        return _parse_vaulttext(b_vaulttext)
    except AnsibleVaultFormatError:
        raise
    except Exception as exc:
        raise AnsibleVaultFormatError(f'Vault vaulttext format error: {exc}')
1b1b6e2aaf1893401d93f750248892ffebae26a6
2,431
import sqlite3


def does_column_exist_in_db(db, table_name, col_name):
    """Checks if a specific col exists"""
    col_name = col_name.lower()
    query = f"pragma table_info('{table_name}');"
    all_rows = []
    try:
        db.row_factory = sqlite3.Row  # For fetching columns by name
        cursor = db.cursor()
        cursor.execute(query)
        all_rows = cursor.fetchall()
    except sqlite3.Error as ex:
        print(f'Query error, query={query} Error={ex}')
    for row in all_rows:
        if row['name'].lower() == col_name:
            return True
    return False
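A self-contained check of the helper above against an in-memory SQLite database:

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE users (id INTEGER, Email TEXT)")
print(does_column_exist_in_db(db, "users", "email"))    # True (match is case-insensitive)
print(does_column_exist_in_db(db, "users", "missing"))  # False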
90abc20c9643e93641e37c0e94fd504cbcf09928
2,432
import hmac


def make_secure_val(val):
    """Takes hashed pw and adds salt; this will be the cookie"""
    # note: hmac.new(key, msg) expects bytes (and an explicit digestmod on Python 3)
    return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
6b29f5f3a447bca73ac02a1d7843bdbb6d982db9
2,433
import random


def get_ad_contents(queryset):
    """Take a Contents queryset and return one random contents object that has a preview video.

    :param queryset: Contents queryset
    :return: contents object
    """
    contents_list = queryset.filter(preview_video__isnull=False)
    max_int = contents_list.count() - 1
    if max_int < 0:
        return
    while True:
        idx = random.randint(0, max_int)
        contents = contents_list[idx]
        if contents:
            return contents
233d1e5f736a9cff38731dd292d431f098bee17a
2,434
def Image_CanRead(*args, **kwargs):
    """
    Image_CanRead(String filename) -> bool

    Returns True if the image handlers can read this file.
    """
    return _core_.Image_CanRead(*args, **kwargs)
f82e31860480611baf6a5f920515466c0d37acab
2,435
import numpy as np


def flatten(lst):
    """Flatten a list."""
    return ([y for l in lst for y in flatten(l)]
            if isinstance(lst, (list, np.ndarray)) else [lst])
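A short demonstration of the one-liner above (it recurses into nested lists, and into numpy arrays the same way):

print(flatten([1, [2, [3, 4], 5]]))  # [1, 2, 3, 4, 5]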
0aed241d06725dee9a99512ab2ea5c3f6c02008d
2,436
def calc_mean_score(movies):
    """Helper method to calculate mean of list of Movie namedtuples,
    round the mean to 1 decimal place"""
    ratings = [m.score for m in movies]
    mean = sum(ratings) / max(1, len(ratings))
    return round(mean, 1)
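A usage sketch for the helper above; the Movie namedtuple is implied by the docstring, and the field name `score` is the only assumption here.

from collections import namedtuple

Movie = namedtuple("Movie", "title score")
movies = [Movie("M1", 8.0), Movie("M2", 7.5), Movie("M3", 9.1)]
print(calc_mean_score(movies))  # 8.2
print(calc_mean_score([]))      # 0.0, the max(1, ...) guard avoids ZeroDivisionError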
6f837ff251e6221227ba4fa7da752312437da90f
2,437
def srun(hosts, cmd, srun_params=None):
    """Run srun cmd on slurm partition.

    Args:
        hosts (str): hosts to allocate
        cmd (str): cmdline to execute
        srun_params (dict): additional params for srun

    Returns:
        CmdResult: object containing the result (exit status, stdout, etc.)
            of the srun command
    """
    cmd = srun_str(hosts, cmd, srun_params)
    try:
        result = run_command(cmd, timeout=30)
    except DaosTestError as error:
        result = None
        raise SlurmFailed("srun failed : {}".format(error))
    return result
2e339d90c2de4b1ae81f7e4671c1f726a725a68c
2,438
def COSclustering(key, emb, oracle_num_speakers=None, max_num_speaker=8, MIN_SAMPLES=6):
    """
    input:
        key (str): speaker uniq name
        emb (np array): speaker embedding
        oracle_num_speakers (int or None): oracle number of speakers if known else None
        max_num_speaker (int): maximum number of clusters to consider for each session
        MIN_SAMPLES (int): minimum number of samples required for NME clustering,
            this avoids zero p_neighbour_lists. Default of 6 is selected since
            (1/rp_threshold) >= 4.

    output:
        Y (List[int]): speaker labels
    """
    est_num_spks_out_list = []
    mat = get_eigen_matrix(emb)

    if oracle_num_speakers:
        max_num_speaker = oracle_num_speakers

    X_conn_spkcount, rp_thres_spkcount, est_num_of_spk, lambdas, p_neigh = NMEanalysis(mat, max_num_speaker)
    if emb.shape[0] > MIN_SAMPLES:
        X_conn_from_dist = get_X_conn_from_dist(mat, p_neigh)
    else:
        X_conn_from_dist = mat

    if oracle_num_speakers:
        est_num_of_spk = oracle_num_speakers
    est_num_spks_out_list.append([key, str(est_num_of_spk)])

    # Perform spectral clustering
    spectral_model = sklearn_SpectralClustering(
        affinity='precomputed',
        eigen_solver='amg',
        random_state=0,
        n_jobs=-1,
        n_clusters=est_num_of_spk,
        eigen_tol=1e-10,
    )
    Y = spectral_model.fit_predict(X_conn_from_dist)

    return Y
a3b967251683da1e29004a937625d7006a0519ed
2,439
import numpy as np
import torch


def gauss_distance(sample_set, query_set, unlabeled_set=None):
    """(experimental) function to try different approaches to model prototypes as gaussians

    Args:
        sample_set: features extracted from the sample set
        query_set: features extracted from the query set
        unlabeled_set: features extracted from the unlabeled set
    """
    b, n, k, c = sample_set.size()

    sample_set_std = sample_set.std(2).view(b, 1, n, c)
    sample_set_mean = sample_set.mean(2).view(b, 1, n, c)
    query_set = query_set.view(b, n * k, 1, c)
    d = (query_set - sample_set_mean) / sample_set_std
    return -torch.sum(d ** 2, 3) / np.sqrt(c)
b7583988d79d70bda9c3ab6ee0690042645ed714
2,440
def make_mps_left(mps, truncate_mbd=1e100, split_s=False):
    """
    Put an mps into left canonical form

    Args:
        mps : list of mps tensors
            The MPS stored as a list of mps tensors

    Kwargs:
        truncate_mbd : int
            The maximum bond dimension to which the mps should be truncated

    Returns:
        mps : list of mps tensors
            The resulting left-canonicalized MPS
    """
    # Figure out size of mps
    N = len(mps)

    # Sweep the gauge from left to right
    for site in range(N - 1):
        # tmpprint('\t\t\t\tSite: {}'.format(site))
        mps = move_gauge_right(mps, site,
                               truncate_mbd=truncate_mbd,
                               return_ent=False,
                               return_wgt=False,
                               split_s=split_s)

    # Remove empty indices at the ends of the mps
    mps = remove_empty_ends(mps)

    # Return results
    return mps
f2de408b82877050bf24a822c54b4520dad40f2e
2,441
def word_after(line, word):
    """'a black sheep', 'black' -> 'sheep'"""
    return line.split(word, 1)[-1].strip().split(' ', 1)[0]
cfa16244d00af8556d7955b7edeb90bac0a213ba
2,442
def domain_in_domain(subdomain, domain):
    """Returns True if subdomain is a sub-domain of domain.

    subdomain
        A *reversed* list of strings returned by :func:`split_domain`

    domain
        A *reversed* list of strings as returned by :func:`split_domain`

    For example::

        >>> domain_in_domain(['com', 'example'],
        ...                  ['com', 'example', 'www'])
        True"""
    if len(subdomain) <= len(domain):
        i = 0
        for d in subdomain:
            if d != domain[i]:
                return False
            i += 1
        return True
    else:
        return False
cb1b3a3f899f13c13d4168c88ca5b9d4ee345e47
2,443
import numpy as np


def polygon_from_boundary(xs, ys, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, xtol=0.0):
    """Polygon within box left of boundary given by (xs, ys)

    xs, ys: coordinates of boundary (ys ordered increasingly)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)
    xs[xs > xmax - xtol] = xmax
    xs[xs < xmin + xtol] = xmin
    index = -1
    while xs[index] == xmin:
        index -= 1
    if index < -2:
        xs, ys = xs[:index + 2], ys[:index + 2]
    vertices = list(zip(xs, ys))  # zip() returns an iterator in Python 3, so materialize it
    if len(xs) == 1:
        vertices.append((xs[0], ymax))
        vertices.append((xmin, ymax))
    elif xs[-1] >= xmax - xtol:
        if xs[-1] < xmax:
            vertices.append((xmax, ys[-1]))
        if ys[-1] < ymax:
            vertices.append((xmax, ymax))
        vertices.append((xmin, ymax))
    elif xs[-1] > xmin:
        vertices.append((xmin, ys[-1]))
    if (xs[0] > xmin) or (ys[0] > ymin):
        vertices.append((xmin, ymin))
    if ys[0] > ymin:
        vertices.append((xs[0], ymin))
    vertices = np.asarray(vertices)
    return vertices
4e75cd8b11038432224836b427658226d4c820d7
2,444
import numpy as np
from numpy.linalg import eigvalsh  # assumed source of eigvalsh; the original module may use its own


def is_degenerate(op, tol=1e-12):
    """Check if operator has any degenerate eigenvalues, determined relative
    to mean spacing of all eigenvalues.

    Parameters
    ----------
    op : operator or 1d-array
        Operator or assumed eigenvalues to check degeneracy for.
    tol : float
        How much closer than evenly spaced the eigenvalue gap has to be
        to count as degenerate.

    Returns
    -------
    n_dgen : int
        Number of degenerate eigenvalues.
    """
    op = np.asarray(op)
    if op.ndim != 1:
        evals = eigvalsh(op)
    else:
        evals = op
    l_gaps = evals[1:] - evals[:-1]
    l_tol = tol * (evals[-1] - evals[0]) / op.shape[0]
    return np.count_nonzero(abs(l_gaps) < l_tol)
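A quick sanity check of the helper above, assuming it is in scope together with numpy:

print(is_degenerate(np.diag([0.0, 1.0, 1.0, 2.0])))  # 1 (one repeated eigenvalue)
print(is_degenerate([0.0, 1.0, 2.0, 3.0]))           # 0 (evenly spaced, no degeneracy)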
2d2672c711c1e4320de151484cdeb7463cf0abd8
2,445
def _get_skip_props(mo, include_operational=False, version_filter=True):
    """Internal function to skip mo property if not to be considered for sync."""
    skip_props = []
    for prop in mo.prop_meta:
        mo_property_meta = mo.prop_meta[prop]
        if mo_property_meta is None:
            continue
        # do not include operational properties
        if not include_operational:
            if mo_property_meta.access in (MoPropertyMeta.INTERNAL,
                                           MoPropertyMeta.READ_ONLY):
                skip_props.append(prop)
        # checks if property is part of current or earlier ucsm schema
        if version_filter:
            version = mo.get_handle().version
            if version is None or version < mo_property_meta.version or \
                    mo_property_meta.access == MoPropertyMeta.INTERNAL:
                skip_props.append(prop)
    return skip_props
dd24798c84f47a954eb324092c3cfb46c23a062e
2,446
import numpy as np


def generate_split_problem():
    """Generates a 'Split' problem configuration.

    Returns (environment, robot, start configuration, goal configuration)."""
    walls = [rectangle(0, 400, 0, 10), rectangle(0, 400, 290, 300),
             rectangle(0, 10, 0, 300), rectangle(390, 400, 0, 300),
             rectangle(180, 220, 100, 200)]
    split_environment = Environment(walls)

    robot_geometry = Polygon([(-15, -15), (-15, 15), (15, 15), (15, -15)])
    robot = Robot(robot_geometry)

    start = np.array([50, 150, 0])
    goal = np.array([350, 150, 0])

    return split_environment, robot, start, goal
a2a3ab0495dcf5a109ed2eb2e92bb0db424edd53
2,447
from random import randint


def problem_generator(difficulty=3):
    """
    This function generates mathematical expressions as string.
    It is not very smart and will generate expressions that have
    answers the lex function cannot accept.
    """
    operators = ["/", "*", "+", "-"]
    numeric_lim = difficulty * 7
    output = ""
    for i in range(difficulty + 3):
        if i % 2 == 0:
            output += str(randint(1, numeric_lim)) + " "
        else:
            output += operators[randint(0, len(operators) - 1)] + " "
    if output[len(output) - 2] in operators:
        output += str(randint(1, numeric_lim))
    return output
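Sample calls to the generator above; the expressions are random, so the outputs shown are merely representative:

print(problem_generator())              # e.g. '12 + 3 * 7 - 4'
print(problem_generator(difficulty=1))  # e.g. '5 - 2 + 7'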
f1859784a065a22adb83c33d46623fcafa470096
2,448
import math
from typing import Tuple


def logical_factory_dimensions(params: Parameters) -> Tuple[int, int, float]:
    """Determine the width, height, depth of the magic state factory."""
    if params.use_t_t_distillation:
        return 12 * 2, 8 * 2, 6  # Four T2 factories

    l1_distance = params.l1_distance
    l2_distance = params.code_distance

    t1_height = 4 * l1_distance / l2_distance
    t1_width = 8 * l1_distance / l2_distance
    t1_depth = 5.75 * l1_distance / l2_distance

    ccz_depth = 5
    ccz_height = 6
    ccz_width = 3
    storage_width = 2 * l1_distance / l2_distance

    ccz_rate = 1 / ccz_depth
    t1_rate = 1 / t1_depth
    t1_factories = int(math.ceil((ccz_rate * 8) / t1_rate))
    t1_factory_column_height = t1_height * math.ceil(t1_factories / 2)

    width = int(math.ceil(t1_width * 2 + ccz_width + storage_width))
    height = int(math.ceil(max(ccz_height, t1_factory_column_height)))
    depth = max(ccz_depth, t1_depth)

    return width, height, depth
95846f5f58e5c342ca81b3f51a9bacfb31bf777a
2,449
import argparse
import re


def parse_list_or_range(arg):
    """
    Parses a string that represents either an integer or a range in
    the notation ``<start>:<step>:<stop>``.

    Parameters
    ----------
    arg : :obj:`str`
        Integer or range string.

    Returns
    -------
    int or :obj:`list` of int

    Raises
    ------
    ArgumentTypeError
        If input can neither be interpreted as an integer nor a valid range.
    """
    if re.match(r'^\d+:\d+:\d+$', arg) or re.match(r'^\d+:\d+$', arg):
        rng_params = list(map(int, arg.split(':')))
        step = 1
        if len(rng_params) == 2:  # start, stop
            start, stop = rng_params
        else:  # start, step, stop
            start, step, stop = rng_params
        rng = list(range(start, stop + 1, step))  # include last stop-element in range
        if len(rng) == 0:
            raise argparse.ArgumentTypeError('{0} is an empty range'.format(arg))
        return rng
    elif re.match(r'^\d+$', arg):
        return int(arg)
    raise argparse.ArgumentTypeError(
        '{0} is neither an integer list, nor valid range in the form <start>:[<step>:]<stop>'.format(
            arg
        )
    )
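The parser above in action, assuming it is in scope:

print(parse_list_or_range("4"))      # 4
print(parse_list_or_range("2:8"))    # [2, 3, 4, 5, 6, 7, 8]
print(parse_list_or_range("2:2:8"))  # [2, 4, 6, 8]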
0d487bd80fc14b763a16bc8a167983a1f7959e3e
2,450
def data():
    """
    Data providing function:

    This function is separated from create_model() so that hyperopt
    won't reload data for each evaluation run.
    """
    d_file = 'data/zinc_100k.h5'
    data_train, data_test, props_train, props_test, tokens = \
        utils.load_dataset(d_file, "TRANSFORMER", True)
    x_train = [data_train, data_train, props_train]
    y_train = None
    x_test = [data_test, data_test, props_test]
    y_test = None
    return x_train, y_train, x_test, y_test
354ace6fdfc9f8f9ec0702ea8a0a03853b8d7f49
2,451
import itertools
import sys
from typing import Any, Dict, List, Optional


def make_commands(
    script: str,
    base_args: Optional[Dict[str, Any]] = None,
    common_hyper_args: Optional[Dict[str, List[Any]]] = None,
    algorithm_hyper_args: Optional[Dict[str, List[Any]]] = None,
) -> List[str]:
    """Generate commands to run.

    It will generate a list of commands to be used with the runners.
    Each command will look like:

        python script --base_arg_key --base_arg_val
            --common_hyper_key --common_hyper_key
            --algorithm_hyper_key --algorithm_hyper_key
            --mutually_exclusive_args

    where a separate command is generated for each common hyper_parameter and
    algorithm_hyper_parameter.

    Parameters
    ----------
    script: str.
        String with script to run.
    base_args: dict
        Base arguments to execute.
    common_hyper_args: dict
        Iterable hyper parameters to execute in different runs.
    algorithm_hyper_args
        Algorithm dependent hyper parameters to execute.

    Returns
    -------
    commands: List[str]
        List with commands to execute.
    """
    interpreter_script = sys.executable
    base_cmd = interpreter_script + " " + script

    commands = []  # List[str]
    if common_hyper_args is None:
        common_hyper_args = dict()  # pragma: no cover
    common_hyper_args = common_hyper_args.copy()
    if algorithm_hyper_args is not None:
        common_hyper_args.update(algorithm_hyper_args)

    hyper_args_list = list(
        dict(zip(common_hyper_args, x))
        for x in itertools.product(*common_hyper_args.values())
    )

    for hyper_args in hyper_args_list:
        cmd = base_cmd
        for dict_ in [base_args, hyper_args]:
            if dict_ is None:
                continue
            for key, value in dict_.items():
                cmd += get_command(key, value)
        commands.append(cmd)
    return commands
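An illustrative call to the function above; the exact flag text comes from the unshown get_command helper, so the commands shown are indicative only:

cmds = make_commands(
    "train.py",
    base_args={"seed": 0},
    common_hyper_args={"lr": [0.1, 0.01]},
)
# e.g. ['/usr/bin/python train.py --seed 0 --lr 0.1',
#       '/usr/bin/python train.py --seed 0 --lr 0.01']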
dfa9a6ce7f8752a6617567b5210c33e5f34146bf
2,452
def mongos_program(logger, job_num, executable=None, process_kwargs=None, mongos_options=None):  # pylint: disable=too-many-arguments
    """Return a Process instance that starts a mongos with arguments constructed from 'kwargs'."""
    args = [executable]

    mongos_options = mongos_options.copy()

    if "port" not in mongos_options:
        mongos_options["port"] = network.PortAllocator.next_fixture_port(job_num)
    suite_set_parameters = mongos_options.get("set_parameters", {})
    _apply_set_parameters(args, suite_set_parameters)
    mongos_options.pop("set_parameters")

    # Apply the rest of the command line arguments.
    _apply_kwargs(args, mongos_options)

    _set_keyfile_permissions(mongos_options)

    process_kwargs = make_historic(utils.default_if_none(process_kwargs, {}))
    return make_process(logger, args, **process_kwargs), mongos_options["port"]
a342db697d39e48ea0261e5ddfd89bdd99b6dced
2,453
def markerBeings():
    """Mark a beings block

    Content-Type: application/json
    {
        "token": "",
        "block_id": ""
    }
    Returns json
    {
        "is_success": bool,
        "data": ""
    }
    """
    try:
        info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
            return http_message.getJson()
        # Get the list
        beings_block_id = info["block_id"]
        if blockOfGarbage.addGarbageBlockQueue(beings_block_id):
            http_message = HttpMessage(is_success=True, data="Marked successfully")
            return http_message.getJson()
        else:
            http_message = HttpMessage(is_success=False, data="This block has already been marked")
            return http_message.getJson()
    except Exception as err:
        print(err)
        http_message = HttpMessage(is_success=False, data="Parameter error")
        return http_message.getJson()
20b73bec5f6c27e365a90fe466e34445592f2bd3
2,454
import numpy as np


def get_charges_with_openff(mol):
    """Starting from an openff molecule returns atomic charges

    If the charges are already defined, they are returned without change;
    if not, am1bcc charges are calculated.

    Parameters
    ------------
    mol : openff.toolkit.topology.Molecule

    Examples
    ---------
    from openff.toolkit.topology import Molecule
    mol = Molecule.from_file(SOME_FILE)
    # mol = Molecule.from_smiles(SMILES)
    get_charges_with_openff(mol)

    Returns
    ------------
    np.array(float)
        charges in atomic units (elementary charge)

    Notes
    ----------
    Some extra conformers may be generated because of
    https://github.com/openforcefield/openff-toolkit/issues/492
    """
    if (mol.partial_charges is None) or (np.allclose(
            mol.partial_charges / unit.elementary_charge,
            np.zeros([mol.n_particles]))):
        # NOTE: generate_conformers seems to be required for some molecules
        # https://github.com/openforcefield/openff-toolkit/issues/492
        mol.generate_conformers(n_conformers=10)
        mol.compute_partial_charges_am1bcc()

    return mol.partial_charges.value_in_unit(unit.elementary_charge)
fa539ef60fda28a4983d632830f3a8ea813f5486
2,455
def parse_mdout(file):
    """Return energies from an AMBER ``mdout`` file.

    Parameters
    ----------
    file : os.PathLike
        Name of Amber output file

    Returns
    -------
    energies : dict
        A dictionary containing VDW, electrostatic, bond, angle, dihedral,
        V14, E14, and total energy.
    """
    vdw, ele, bnd, ang, dih, v14, e14 = [], [], [], [], [], [], []
    restraint = []

    with open(file, "r") as f:
        for line in f.readlines():
            words = line.rstrip().split()
            if len(words) > 1:
                if "BOND" in words[0]:
                    bnd.append(float(words[2]))
                    ang.append(float(words[5]))
                    dih.append(float(words[8]))
                if "VDWAALS" in words[0]:
                    vdw.append(float(words[2]))
                    ele.append(float(words[5]))
                if "1-4" in words[0]:
                    v14.append(float(words[3]))
                    e14.append(float(words[7]))
                    restraint.append(float(words[10]))

    energies = {
        "Bond": bnd,
        "Angle": ang,
        "Dihedral": dih,
        "V14": v14,
        "E14": e14,
        "VDW": vdw,
        "Ele": ele,
        "Restraint": restraint,
        "Total": [sum(x) for x in zip(bnd, ang, dih, v14, e14, vdw, ele)],
    }

    return energies
9310b0220d4b96b65e3484adf49edebda039dfad
2,456
from typing import List, Optional

import networkx as nx
from propy.AAComposition import GetSpectrumDict


def aa_spectrum(G: nx.Graph, aggregation_type: Optional[List[str]] = None) -> nx.Graph:
    """Calculate the spectrum descriptors of 3-mers for a given protein.

    Contains the composition values of 8000 3-mers

    :param G: Protein Graph to featurise
    :type G: nx.Graph
    :param aggregation_type: Aggregation types to use over chains
    :type aggregation_type: List[Optional[str]]
    :return: Protein Graph with aa_spectrum feature added.
        G.graph["aa_spectrum_{chain | aggregation_type}"]
    :rtype: nx.Graph
    """
    func = GetSpectrumDict
    feature_name = "aa_spectrum"
    return compute_propy_feature(
        G,
        func=func,
        feature_name=feature_name,
        aggregation_type=aggregation_type,
    )
dfd657452fda009c4420566f1456dc4bd32271ac
2,457
import hashlib


def get_click_data(api, campaign_id):
    """Return a list of all clicks for a given campaign."""
    rawEvents = api.campaigns.get(campaign_id).as_dict()["timeline"]
    clicks = list()  # Holds list of all users that clicked.
    for rawEvent in rawEvents:
        if rawEvent["message"] == "Clicked Link":
            click = dict()  # Builds out click document.
            click["user"] = hashlib.sha256(
                rawEvent["email"].encode("utf-8")
            ).hexdigest()
            click["source_ip"] = rawEvent["details"]["browser"]["address"]
            click["time"] = rawEvent["time"]
            click["application"] = get_application(rawEvent)
            clicks.append(click)
    return clicks
641836d73b2c5b2180a98ffc61d0382be74d2618
2,458
import pandas as pd


def multi_label_column_to_binary_columns(data_frame: pd.DataFrame, column: str):
    """
    assuming that the column contains array objects, returns a new dataframe with
    binary columns (True/False) indicating presence of each distinct array element.

    :data_frame: the pandas DataFrame
    :column: the column with array values
    :return: a new DataFrame with binary columns
    """
    label_unique_values = data_frame[column].str.replace(
        "'", '').str.split(',').explode().to_frame()
    drop_identical_values = label_unique_values[column].drop_duplicates(
        keep="first").tolist()
    multi_label_data_frame = pd.concat(
        [data_frame,
         pd.crosstab(label_unique_values.index,
                     label_unique_values[column])[drop_identical_values]],
        axis=1)
    return multi_label_data_frame
ab626530181740fc941e8efbbaf091bc06f0a0d8
2,459
import sys
from pathlib import Path
from typing import List, Set

from git import Repo


def subrepositories_changed(all_if_master: bool = False) -> List[str]:  # pragma: no cover
    """
    Returns a list of the final name components of subrepositories that contain files that are
    different between the master branch and the current branch. Subrepositories are defined as
    the directories immediately under "projects" and "libraries". Example: if
    libraries/ABEX/foo/bar.py and projects/CellSignalling/bar/baz.py have changed, the result
    returned would be ["ABEX", "CellSignalling"]. If the current branch *is* master, then all
    subrepository names (if all_if_master) or an empty list, is returned.

    "master" is tried as the name of the master branch, followed by "main" if that branch does
    not exist. If neither is found, which may be the case during an ADO build, we look at
    .git/FETCH_HEAD, which may show evidence of the master branch having been fetched, and if
    so will tell us its commit ID.
    """
    all_subrepos: Set[str] = set()
    for path in SUBREPOSITORY_PARENT_PATHS:
        for subrepo in path.glob("*"):
            if subrepo.is_dir():
                all_subrepos.add(subrepo.name)
    repo = Repo(".")
    master_branch_name = None
    for branch in repo.branches:
        if branch.name in ["master", "main"]:
            master_branch_name = branch.name
            break
    if master_branch_name is None:
        fh_path = Path(".git") / "FETCH_HEAD"
        if fh_path.exists():
            with fh_path.open() as fh:
                for line in fh.readlines():
                    if line.find("'master'") > 0 or line.find("'main'") > 0:
                        # master_branch_name is actually a commit in this case
                        master_branch_name = line.split(None, 1)[0]
                        sys.stderr.write(f"Setting master 'branch' name to commit {master_branch_name}\n")
                        break
    if master_branch_name is None:
        # Play safe: master branch not found, so assume all subrepos might have changed.
        sys.stderr.write("WARNING: could not find either a 'master' branch or a 'main' branch.\n")
        changed = all_subrepos
    else:
        changed = set()
        for diff in repo.index.diff(master_branch_name):
            for path in [Path(diff.a_path), Path(diff.b_path)]:
                parts = path.parts
                if (
                    len(parts) >= 2
                    and Path(parts[0]) in SUBREPOSITORY_PARENT_PATHS
                    and parts[1] in all_subrepos
                    and parts[-1] not in BASENAMES_TO_IGNORE
                ):
                    changed.add(parts[1])
        if changed:
            changed = add_subrepositories_depending_on(changed, all_subrepos, get_internal_requirements)
        elif all_if_master and current_commit_is_master(repo, master_branch_name):
            changed = all_subrepos
    # Remove subrepositories that appear to be submodules
    apparent_submodules = set(path.parent.name for path in Path(".").glob("*/*/.git"))
    result = [name for name in sorted(changed) if name not in apparent_submodules]
    return result
962464acf03d44017475cb5327106c225de777c8
2,460
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.colors import LightSource


def show_landscape(adata,
                   Xgrid,
                   Ygrid,
                   Zgrid,
                   basis="umap",
                   save_show_or_return='show',
                   save_kwargs={},
                   ):
    """Plot the quasi-potential landscape.

    Parameters
    ----------
        adata: :class:`~anndata.AnnData`
            AnnData object that contains Xgrid, Ygrid and Zgrid data for
            visualizing potential landscape.
        Xgrid: `numpy.ndarray`
            x-coordinates of the Grid produced from the meshgrid function.
        Ygrid: `numpy.ndarray`
            y-coordinates of the Grid produced from the meshgrid function.
        Zgrid: `numpy.ndarray`
            z-coordinates or potential at each of the x/y coordinate.
        basis: `str` (default: umap)
            The method of dimension reduction. By default it is trimap.
            Currently it is not checked with Xgrid and Ygrid.
        save_show_or_return: {'show', 'save', 'return'} (default: `show`)
            Whether to save, show or return the figure.
        save_kwargs: `dict` (default: `{}`)
            A dictionary that will be passed to the save_fig function. By default
            it is an empty dictionary and the save_fig function will use the
            {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf',
            "transparent": True, "close": True, "verbose": True} as its parameters.
            Otherwise you can provide a dictionary that properly modifies those
            keys according to your needs.

    Returns
    -------
        A 3D plot showing the quasi-potential of each cell state.
    """
    if "grid_Pot_" + basis in adata.uns.keys():
        Xgrid_, Ygrid_, Zgrid_ = (
            adata.uns["grid_Pot_" + basis]["Xgrid"],
            adata.uns["grid_Pot_" + basis]["Ygrid"],
            adata.uns["grid_Pot_" + basis]["Zgrid"],
        )
        Xgrid = Xgrid_ if Xgrid is None else Xgrid
        Ygrid = Ygrid_ if Ygrid is None else Ygrid
        Zgrid = Zgrid_ if Zgrid is None else Zgrid

    fig = plt.figure()
    ax = fig.gca(projection="3d")

    # Plot the surface.
    ls = LightSource(azdeg=0, altdeg=65)
    # Shade data, creating an rgb array.
    rgb = ls.shade(Zgrid, plt.cm.RdYlBu)
    surf = ax.plot_surface(
        Xgrid,
        Ygrid,
        Zgrid,
        cmap=cm.coolwarm,
        rstride=1,
        cstride=1,
        facecolors=rgb,
        linewidth=0,
        antialiased=False,
    )
    # Customize the z axis.
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f"))

    # Add a color bar which maps values to colors.
    # fig.colorbar(surf, shrink=0.5, aspect=5)
    ax.set_xlabel(basis + "_1")
    ax.set_ylabel(basis + "_2")
    ax.set_zlabel("U")
    if save_show_or_return == "save":
        s_kwargs = {"path": None, "prefix": 'show_landscape', "dpi": None,
                    "ext": 'pdf', "transparent": True, "close": True, "verbose": True}
        s_kwargs = update_dict(s_kwargs, save_kwargs)
        save_fig(**s_kwargs)
    elif save_show_or_return == "show":
        plt.tight_layout()
        plt.show()
    elif save_show_or_return == "return":
        return ax
35611e7b01f8a11948abe97a355be929c684af50
2,461
def _GetBuilderPlatforms(builders, waterfall):
    """Get the set of PerfBuilder objects for the given builders or waterfall.

    If neither builders nor a recognized waterfall is given, all platforms
    are returned.
    """
    if builders:
        return {b for b in bot_platforms.ALL_PLATFORMS if b.name in builders}
    elif waterfall == 'perf':
        return bot_platforms.OFFICIAL_PLATFORMS
    elif waterfall == 'perf-fyi':
        return bot_platforms.FYI_PLATFORMS
    else:
        return bot_platforms.ALL_PLATFORMS
f6d7e636bcbd941b1dde8949c68be295e0aef227
2,462
def meeting_guide(context):
    """
    Display the ReactJS-driven Meeting Guide list.
    """
    settings = get_meeting_guide_settings()
    json_meeting_guide_settings = json_dumps(settings)

    return {
        "meeting_guide_settings": json_meeting_guide_settings,
        "mapbox_key": settings["map"]["key"],
        "timezone": settings["timezone"],
    }
46d8d20fcb2bd4dacd45a510f51eaea292da0da6
2,463
import multiprocessing

import tensorflow as tf  # needed for tf.data / tf.expand_dims below


def generate_input_fn(file_path, shuffle, batch_size, num_epochs):
    """Generates a data input function.

    Args:
        file_path: Path to the data.
        shuffle: Boolean flag specifying if data should be shuffled.
        batch_size: Number of records to be read at a time.
        num_epochs: Number of times to go through all of the records.

    Returns:
        A function used by `Estimator` to read data.
    """
    def _input_fn():
        """Returns features and target from input data.

        Defines the input dataset, specifies how to read the data, and reads it.

        Returns:
            A tuple of a dictionary containing the features and the target.
        """
        num_threads = multiprocessing.cpu_count()
        dataset = tf.data.TextLineDataset(filenames=[file_path])
        dataset = dataset.skip(1)
        dataset = dataset.map(
            lambda x: parse_csv(tf.expand_dims(x, -1)),
            num_parallel_calls=num_threads)
        dataset = dataset.map(get_features_target_tuple,
                              num_parallel_calls=num_threads)
        if shuffle:
            dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE)
        dataset = dataset.batch(batch_size)
        dataset = dataset.repeat(num_epochs)
        dataset = dataset.prefetch(1)
        iterator = dataset.make_one_shot_iterator()
        features, target = iterator.get_next()
        return features, target
    return _input_fn
abf9dd66000eca392344f9663fa8418b9e596098
2,464
import numpy


def amp_phase_to_complex(lookup_table):
    """
    This constructs the function to convert from AMP8I_PHS8I format data to complex64 data.

    Parameters
    ----------
    lookup_table : numpy.ndarray

    Returns
    -------
    callable
    """
    _validate_lookup(lookup_table)

    def converter(data):
        if not isinstance(data, numpy.ndarray):
            raise ValueError('requires a numpy.ndarray, got {}'.format(type(data)))

        if data.dtype.name != 'uint8':
            raise ValueError('requires a numpy.ndarray of uint8 dtype, got {}'.format(data.dtype.name))

        # The input must be three-dimensional, with amp/phase bands interleaved
        # in the last dimension.
        if len(data.shape) != 3:
            raise ValueError('Requires a three-dimensional numpy.ndarray (with band '
                             'in the last dimension), got shape {}'.format(data.shape))

        out = numpy.zeros((data.shape[0], data.shape[1], data.shape[2] // 2), dtype=numpy.complex64)
        amp = lookup_table[data[:, :, 0::2]]
        theta = data[:, :, 1::2]*(2*numpy.pi/256)
        out.real = amp*numpy.cos(theta)
        out.imag = amp*numpy.sin(theta)
        return out

    return converter
dea38027654a5a2b6ab974943dbdc57b36835a8e
2,465
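A minimal usage sketch for the converter above, assuming an identity lookup table and that the module's `_validate_lookup` accepts a 256-entry float array; the interleaved amplitude/phase band layout is taken straight from the code:

import numpy as np

# Hypothetical identity lookup table: amplitude code i maps to amplitude i.
lut = np.arange(256, dtype=np.float64)
convert = amp_phase_to_complex(lut)

# One pixel with one amp/phase pair: amplitude code 2, phase code 64 (~pi/2).
data = np.array([[[2, 64]]], dtype=np.uint8)
print(convert(data))  # approximately [[0+2j]]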
from pandas import concat  # concat(dfs) joins a list of dataframes, so this must be pandas' concat


def combine_aqs_cmaq(model, obs):
    """Pair AQS observations with interpolated CMAQ model values.

    For each species present in the observations, the matching CMAQ variable is
    interpolated to the observation locations and appended to the result.

    Parameters
    ----------
    model : type
        CMAQ model object exposing `keys`, `get_var`, grid coordinates and `dset`.
    obs : type
        AQS observation object whose `df` attribute holds the observations.

    Returns
    -------
    type
        Dataframe pairing observed values with the interpolated model values.
    """
    g = obs.df.groupby('Species')
    comparelist = sorted(obs.df.Species.unique())
    dfs = []
    for i in comparelist:
        if (i == 'OZONE'):  # & ('O3' in model.keys):
            print('Interpolating Ozone:')
            df = g.get_group(i)
            fac = epa_util.check_cmaq_units(df, param='O3', aqs_param=i)
            print(fac)
            cmaq = model.get_var(lay=0, param='O3').compute() * fac
            df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                       model.longitude.values, radius=model.dset.XCELL)
            # df.Obs, df.CMAQ = df.Obs, df.CMAQ
            df.Units = 'PPB'
            dfs.append(df)
        elif i == 'PM2.5':
            if ('PM25_TOT' in model.keys) | ('ASO4J' in model.keys):
                print('Interpolating PM2.5:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='PM25', aqs_param=i)
                cmaq = model.get_var(lay=0, param='PM25').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'CO':
            if 'CO' in model.keys:
                print('Interpolating CO:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='CO', aqs_param=i)
                cmaq = model.get_var(lay=0, param='CO').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'NOY':
            if 'NOY' in model.keys:
                print('Interpolating NOY:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='NOY', aqs_param=i)
                cmaq = model.get_var(lay=0, param='NOY').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'SO2':
            if 'SO2' in model.keys:
                print('Interpolating SO2')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='SO2', aqs_param=i)
                cmaq = model.get_var(lay=0, param='SO2').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'NOX':
            if ('NO' in model.keys) | ('NO2' in model.keys):
                print('Interpolating NOX:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='NOX', aqs_param=i)
                cmaq = model.get_var(lay=0, param='NOX').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'NO':
            if ('NO' in model.keys):
                print('Interpolating NO:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='NO', aqs_param=i)
                cmaq = model.get_var(lay=0, param='NO').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'NO2':
            if ('NO2' in model.keys):
                print('Interpolating NO2:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='NO2', aqs_param=i)
                cmaq = model.get_var(lay=0, param='NO2').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'SO4f':
            if ('PM25_SO4' in model.keys) | ('ASO4J' in model.keys) | ('ASO4I' in model.keys):
                print('Interpolating PSO4:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='SO4f', aqs_param=i)
                cmaq = model.get_var(lay=0, param='SO4f').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'PM10':
            if ('PM_TOTAL' in model.keys) or ('ASO4K' in model.keys):
                print('Interpolating PM10:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='PM10', aqs_param=i)
                cmaq = model.get_var(lay=0, param='PM10').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'NO3f':
            if ('PM25_NO3' in model.keys) | ('ANO3J' in model.keys) | ('ANO3I' in model.keys):
                print('Interpolating PNO3:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='NO3f', aqs_param=i)
                cmaq = model.get_var(lay=0, param='NO3F').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'ECf':
            if ('PM25_EC' in model.keys) | ('AECI' in model.keys) | ('AECJ' in model.keys):
                print('Interpolating PEC:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='ECf', aqs_param=i)
                cmaq = model.get_var(lay=0, param='ECf').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'OCf':
            if ('APOCJ' in model.keys):
                print('Interpolating OCf:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='OCf', improve_param=i)
                cmaqvar = model.get_var(lay=0, param='OC').compute() * fac
                df = interpo.interp_to_obs(cmaqvar, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'ETHANE':
            if ('ETHA' in model.keys):
                print('Interpolating Ethane:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='ETHA', aqs_param=i)
                cmaq = model.get_var(lay=0, param='ETHA').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'BENZENE':
            if ('BENZENE' in model.keys):
                print('Interpolating BENZENE:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='BENZENE', aqs_param=i)
                cmaq = model.get_var(lay=0, param='BENZENE').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'TOLUENE':
            if ('TOL' in model.keys):
                print('Interpolating Toluene:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='TOL', aqs_param=i)
                cmaq = model.get_var(lay=0, param='TOL').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'ISOPRENE':
            if ('ISOP' in model.keys):
                print('Interpolating Isoprene:')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='ISOP', aqs_param=i)
                cmaq = model.get_var(lay=0, param='ISOP').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'O-XYLENE':
            if ('XYL' in model.keys):
                print('Interpolating Xylene')
                df = g.get_group(i)
                fac = epa_util.check_cmaq_units(df, param='XYL', aqs_param=i)
                cmaq = model.get_var(lay=0, param='XYL').compute() * fac
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'WS':
            if ('WSPD10' in model.keys):
                print('Interpolating WS:')
                df = g.get_group(i)
                cmaq = model.get_var(lay=0, param='WSPD10')
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'TEMP':
            if 'TEMP2' in model.keys:
                print('Interpolating TEMP:')
                df = g.get_group(i)
                cmaq = model.get_var(lay=0, param='TEMP2')
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
        elif i == 'WD':
            if ('WDIR10' in model.keys):
                print('Interpolating WD:')
                df = g.get_group(i)
                cmaq = model.get_var(lay=0, param='WDIR10')
                df = interpo.interp_to_obs(cmaq, df, model.latitude.values,
                                           model.longitude.values, radius=model.dset.XCELL)
                dfs.append(df)
    df = concat(dfs)
    df.dropna(subset=['Obs', 'model'], inplace=True)
    return df
bbc6ba6faf0f580d35674a912c245349d00d2a95
2,466
import sys

import numpy as np


def bbknn_pca_matrix(
    pca,
    batch_list,
    neighbors_within_batch=3,
    n_pcs=50,
    trim=None,
    approx=True,
    n_trees=10,
    use_faiss=True,
    metric="angular",
    set_op_mix_ratio=1,
    local_connectivity=1,
):
    """
    Scanpy-independent BBKNN variant that runs on a PCA matrix and list of per-cell batch
    assignments instead of an AnnData object. Non-data-entry arguments behave the same way
    as ``bbknn.bbknn()``. Returns a ``(distances, connectivities, parameters)`` tuple, like
    what would have been stored in the AnnData object. The connectivities are the actual
    neighbourhood graph.

    Input
    -----
    pca : ``numpy.array``
        PCA (or other dimensionality reduction) coordinates for each cell, with cells as rows.
    batch_list : ``numpy.array`` or ``list``
        A list of batch assignments for each cell.
    """
    # more basic sanity checks/processing
    # do we have the same number of cells in pca and batch_list?
    if pca.shape[0] != len(batch_list):
        raise ValueError(
            "Different cell counts indicated by `pca.shape[0]` and `len(batch_list)`."
        )
    # convert batch_list to np.array of strings for ease of mask making later
    batch_list = np.asarray([str(i) for i in batch_list])
    # assert that all batches have at least neighbors_within_batch cells in there
    unique, counts = np.unique(batch_list, return_counts=True)
    if np.min(counts) < neighbors_within_batch:
        raise ValueError(
            "Not all batches have at least `neighbors_within_batch` cells in them."
        )
    # metric sanity checks (duplicating the ones in bbknn(), but without scanpy logging)
    if approx and metric not in ["angular", "euclidean", "manhattan", "hamming"]:
        print(
            "unrecognised metric for type of neighbor calculation, switching to angular"
        )
        metric = "angular"
    elif not approx and not (
        metric == "euclidean"
        or isinstance(metric, DistanceMetric)
        or metric in KDTree.valid_metrics
    ):
        print(
            "unrecognised metric for type of neighbor calculation, switching to euclidean"
        )
        metric = "euclidean"
    # obtain the batch balanced KNN graph
    knn_distances, knn_indices = get_graph(
        pca=pca,
        batch_list=batch_list,
        n_pcs=n_pcs,
        n_trees=n_trees,
        approx=approx,
        metric=metric,
        use_faiss=use_faiss,
        neighbors_within_batch=neighbors_within_batch,
    )
    # sort the neighbours so that they're actually in order from closest to furthest
    newidx = np.argsort(knn_distances, axis=1)
    knn_indices = knn_indices[
        np.arange(np.shape(knn_indices)[0])[:, np.newaxis], newidx
    ]
    knn_distances = knn_distances[
        np.arange(np.shape(knn_distances)[0])[:, np.newaxis], newidx
    ]
    # this part of the processing is akin to scanpy.api.neighbors()
    dist, cnts = compute_connectivities_umap(
        knn_indices,
        knn_distances,
        knn_indices.shape[0],
        knn_indices.shape[1],
        set_op_mix_ratio=set_op_mix_ratio,
        local_connectivity=local_connectivity,
    )
    # trimming. compute default range if absent
    if trim is None:
        trim = 10 * knn_distances.shape[1]
    # skip trimming if set to 0, otherwise trim
    if trim > 0:
        cnts = trimming(cnts=cnts, trim=trim)
    # create a collated parameters dictionary
    # determine which neighbour computation was used, mirroring create_tree() logic
    if approx:
        computation = "annoy"
    elif metric == "euclidean":
        if "faiss" in sys.modules and use_faiss:
            computation = "faiss"
        else:
            computation = "cKDTree"
    else:
        computation = "KDTree"
    # we'll have a zero distance for our cell of origin, and nonzero for every other neighbour computed
    params = {
        "n_neighbors": len(dist[0, :].data) + 1,
        "method": "umap",
        "metric": metric,
        "n_pcs": n_pcs,
        "bbknn": {"trim": trim, "computation": computation},
    }
    return (dist, cnts, params)
1b21bd83b53c3a6418596e6c9d055f34adc726c5
2,467
import numpy as np


def read_1d_spikes(filename):
    """Reads a one dimensional binary spike file and returns a td_event event.

    The binary file is encoded as follows:
        * Each spike event is represented by a 40 bit number.
        * First 16 bits (bits 39-24) represent the neuronID.
        * Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
        * The last 23 bits (bits 22-0) represent the spike event timestamp in
          microseconds.

    Parameters
    ----------
    filename : str
        name of spike file.

    Returns
    -------
    Event
        spike event.

    Examples
    --------

    >>> td_event = read_1d_spikes(file_path)
    """
    with open(filename, 'rb') as input_file:
        input_byte_array = input_file.read()
    input_as_int = np.asarray([x for x in input_byte_array])
    x_event = (input_as_int[0::5] << 8) | input_as_int[1::5]
    c_event = input_as_int[2::5] >> 7
    t_event = (
        (input_as_int[2::5] << 16)
        | (input_as_int[3::5] << 8)
        | (input_as_int[4::5])
    ) & 0x7FFFFF
    # convert spike times to ms
    return Event(x_event, None, c_event, t_event / 1000)
034d6de1e38734fcfe131027956d781752163c33
2,468
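A round-trip sketch of the 40-bit layout described above; the filename and the Event attribute names (`x`, `c`, `t`) are assumptions, and the module's Event class must be importable:

# Encode one ON event: neuron 3, timestamp 1000 us, using the documented
# layout (16-bit neuron id, 1 sign bit, 23-bit timestamp, big-endian).
neuron, polarity, t_us = 3, 1, 1000
word = (neuron << 24) | (polarity << 23) | t_us
with open('one_spike.bs1', 'wb') as f:  # hypothetical filename
    f.write(word.to_bytes(5, byteorder='big'))

event = read_1d_spikes('one_spike.bs1')
print(event.x, event.c, event.t)  # [3] [1] [1.]  (time in ms; attribute names assumed)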
def _parse_step_log(lines):
    """Parse the syslog from the ``hadoop jar`` command.

    Returns a dictionary which potentially contains the following keys:

    application_id: a string like 'application_1449857544442_0002'. Only set
        on YARN
    counters: a map from counter group -> counter -> amount, or None if
        no counters found (only YARN prints counters)
    errors: a list of errors, with the following keys:
        hadoop_error:
            message: lines of error, as a string
            start_line: first line of log containing the error (0-indexed)
            num_lines: # of lines of log containing the error
        attempt_id: ID of task attempt with this error
    job_id: a string like 'job_201512112247_0003'. Should always be set
    output_dir: a URI like 'hdfs:///user/hadoop/tmp/my-output-dir'. Should
        always be set on success.
    """
    return _parse_step_log_from_log4j_records(
        _parse_hadoop_log4j_records(lines))
251b0e89157a1c3fa152cbf50daa5e0b10e17bcc
2,469
import re


def is_regex(regex, invert=False):
    """Test that value matches the given regex.

    The regular expression is searched against the value, so a match
    in the middle of the value will succeed. To specifically match
    the beginning or the whole string, use anchor characters. If
    invert is true, then matching the regex will cause the test to fail.
    """
    # pylint: disable=unused-argument
    # args defined by test definition
    rex = re.compile(regex)

    def is_regex_test(conf, path, value):
        match = rex.search(value)
        if invert and match:
            return u'"{0}" matches /{1}/'.format(value, regex)
        if not invert and not match:
            return u'"{0}" does not match /{1}/'.format(value, regex)
        return None
    return is_regex_test
0db71b3dae2b2013650b65ecacfe6aed0cd8366b
2,470
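A small sketch of how the generated test closure behaves; `conf` and `path` are unused by the closure, so None placeholders are fine here:

check = is_regex(r'^v\d+')
print(check(None, None, 'v42-release'))   # None (match at the start, so the test passes)
print(check(None, None, 'release-v42'))   # '"release-v42" does not match /^v\d+/'

inverted = is_regex(r'deprecated', invert=True)
print(inverted(None, None, 'deprecated api'))  # '"deprecated api" matches /deprecated/'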
def get_obj(obj):
    """Opens the url of `obj`, builds the object from the page and returns it."""
    open_obj(obj)
    return internal_ui_operations.build_obj(obj)
f6deddc62f7f3f59ab93b553c64b758340b5fa6c
2,471
import cv2  # needed for the color conversion and rectangle drawing below


def process(frame):
    """Process initial frame and tag recognized objects."""
    # 1. Convert initial frame to grayscale
    grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # For every model:
    for model, color, parameters in (
            (MODEL_FACE, (255, 255, 0),
             {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (30, 30)}),
            (MODEL_EYE, (0, 0, 255),
             {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (20, 20)}),
            *((model, (0, 255, 0),
               {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (20, 20)})
              for model in MODELS_PLATE),
    ):
        # 2. Apply model, recognize objects
        objects = model.detectMultiScale(grayframe, **parameters)
        # 3. For every recognized object, draw a rectangle around it
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)  # BGR
    # 4. Return initial color frame with rectangles
    return frame
c0c977d522f292d6cbb03c4e64eabc3d11342e0f
2,472
import datetime  # the body uses datetime.timedelta, so import the module itself


def tzoffset():
    """UTC to America/New_York offset."""
    return datetime.timedelta(hours=5)
05e883eeae63ad1dd7b287dd0b331b13b11b8cd1
2,473
import tempfile
import subprocess
import os


def ssh(instance, command, plain=None, extra=None, command_args=None):
    """Run ssh command.

    Parameters:
        instance(MechInstance): a mech instance
        command(str): command to execute (ex: 'chmod +x /tmp/file')
        plain(bool): use user/pass auth
        extra(str): arguments to pass to ssh
        command_args(str): arguments for command

    Returns:
        return_code(int): 0=success
        stdout(str): Output from the command
        stderr(str): Error from the command

    Note: May not really need the tempfile if self.use_psk==True.
    Using the tempfile, there are options to not add the host to the
    known_hosts files, which is useful, but could allow MITM attacks.
    Not likely locally, but still could be an issue.
    """
    LOGGER.debug('command:%s plain:%s extra:%s command_args:%s',
                 command, plain, extra, command_args)
    if instance.created:
        state = instance.get_vm_state()
        if vm_ready_based_on_state(state):
            config_ssh = instance.config_ssh()
            temp_file = tempfile.NamedTemporaryFile(delete=False)
            try:
                temp_file.write(config_ssh_string(config_ssh).encode('utf-8'))
                temp_file.close()

                cmds = ['ssh']
                if not plain:
                    cmds.extend(('-F', temp_file.name))
                    cmds.append(config_ssh['Host'])
                if extra:
                    cmds.append(extra)
                if command:
                    cmds.extend(('--', command))
                if command_args:
                    cmds.append(command_args)

                LOGGER.debug('cmds:%s', cmds)

                # if running a script
                if command:
                    result = subprocess.run(cmds, capture_output=True)
                    stdout = result.stdout.decode('utf-8').strip()
                    stderr = result.stderr.decode('utf-8').strip()
                    return result.returncode, stdout, stderr
                else:
                    # interactive
                    return subprocess.call(cmds), None, None
            finally:
                os.unlink(temp_file.name)
        else:
            return 1, '', 'VM not ready({})'.format(state)
16600ff5c1d527c3b10fb2c3382955e8c76f221b
2,474
def KICmag(koi, band):
    """
    Returns the apparent magnitude of the given KOI star in the given band.

    Equivalent to ``KICmags(koi)[band]``.
    """
    return KICmags(koi)[band]
767ff04c9319acd698daa05f084e8ee9c456a628
2,475
from typing import List


def list_to_decimal(nums: List[int]) -> int:
    """Accept a list of positive integers in the range(0, 10)
    and return an integer where each int of the given list represents
    decimal place values from first element to last. E.g.
    [1, 7, 5] => 175
    [0, 3, 1, 2] => 312

    Place values are 10**n where n represents the digit position.
    E.g. to calculate 1345, we have 5 1's, 4 10's, 3 100's and 1 1000's:
    1,      3,     4,    5
    1000's, 100's, 10's, 1's
    """
    for num in nums:
        if isinstance(num, bool) or not isinstance(num, int):
            raise TypeError
        elif num not in range(0, 10):
            raise ValueError
    return int("".join(map(str, nums)))
7727ce610987fc9da03a5e23ec8674d1deb7c7f0
2,476
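Quick examples mirroring the docstring, including the validation paths:

print(list_to_decimal([1, 7, 5]))     # 175
print(list_to_decimal([0, 3, 1, 2]))  # 312

list_to_decimal([1, 12])   # raises ValueError (12 is outside range(0, 10))
list_to_decimal([True])    # raises TypeError (bools are rejected explicitly)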
def str_to_bool(v):
    """
    :type v: str
    """
    return v.lower() in ("true", "1")
3eb7ae9e1fe040504ea57c65ed1cbd48be9269cf
2,477
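A few illustrative calls; note that anything other than "true" or "1" (case-insensitive) is False:

print(str_to_bool("True"))  # True
print(str_to_bool("1"))     # True
print(str_to_bool("yes"))   # False -- only "true" and "1" are accepted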
def home_event_manager():
    """
    Route for alumni's home

    :return:
    """
    if "idUsers" in session and session["UserTypes_idUserTypes"] == 2:
        return redirect("/events")
    else:
        session.clear()
        return redirect("/login")
7facf96fbd5d8bbcb7fb867cac7a150a31185dde
2,478
import os


def define_url_service(settings_dict) -> str:
    """Define the url service for the client.

    The ENV variable takes priority over the settings module.
    """
    url = os.environ.get(defaults.SERVICE_URL_ENV)
    if url:
        return url
    else:
        return settings_dict.get("WORKFLOW_SERVICE", defaults.SERVICE_URL)
ec7de5359cd5e78ba5f063f183b55e4acd318025
2,479
import hashlib


def md5_encode(text):
    """Return the uppercase hexadecimal MD5 digest of the given text."""
    md5 = hashlib.md5()
    md5.update(text.encode('utf-8'))
    encodedStr = md5.hexdigest().upper()
    return encodedStr
b08f656f5ab0858accfbf54e03d95635a3598e13
2,480
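For example:

print(md5_encode('hello'))  # '5D41402ABC4B2A76B9719D911017C592'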
from typing import Counter


def _ngrams(segment, n):
    """Extracts n-grams from an input segment.

    Parameters
    ----------
    segment: list
        Text segment from which n-grams will be extracted.
    n: int
        Order of n-gram.

    Returns
    -------
    ngram_counts: Counter
        Contains all the nth-order n-grams in segment with a count of how many
        times each n-gram occurred.
    """
    ngram_counts = Counter()
    for i in range(0, len(segment) - n + 1):
        ngram = tuple(segment[i:i + n])
        ngram_counts[ngram] += 1
    return ngram_counts
580cad34eb03359988eb2ce6f77dad246166b890
2,481
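A bigram example on a tokenized sentence:

print(_ngrams(['to', 'be', 'or', 'not', 'to', 'be'], 2))
# Counter({('to', 'be'): 2, ('be', 'or'): 1, ('or', 'not'): 1, ('not', 'to'): 1})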
def build_varint(val):
    """Build a protobuf varint for the given value."""
    data = []
    while val > 127:
        # emit the low 7 bits with the continuation bit set
        data.append((val & 127) | 128)
        val >>= 7
    data.append(val)
    return bytes(data)
46f7cd98b6858c003cd66d87ba9ec13041fcf9db
2,482
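A round-trip sketch; the decoder below is my addition for illustration, not part of the dataset entry:

def read_varint(data: bytes) -> int:
    """Inverse sketch: decode a protobuf varint back into an int."""
    val, shift = 0, 0
    for byte in data:
        val |= (byte & 127) << shift
        if not byte & 128:  # no continuation bit: this was the last byte
            break
        shift += 7
    return val

encoded = build_varint(300)
print(encoded)               # b'\xac\x02'
print(read_varint(encoded))  # 300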
import re


def python_safe_name(s):
    """
    Return a name derived from string `s` safe to use as a Python function name.

    For example:
    >>> s = "not `\\a /`good` -safe name ??"
    >>> assert python_safe_name(s) == 'not_good_safe_name'
    """
    no_punctuation = re.compile(r'[\W_]', re.MULTILINE).sub
    s = s.lower()
    s = no_punctuation(' ', s)
    s = '_'.join(s.split())
    if py2 and isinstance(s, unicode):
        s = s.encode('ascii', 'ignore')
    return s
463d7c3bf4f22449a0a1c28897654d3ccb5e94cb
2,483
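A simpler illustrative call; `py2` is a module-level flag in the original code, so the stand-in below is an assumption needed to make the sketch self-contained:

py2 = False  # stand-in for the module-level Python 2 flag

print(python_safe_name("My Function-Name (v2)!"))  # my_function_name_v2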
def hash_bytes(hash_type: SupportedHashes, bytes_param: bytes) -> bytes:
    """Hash arbitrary bytes using a supported algo of your choice.

    Args:
        hash_type: SupportedHashes enum type
        bytes_param: bytes to be hashed

    Returns:
        hashed bytes
    """
    hasher = get_hash_obj(hash_type)
    hasher.update(bytes_param)
    return hasher.digest()
8f9c05fd050e6f89d6bc5213c03f4002cc341cb0
2,484
def analyze(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None):
    """
    Performs an analysis step.

    Returns 0 if successful, and <0 if fail.

    Parameters
    ----------
    osi
        OpenSees instance that receives the command.
    num_inc
        Number of analysis increments to perform.
    dt
        Time step increment (transient analysis only).
    dt_min
        Minimum time step (variable transient analysis only).
    dt_max
        Maximum time step (variable transient analysis only).
    jd
        Number of iterations performed at each step (variable transient analysis only).

    Returns
    -------
    The result of ``osi.to_process``.
    """
    op_type = 'analyze'
    if dt is None:
        parameters = [int(num_inc)]
    elif dt_min is None:
        parameters = [int(num_inc), float(dt)]
    else:
        parameters = [int(num_inc), float(dt), dt_min, dt_max, jd]
    return osi.to_process(op_type, parameters)
6c748a49c5e54cf88a04002d98995f4fd90d5130
2,485
from typing import Any
import importlib


def load_class(path: str) -> Any:
    """
    Load a class at the provided location. Path is a string of the form
    path.to.module.ClassName and conforms to the python import conventions.

    :param path: string pointing to the class to load
    :return: the requested class object
    """
    try:
        log.info('loading class : [{}]'.format(path))
        module_name, class_name = path.rsplit('.', 1)
        mod = importlib.import_module(module_name)
        return getattr(mod, class_name)
    except Exception:
        raise ProcessingError('Class loading error : expecting path.to.module.ClassName, got : {}'.format(path))
c6ae2cd20f71a68a6ec05ef5693656d0db7f2703
2,486
import numpy as np


def dcg_at_k(r, k, method=0):
    """Score is discounted cumulative gain (dcg)

    Relevance is positive real values. Can use binary as the previous methods.
    Example from
    http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
    # >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
    # >>> dcg_at_k(r, 1)
    # 3.0
    # >>> dcg_at_k(r, 1, method=1)
    # 3.0
    # >>> dcg_at_k(r, 2)
    # 5.0
    # >>> dcg_at_k(r, 2, method=1)
    # 4.2618595071429155
    # >>> dcg_at_k(r, 10)
    # 9.6051177391888114
    # >>> dcg_at_k(r, 11)
    # 9.6051177391888114

    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
        method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
                If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]

    Returns:
        Discounted cumulative gain
    """
    r = np.asfarray(r)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
a52b2e3735461ea7749f092aa35cfe8a55f33e3f
2,487
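Working through the docstring example at k=2: method 0 gives r[0] + r[1]/log2(2) = 3 + 2 = 5.0, while method 1 discounts the first item too, giving 3/log2(2) + 2/log2(3):

r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
print(dcg_at_k(r, 2))            # 5.0
print(dcg_at_k(r, 2, method=1))  # 4.2618595071429155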
def metric_section(data_model, metric, level) -> str:
    """Return the metric as Markdown section."""
    markdown = markdown_header(metric["name"], level=level, index=True)
    markdown += markdown_paragraph(metric["description"])
    markdown += definition_list("Default target", metric_target(metric))
    markdown += definition_list("Scales", *metric_scales(metric))
    markdown += definition_list("Default tags", *metric["tags"])
    markdown += "```{admonition} Supporting sources\n"
    for source in metric["sources"]:
        source_name = data_model["sources"][source]["name"]
        default = " (default)" if source == metric.get("default_source", "no default source") else ""
        markdown += f"- [{source_name}]({metric_source_slug(metric['name'], source_name)}){default}\n"
    markdown += "```\n"
    return markdown
6c02e707b6de7c2d89e7eb590d3d9252f13ae9b7
2,488
import argparse


def make_argument_parser():
    """
    Creates an ArgumentParser to read the options for this script from sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("nifti", help="Nifti file to be processed.")
    parser.add_argument("--out", default=None, help="Output pickle file of roi dict.")
    parser.add_argument("--txt", default=None, help="Readable txt file of rois.")
    return parser
0340c3406e2418b3dce8e1b532181101b3aabf1a
2,489
import os


def get_pkgs(rpmdir):
    """scan a dir of rpms and generate a pkgs structure. first try parsing
    the filename. if that fails, try parsing the rpm headers.
    """
    pkgs = {}
    """
    pkgs structure:
    * pkgs is a dict of package name, rpmblob list pairs:
      pkgs = {name: [rpmblob, rpmblob...], name: [rpmblob, rpmblob...]}
    * rpmblob is a dict describing an rpm file:
      rpmblob = {'file': 'foo-0.1-5.i386.rpm', 'name': 'foo', 'version': '0.1',
                 'release': '5', 'subarch': 'i386'}

    example:
    pkgs = {
        'foo': [
            {'file': 'foo-0.1-5.i386.rpm', 'name': 'foo', 'version': '0.1',
             'release': '5', 'subarch': 'i386'},
            {'file': 'foo-0.2-3.i386.rpm', 'name': 'foo', 'version': '0.2',
             'release': '3', 'subarch': 'i386'}],
        'bar': [
            {'file': 'bar-3.2a-12.mips.rpm', 'name': 'bar', 'version': '3.2a',
             'release': '12', 'subarch': 'mips'},
            {'file': 'bar-3.7j-4.mips.rpm', 'name': 'bar', 'version': '3.7j',
             'release': '4', 'subarch': 'mips'}]
    }
    """
    rpms = [item for item in os.listdir(rpmdir) if item.endswith('.rpm')]
    for filename in rpms:
        (name, version, release, subarch) = parse_rpm_filename(rpmdir, filename)
        rpmblob = {'file': filename,
                   'name': name,
                   'version': version,
                   'release': release,
                   'subarch': subarch}
        if name in pkgs:
            pkgs[name].append(rpmblob)
        else:
            pkgs[name] = [rpmblob]
    return pkgs
b1781254be8dc4ed3f9147c2e96fa4d871322075
2,490
def MAKEFOURCC(ch0: str, ch1: str, ch2: str, ch3: str) -> int:
    """Implementation of Windows' `MAKEFOURCC`.

    This is simply just returning the bytes of the joined characters.
    `MAKEFOURCC(*"DX10")` can also be implemented by `Bytes(b"DX10")`.

    Args:
        ch0 (str): First char
        ch1 (str): Second char
        ch2 (str): Third char
        ch3 (str): Fourth char

    Returns:
        int: The integer representation of given characters.

    **Reference**:
        `Microsoft <https://goo.gl/bjtMFA>`__
    """
    return (ord(ch0) << 0) | (ord(ch1) << 8) | (ord(ch2) << 16) | (ord(ch3) << 24)
91afd9dcc8f1cd8c5ef167bdb560c8bf2d89b228
2,491
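Since each character lands in successively higher bytes, the result is the little-endian integer reading of the four ASCII bytes:

print(hex(MAKEFOURCC(*"DX10")))                                  # 0x30315844
print(MAKEFOURCC(*"DX10") == int.from_bytes(b"DX10", "little"))  # True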
def sort_configs(configs):  # pylint: disable=R0912
    """Sort configs by global/package/node, then by package name, then by node name.

    Args:
        configs (list): List of config dicts
    """
    result = []
    # Find all unique keys and sort alphabetically
    _keys = []
    for config in configs:
        if config["key"] not in _keys:
            _keys.append(config["key"])
    _keys = sorted(_keys, key=str.lower)
    # For each key find globals, then packages, then nodes
    for key in _keys:
        _packages = []
        _nodes = []
        for config in configs:
            if config["key"] == key:
                if config["type"] == "global":
                    result.append(config)
                elif config["type"] == "package":
                    _packages.append(config)
                elif config["type"] == "node":
                    _nodes.append(config)
        # Sort the package and node elements alphabetically
        _package_ids = sorted([_package["id"] for _package in _packages], key=str.lower)
        for package in _package_ids:
            for config in configs:
                if config["key"] == key and config["type"] == "package" and config["id"] == package:
                    result.append(config)
                    break
        _node_ids = sorted([_node["id"] for _node in _nodes], key=str.lower)
        for node in _node_ids:
            for config in configs:
                if config["key"] == key and config["type"] == "node" and config["id"] == node:
                    result.append(config)
                    break
    return result
5c05214af42a81b35986f3fc0d8670fbef2e2845
2,492
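A small illustrative input showing the resulting ordering (keys alphabetically, then global before package before node within each key):

configs = [
    {"key": "timeout", "type": "node", "id": "web-2"},
    {"key": "timeout", "type": "global", "id": None},
    {"key": "debug", "type": "package", "id": "api"},
    {"key": "timeout", "type": "package", "id": "api"},
]
for c in sort_configs(configs):
    print(c["key"], c["type"], c["id"])
# debug package api
# timeout global None
# timeout package api
# timeout node web-2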
from pathlib import Path
import re
import os


def read_user_config():
    """Returns keys in lowercase of xlwings.conf in the user's home directory"""
    config = {}
    if Path(xlwings.USER_CONFIG_FILE).is_file():
        with open(xlwings.USER_CONFIG_FILE, "r") as f:
            for line in f:
                values = re.findall(r'"[^"]*"', line)
                if values:
                    config[values[0].strip('"').lower()] = os.path.expandvars(
                        values[1].strip('"')
                    )
    return config
2d179171c4b2763837fc3e7e7c79280756fdd9e9
2,493
def MakeTableData(
        visible_results, starred_items, lower_columns, lower_group_by,
        users_by_id, cell_factories, id_accessor, related_issues, config,
        context_for_all_issues=None):
    """Return a list of table row objects for display by EZT.

    Args:
        visible_results: list of artifacts to display on one pagination page.
        starred_items: list of IDs/names of items in the current project
            that the signed in user has starred.
        lower_columns: list of column names to display, all lowercase. These
            can be combined column names, e.g., 'priority/pri'.
        lower_group_by: list of column names that define row groups, all
            lowercase.
        users_by_id: dict mapping user IDs to UserViews.
        cell_factories: dict of functions that each create TableCell objects.
        id_accessor: function that maps from an artifact to the ID/name that
            might be in the starred items list.
        related_issues: dict {issue_id: issue} of pre-fetched related issues.
        config: ProjectIssueConfig PB for the current project.
        context_for_all_issues: A dictionary of dictionaries containing values
            passed in to cell factory functions to create TableCells.
            Dictionary form:
            {issue_id: {'rank': issue_rank, 'issue_info': info_value, ..},
             issue_id: {'rank': issue_rank}, ..}

    Returns:
        A list of TableRow objects, one for each visible result.
    """
    table_data = []

    group_cell_factories = [
        ChooseCellFactory(group.strip('-'), cell_factories, config)
        for group in lower_group_by]

    # Make a list of cell factories, one for each column.
    factories_to_use = [
        ChooseCellFactory(col, cell_factories, config) for col in lower_columns]

    current_group = None
    for idx, art in enumerate(visible_results):
        row = MakeRowData(
            art, lower_columns, users_by_id, factories_to_use, related_issues,
            config, context_for_all_issues)
        row.starred = ezt.boolean(id_accessor(art) in starred_items)
        row.idx = idx  # EZT does not have loop counters, so add idx.
        table_data.append(row)
        row.group = None

        # Also include group information for the first row in each group.
        # TODO(jrobbins): This seems like more overhead than we need for the
        # common case where no new group heading row is to be inserted.
        group = MakeRowData(
            art, [group_name.strip('-') for group_name in lower_group_by],
            users_by_id, group_cell_factories, related_issues, config,
            context_for_all_issues)
        for cell, group_name in zip(group.cells, lower_group_by):
            cell.group_name = group_name
        if group == current_group:
            current_group.rows_in_group += 1
        else:
            row.group = group
            current_group = group
            current_group.rows_in_group = 1

    return table_data
aab10e8437cae0a5592dceacf2dc60774706d560
2,494
def _add_student_submit(behave_sensibly):
    """Allow addition of new students.

    Handle both "good" and "bad" versions (to keep code DRY).
    """
    try:
        if behave_sensibly:
            do_add_student_good(
                first_name=request.forms.first_name,
                last_name=request.forms.last_name,
                card_info=request.forms.card_info,
            )
        else:
            do_add_student_bad(
                first_name=request.forms.first_name,
                last_name=request.forms.last_name,
                card_info=request.forms.card_info,
            )
    except psycopg2.DatabaseError:
        pass
    return redirect("/students")
780b0e667a841b51b1b64c8c8156cebda3d586e9
2,495
def _get_table_reference(self, table_id):
    """Constructs a TableReference.

    Args:
        table_id (str): The ID of the table.

    Returns:
        google.cloud.bigquery.table.TableReference:
            A table reference for a table in this dataset.
    """
    return TableReference(self, table_id)
e92dc5fbeac84b902e50d5302539503246c39f30
2,496
import os


def request_item_js(request):
    """Returns modified javascript file for development.

    Hit by a `dev_josiah_request_item.js` url; production hits the
    apache-served js file.
    """
    js_unicode = u''
    current_directory = os.path.dirname(os.path.abspath(__file__))
    js_path = u'%s/lib/josiah_request_item.js' % current_directory
    with open(js_path) as f:
        js_utf8 = f.read()
        js_unicode = js_utf8.decode(u'utf-8')
    js_unicode = js_unicode.replace(u'library.brown.edu', request.get_host())
    return HttpResponse(js_unicode, content_type=u'application/javascript; charset=utf-8')
0078c218b9089b601d497d325e7aad51f2c14cd9
2,497
def get_present_types(robots):
    """Get unique set of types present in given list"""
    return {type_char for robot in robots for type_char in robot.type_chars}
75c33e0bf5f97afe93829c51086100f8e2ba13af
2,498
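A minimal sketch, assuming each robot exposes an iterable of type characters via `type_chars`:

from types import SimpleNamespace

robots = [SimpleNamespace(type_chars="AB"), SimpleNamespace(type_chars="BC")]
print(get_present_types(robots))  # {'A', 'B', 'C'} (a set, so order may vary)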
import numpy as np


def _deserialize_row(params, mask):
    """
    This is for stochastic vectors where some elements are forced to zero.

    Such a vector is defined by a number of parameters equal to
    the length of the vector minus one and minus the number of elements
    forced to zero.
    @param params: an array of statistical parameters
    @param mask: bools such that False forces zero probability
    @return: a mask-conformant list of nonnegative floats
    """
    row = np.zeros(mask.shape)
    row[mask] = [1.0] + np.exp(params).tolist()
    row /= row.sum()
    return row
004775ef669ce7698570091c7212912d0f309bee
2,499
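A worked example: three unmasked slots need two free parameters; the first slot is pinned to weight 1.0 and the rest are exponentiated before normalization:

mask = np.array([True, False, True, True])
params = np.log([2.0, 3.0])  # two free parameters for three nonzero slots
row = _deserialize_row(params, mask)
print(row)        # approximately [0.1667, 0., 0.3333, 0.5]
print(row.sum())  # 1.0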