content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, 0 to 3.66M
from typing import Dict


def _unpack_school_column_aliases() -> Dict[str, str]:
    """
    Unpack the known aliases into a lookup table of alias_column_name -> schema_column_name.

    :return: lookup table.
    :raises: ValueError if an alias has more than one mapping to a schema column
    """
    result = dict()
    # add to the lookup table all the known aliases from the School_aliases module
    for (schema_column_name, aliases) in School_aliases.items():
        for alias_column_name in aliases:
            k = alias_column_name.lower()
            v = schema_column_name.lower()
            if result.get(k) is not None:
                raise ValueError(f"duplicate alias {k} for column name: {v}")
            result[k] = v
    return result
3c6cff79c2ce50dff655bee7ac1626277b00e155
3,659,308
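The snippet above depends on a module-level School_aliases mapping that is not shown; the sketch below is a hypothetical illustration of the shape it is assumed to have and of the inverted lookup the function produces.

# Hypothetical shape of the School_aliases data (schema column -> known alias spellings).
School_aliases = {
    "school_name": ["School Name", "SCHOOL", "name_of_school"],
    "district_id": ["District ID", "district"],
}

# _unpack_school_column_aliases() would then return the inverted, lower-cased lookup:
# {"school name": "school_name", "school": "school_name", "name_of_school": "school_name",
#  "district id": "district_id", "district": "district_id"}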
def suntimecorr(ra, dec, obst, coordtable, verbose=False): """ This function calculates the light-travel time correction from observer to a standard location. It uses the 2D coordinates (RA and DEC) of the object being observed and the 3D position of the observer relative to the standard location. The latter (and the former, for solar-system objects) may be gotten from JPL's Horizons system. Parameters: ----------- ra : Float Right ascension of target object in radians. dec : Float Declination of target object in radians. obst : Float or Numpy Float array Time of observation in Julian Date (may be a vector) coordtable : String Filename of output table from JPL HORIZONS specifying the position of the observatory relative to the standard position. verbose : Boolean If True, print X,Y,Z coordinates. Returns: -------- This function returns the time correction in seconds to be ADDED to the observation time to get the time when the observed photons would have reached the plane perpendicular to their travel and containing the reference position. Notes: ------ The position vectors from coordtable are given in the following coordinate system: Reference epoch: J2000.0 xy-plane: plane of the Earth's mean equator at the reference epoch x-axis : out along ascending node of instantaneous plane of the Earth's orbit and the Earth's mean equator at the reference epoch z-axis : along the Earth mean north pole at the reference epoch Ephemerides are often calculated for BJD, barycentric Julian date. That is, they are correct for observations taken at the solar system barycenter's distance from the target. The BJD of our observation is the time the photons we observe would have crossed the sphere centered on the object and containing the barycenter. We must thus add the light-travel time from our observatory to this sphere. For non-solar-system observations, we approximate the sphere as a plane, and calculate the dot product of the vector from the barycenter to the telescope and a unit vector to from the barycenter to the target, and divide by the speed of light. Properly, the coordinates should point from the standard location to the object. Practically, for objects outside the solar system, the adjustment from, e.g., geocentric (RA-DEC) coordinates to barycentric coordinates has a negligible effect on the trig functions used in the routine. The horizons file in coordtable should be in the form of the following example, with a subject line of JOB: !$$SOF ! ! Example e-mail command file. If mailed to "horizons@ssd.jpl.nasa.gov" ! with subject "JOB", results will be mailed back. ! ! This example demonstrates a subset of functions. See main doc for ! full explanation. Send blank e-mail with subject "BATCH-LONG" to ! horizons@ssd.jpl.nasa.gov for complete example. ! EMAIL_ADDR = 'shl35@cornell.edu' ! Send output to this address ! (can be blank for auto-reply) COMMAND = '-79' ! Target body, closest apparition OBJ_DATA = 'YES' ! No summary of target body data MAKE_EPHEM = 'YES' ! Make an ephemeris START_TIME = '2005-Aug-24 06:00' ! Start of table (UTC default) STOP_TIME = '2005-Aug-25 02:00' ! End of table STEP_SIZE = '1 hour' ! Table step-size TABLE_TYPE = 'VECTOR' ! Specify VECTOR ephemeris table type CENTER = '@10' ! Set observer (coordinate center) REF_PLANE = 'FRAME' ! J2000 equatorial plane VECT_TABLE = '3' ! Selects output type (3=all). OUT_UNITS = 'KM-S' ! Vector units# KM-S, AU-D, KM-D CSV_FORMAT = 'NO' ! Comma-separated output (YES/NO) VEC_LABELS = 'YES' ! 
Label vectors in output (YES/NO) VECT_CORR = 'NONE' ! Correct for light-time (LT), ! or lt + stellar aberration (LT+S), ! or (NONE) return geometric ! vectors only. !$$EOF Example: --------- >>> # Spitzer is in nearly the Earth's orbital plane. Light coming from >>> # the north ecliptic pole should hit the observatory and the sun at >>> # about the same time. >>> import suntimecorr as sc >>> ra = 18.0 * np.pi / 12 # ecliptic north pole coordinates in radians >>> dec = 66.5 * np.pi / 180 # " >>> obst = np.array([2453607.078]) # Julian date of 2005-08-24 14:00 >>> print( sc.suntimecorr(ra, dec, obst, '/home/esp01/ancil/horizons/cs41_spitzer.vec') ) 1.00810877 # about 1 sec, close to zero >>> # If the object has the RA and DEC of Spitzer, light time should be >>> # about 8 minutes to the sun. >>> obs = np.array([111093592.8346969, -97287023.315796047, -42212080.826677799]) >>> # vector to the object >>> obst = np.array([2453602.5]) >>> print( np.sqrt(np.sum(obs**2.0)) ) 153585191.481 # about 1 AU, good >>> raobs = np.arctan(obs[1]/ obs[0]) >>> decobs = np.arctan(obs[2]/ np.sqrt(obs[0]**2 + obs[1]**2)) >>> print(raobs, decobs) -0.7192383661, -0.2784282118 >>> print( sc.suntimecorr(raobs, decobs, obst, '/home/esp01/ancil/horizons/cs41_spitzer.vec') / 60.0) 8.5228630 # good, about 8 minutes light time to travel 1 AU Modification History: --------------------- 2005-12-01 statia Written by Statia Luszcz. 2006-03-09 jh Corrected 90deg error in algorithm, renamed, updated header, made Coordtable a positional arg since it's required, switched to radians. 2007-06-28 jh Renamed to suntimecorr since we now use barycentric Julian date. 2009-01-28 jh Change variables to long, use spline instead of linfit so we can use one HORIZONS file for the whole mission. 2009-02-22 jh Reshape spline results to shape of obst. Make it handle unsorted unput data properly. Header update. 2010-07-10 patricio Converted to python. (pcubillos@fulbrightmail.org) 2010-11-01 patricio Docstring updated. """ start_data = '$$SOE' end_data = '$$EOE' # Read in whole table as an list of strings, one string per line ctable = open(coordtable, 'r') wholetable = ctable.readlines() ctable.close() # Find start and end line i = 0 # while end has not been found: while wholetable[i].find(end_data) == -1: # if start is found get the index of next line: if wholetable[i].find(start_data) != -1: start = i + 1 i += 1 # Chop table data = wholetable[start:i-2] # Extract values: x, y, z, time = getcoords(data) # Interpolate to observing times: # We must preserve the shape and order of obst. Spline takes # monotonic input and produces linear output. x, y, z, time are # sorted as HORIZONS produces them. 
# Save shape of obst tshape = np.shape(obst) # Reshape to 1D and sort obstime = obst.flatten() ti = np.argsort(obstime) # indexes of sorted array by time tsize = np.size(obstime) # Allocate output arrays obsx = np.zeros(tsize) obsy = np.zeros(tsize) obsz = np.zeros(tsize) # Interpolate sorted arrays obsx[ti] = splinterp(obstime[ti], time, x) obsy[ti] = splinterp(obstime[ti], time, y) obsz[ti] = splinterp(obstime[ti], time, z) if verbose: print( 'X, Y, Z = ', obsx, obsy, obsz) # Change ra and dec into unit vector n_hat object_unit_x = np.cos(dec) * np.cos(ra) object_unit_y = np.cos(dec) * np.sin(ra) object_unit_z = np.sin(dec) # Dot product the vectors with n_hat rdotnhat = ( obsx * object_unit_x + obsy * object_unit_y + obsz * object_unit_z ) # Reshape back to the original shape rdotnhat = rdotnhat.reshape(tshape) # Time correction is: dt = length/velocity # Divide by the speed of light and return return rdotnhat / ( c / 1000.0 )
c214a065e41ddc85713ab351e537ab08d521f090
3,659,309
from typing import Union
from typing import Tuple
import re

# Module-level verbosity flag. The original snippet imported VERBOSE from `re`,
# which is a regex flag and always truthy; a plain boolean defined by the
# surrounding module is assumed here instead.
VERBOSE = False


def normalize_address(address: str, asHex: bool = False) -> Union[Tuple[str, str], Tuple[str, bytes]]:
    """Takes an address as raw bytes (hex) or id__ nyzostring and provides both formats back."""
    # NyzoStringEncoder and NyzoStringPublicIdentifier are assumed to be imported
    # from the surrounding package.
    try:
        # convert recipient to raw if provided as id__
        if address.startswith("id__"):
            address_raw = NyzoStringEncoder.decode(address).get_bytes().hex()
            if VERBOSE:
                print(f"Raw address is {address_raw}")
        else:
            raise RuntimeWarning("Not an id__")
    except Exception:
        if VERBOSE:
            print("address was not a proper id__ nyzostring")
        address_raw = re.sub(r"[^0-9a-f]", "", address.lower())
        # print(address_raw)
    if len(address_raw) != 64:
        raise ValueError("Wrong address format. 64 bytes as hex or id__ nyzostring required")
    if VERBOSE:
        print(f"Trying with {address_raw}")
    address = NyzoStringEncoder.encode(NyzoStringPublicIdentifier.from_hex(address_raw))
    # Here we should have both recipient and recipient_raw in all cases.
    if asHex:
        return address, address_raw
    else:
        return address, bytes.fromhex(address_raw)
ce574f1b1c5231b97a444d3f3e767016cd879d6e
3,659,310
def get_welcome_response(): """ Prompt the user for the prayer """ session_attributes = {} card_title = "Welcome" speech_output = "What would you like me to pray with you? I can pray the Rosary and the Divine Mercy Chaplet." reprompt_text = "What would you like me to pray with you?" should_end_session = False return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, speech_output, should_end_session, []))
9bcf20bcc2afd96e1b3b93abb47ba654e2051884
3,659,311
from typing import Optional import math import time import requests import textwrap def get_news( limit: int = 60, post_kind: str = "news", filter_: Optional[str] = None, region: str = "en", ) -> pd.DataFrame: """Get recent posts from CryptoPanic news aggregator platform. [Source: https://cryptopanic.com/] Parameters ---------- limit: int number of news to fetch post_kind: str Filter by category of news. Available values: news or media. filter_: Optional[str] Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol region: str Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch), es (Español), fr (Français), it (Italiano), pt (Português), ru (Русский) Returns ------- pd.DataFrame DataFrame with recent news from different sources filtered by provided parameters. """ if post_kind not in CATEGORIES: post_kind = "news" results = [] response = make_request(post_kind=post_kind, filter_=filter_, region=region) if response: data, next_page, _ = ( response["results"], response.get("next"), response.get("count"), ) for post in data: results.append(_parse_post(post)) number_of_pages = math.ceil(limit // 20) counter = 0 while counter < number_of_pages and next_page: counter += 1 try: time.sleep(0.2) res = requests.get(next_page).json() for post in res["results"]: results.append(_parse_post(post)) next_page = res.get("next") except Exception as e: # noqa: F841 logger.exception(str(e)) console.print( "[red]Something went wrong while fetching news from API[/red]\n" ) return pd.DataFrame() try: df = pd.DataFrame(results) df["title"] = df["title"].apply( lambda x: "\n".join(textwrap.wrap(x, width=66)) if isinstance(x, str) else x ) return df except Exception as e: # noqa: F841 logger.exception(str(e)) console.print("[red]Something went wrong with DataFrame creation[/red]\n") return pd.DataFrame() return pd.DataFrame()
8535965e4058ce4bd76b2ef44a060e4083f2128e
3,659,312
def login(): """ Display log in form and handle user login.""" email = request.args.get('emailLogin') password = request.args.get('passwordLogin') user = User.query.filter(User.email == email).first() if user is None or not check_password_hash(user.password, password): flash('Invalid email or password') return redirect('/') else: login_user(user) flash('Welcome back!') return redirect('/') return render_template('login.html')
9368ccc0e8d02baa58ce434e37e7ca7a2deb53e2
3,659,313
# The original snippet did `import _queue`, but the standard-library _queue module
# has no .get(); a module-level dict registry of enqueued models is assumed here.
_queue = {}


def model_from_queue(model):
    """ Returns the model dict if model is enqueued, else None."""
    return _queue.get(model, None)
46eea9b8a218181b000308b080a8c9dad7e866b2
3,659,314
def get_class_name(obj, instance=True): """ Given a class or instance of a class, returns a string representing the fully specified path of the class. Parameters ---------- obj : object An instance of any object instance: bool Indicates whether given object is an instance of the class to be named """ typ = type(obj) if instance else obj return "{}.{}".format(typ.__module__, typ.__name__)
3a7ebd1fb2682ec5dff6d42cd2cccf918d67f9a0
3,659,316
import numpy as np


def _plot_events_nday(ax, grid, events, scale_factor=1.0):
    """
    Plot a map of the total number of days spent in dry spell events.

    Parameters
    ----------
    ax : <matplotlib.axes.Axes> instance.
        The axes to which the map will be drawn.
    grid : <geo_grid.LandGrid> instance
        Object describing the spatial grid.
    events : list of lists of <event.Event> instances
        Catalogue of dry spell events from file_eves.
    scale_factor : float, optional
        Totals are multiplied by this number before plotting. Typically
        used to convert from total to per year.

    Returns
    -------
    PCM : mappable
        E.g., <matplotlib.collections.QuadMesh>.
    """
    # _get_cmap, _plot_map and LAND are assumed to come from the surrounding module.
    nday = []
    for eves in events[0]:
        nday.append(sum(e.duration for e in eves))
    nday = np.ma.masked_less(nday, 1)
    nday = grid.expand(nday) * scale_factor
    levs = np.linspace(0, 360, 13)
    cmap = _get_cmap("cividis", levs, over="orange")
    PCM = _plot_map(ax, grid, nday, levs, cmap, ticks=levs[::3])
    ax.set_title("Number of days per year spent in dry spells")
    ax.add_feature(LAND, facecolor="lightgrey")
    return PCM
dea821d98afe95790742636408b22b4c4fdc9688
3,659,319
def rank_transform(arr: np.ndarray, centered=True) -> np.ndarray: """ Transform a 1-dim ndarray with arbitrary scalar values to an array with equally spaced rank values. This is a nonlinear transform. :param arr: input array :param centered: if the transform should by centered around zero :return: transformed array """ assert isinstance(arr, np.ndarray) assert arr.ndim == 1 # Create array to sort in ranks = np.zeros_like(arr) # Ascending sort idcs_sort = np.argsort(arr) # Rearrange to an equal-step array from -0.5 (or 0) to 0.5 (or 1) if centered: ranks[idcs_sort] = np.linspace(-.5, .5, idcs_sort.size, endpoint=True) else: ranks[idcs_sort] = np.linspace(0., 1., idcs_sort.size, endpoint=True) return ranks
d6d4cbf1e191c1fa61e58309d478547f88f0550f
3,659,320
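A quick usage sketch of rank_transform above, with illustrative input values:

import numpy as np

arr = np.array([3.2, -1.0, 50.0, 0.5])
print(rank_transform(arr))
# approximately [ 0.167 -0.5    0.5   -0.167]  (order relation kept, spacing equalized)
print(rank_transform(arr, centered=False))
# approximately [ 0.667  0.     1.     0.333]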
from datetime import datetime def edit_post(id, alias): """Edit an existing post. User has to be logged in and be either: - Author of the post - Editor (role) - Administrator (role) """ post = Post.query.get_or_404(id) if current_user != post.author and not ( current_user.has_role('Administrator') or current_user.has_role('Editor') ): abort(403) form = PostForm() if form.validate_on_submit(): upload = Upload.query.filter_by(filename=form.image.data).first() category = Category.query.filter_by(id=form.category.data).first() post.title = form.title.data post.alias = sanitize_alias(form.alias.data) post.timestamp = form.timestamp.data post.description = form.description.data post.body = form.body.data post.image = upload post.featured = form.featured.data post.commenting = form.commenting.data post.category = category db.session.add(post) # update tags new_tags = sanitize_tags(form.tags.data) old_tags = sanitize_tags(', '.join([c.title for c in post.tags.all()])) added_tag_titles, removed_tag_titles = get_added_removed(new_tags, old_tags) # add new tags added_tag_aliases = [sanitize_alias(c) for c in added_tag_titles] for c in zip(added_tag_titles, added_tag_aliases): tag_alias = c[1] tag = Tag.query.filter(Tag.alias == tag_alias).first() # if tag doesn't exist in the db, add it if not tag: tag = Tag(title=c[0], alias=c[1]) db.session.add(tag) # add relation between the Post and the Tag # flush session to obtain tag.id, if the tag has been added recently db.session.flush() cl = Tagification(tag_id=tag.id, post_id=id) db.session.add(cl) # remove obsolete tags removed_tag_aliases = [sanitize_alias(c) for c in removed_tag_titles] for c in zip(removed_tag_titles, removed_tag_aliases): tag_alias = c[1] tag = Tag.query.filter(Tag.alias == tag_alias).first() # remove relations old_cl = Tagification.query.filter( Tagification.tag_id == tag.id, Tagification.post_id == id ).first() db.session.delete(old_cl) # remove tag, if it's not used in other posts other_cl = Tagification.query.filter( Tagification.tag_id == tag.id, Tagification.post_id != id ).first() if not other_cl: db.session.delete(tag) flash('The post has been updated.', 'success') return redirect( url_for( 'main.post', category=post.category.alias, id=post.id, alias=post.alias ) ) form.title.data = post.title form.alias.data = post.alias form.timestamp.data = post.timestamp form.description.data = post.description form.body.data = post.body if post.image: form.image.data = post.image.filename form.featured.data = post.featured form.commenting.data = post.commenting form.category.data = post.category form.tags.data = ', '.join([c.title for c in post.tags.all()]) return render_template( 'ctrl/edit_post.html', form=form, datetimepicker=datetime.utcnow() )
038b5f15125bd2263f8ea6a677f3de9a22edba04
3,659,321
def maxindices(l): """ Get indices for all occurences of maximal element in list :param l: :return: """ max_indices = [] max_value = l[0] #Assume un-exhaustible iterator for i, v in enumerate(l): if v > max_value: max_value = v max_indices = [i] elif v == max_value: max_indices.append(i) return max_indices
b2f155fa97455c0327b2717591ebea2176773012
3,659,324
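For example, ties are all reported by maxindices above:

print(maxindices([1, 3, 2, 3]))   # [1, 3]
print(maxindices([7]))            # [0]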
def get_grid_extents(data, edges=True): """ Get min and max lat and lon from an input GEOS-Chem xarray dataset or grid dict Args: data: xarray Dataset or dict A GEOS-Chem dataset or a grid dict edges (optional): bool Whether grid extents should use cell edges instead of centers Default value: True Returns: minlon: float Minimum longitude of data grid maxlon: float Maximum longitude of data grid minlat: float Minimum latitude of data grid maxlat: float Maximum latitude of data grid """ if isinstance(data, dict): if "lon_b" in data and edges: return np.min( data["lon_b"]), np.max( data["lon_b"]), np.min( data["lat_b"]), np.max( data["lat_b"]) elif not edges: return np.min( data["lon"]), np.max( data["lon"]), np.min( data["lat"]), np.max( data["lat"]) else: return -180, 180, -90, 90 elif "lat" in data.dims and "lon" in data.dims: lat = data["lat"].values lon = data["lon"].values if lat.size / 6 == lon.size: # No extents for CS plots right now return -180, 180, -90, 90 else: lat = np.sort(lat) minlat = np.min(lat) if abs(abs(lat[1]) - abs(lat[0]) ) != abs(abs(lat[2]) - abs(lat[1])): #pole is cutoff minlat = minlat - 1 maxlat = np.max(lat) if abs(abs(lat[-1]) - abs(lat[-2]) ) != abs(abs(lat[-2]) - abs(lat[-3])): maxlat = maxlat + 1 # add longitude res to max longitude lon = np.sort(lon) minlon = np.min(lon) maxlon = np.max(lon) + abs(abs(lon[-1]) - abs(lon[-2])) return minlon, maxlon, minlat, maxlat else: # GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim return -180, 180, -90, 90
c8cbef8b0dc3f6ce9c009955c2eff88fd5011ded
3,659,325
def millisToNanos(millis): """ Converts milliseconds to nanoseconds. :param millis: (long) - The long milliseconds value to convert. :return: (long) QueryConstants.NULL_LONG if the input is equal to QueryConstants.NULL_LONG. Throws DBTimeUtils.DBDateTimeOverflowException if the input is too large for conversion. Otherwise returns a long of the equivalent number of nanoseconds to the input. """ return _java_type_.millisToNanos(millis)
4f659a6d994551c0ce72875009d688cb7c91571d
3,659,326
def recursive_seed_part( graph, parts, pop_target, pop_col, epsilon, method=bipartition_tree, node_repeats=1, n=None, ceil=None ): """ Returns a partition with ``num_dists`` districts balanced within ``epsilon`` of ``pop_target`` by recursively splitting graph using recursive_seed_part_inner. :param graph: The graph :param parts: Iterable of part labels (like ``[0,1,2]`` or ``range(4)`` :param pop_target: Target population for each part of the partition :param pop_col: Node attribute key holding population data :param epsilon: How far (as a percentage of ``pop_target``) from ``pop_target`` the parts of the partition can be :param method: Function used to find balanced partitions at the 2-district level :param node_repeats: Parameter for :func:`~gerrychain.tree_methods.bipartition_tree` to use. :param n: Either a positive integer (greater than 1) or None. If n is a positive integer, this function will recursively create a seed plan by either biting off districts from graph or dividing graph into n chunks and recursing into each of these. If n is None, this function prime factors ``num_dists``=n_1*n_2*...*n_k (n_1 > n_2 > ... n_k) and recursively partitions graph into n_1 chunks. :param ceil: Either a positive integer (at least 2) or None. Relevant only if n is None. If ``ceil`` is a positive integer then finds the largest factor of ``num_dists`` less than or equal to ``ceil``, and recursively splits graph into that number of chunks, or bites off a district if that number is 1. :return: New assignments for the nodes of ``graph``. :rtype: dict """ flips = {} assignment = recursive_seed_part_inner( graph, len(parts), pop_target, pop_col, epsilon, method=bipartition_tree, node_repeats=node_repeats, n=n, ceil=ceil ) for i in range(len(assignment)): for node in assignment[i]: flips[node] = parts[i] return flips
8d2517e74d8726696d865ea4993a10602af74450
3,659,327
def codegen_reload_data(): """Parameters to codegen used to generate the fn_carbon_black_cloud_devices package""" reload_params = {"package": u"fn_carbon_black_cloud_devices", "incident_fields": [], "action_fields": [], "function_params": [u"carbon_black_device_query_string", u"incident_id"], "datatables": [], "message_destinations": [u"carbon_black_cloud"], "functions": [u"carbon_black_cloud_devices_quarantine"], "phases": [], "automatic_tasks": [], "scripts": [], "workflows": [u"example_carbon_black_cloud_devices_quarantine"], "actions": [u"Example: Run Carbon Black Device Quarantine"], "incident_artifact_types": [] } return reload_params
0f116dd5c9f7496af86dde2afbdd3442904dc40f
3,659,328
def validate_investment_amount(investment_amount, intent_request):
    """
    Validates the investment_amount provided by the user.
    """
    # Validate that the investment_amount is equal to or greater than 5000.
    if investment_amount is not None:
        # Since parameters are strings it's important to cast values
        investment_amount = parse_int(investment_amount)
        if investment_amount < 5000:
            return build_validation_result(
                False,
                "investmentAmount",
                "The investmentAmount should be greater than or equal to 5000. "
                "Please provide a correct investmentAmount in dollars.",
            )

    # A True result is returned if the investment amount is valid
    return build_validation_result(True, None, None)
6f645d196e452377f4c16ae585ecc113ae5997b0
3,659,329
from bson import ObjectId


def new_id():
    """ Generates a new bson ObjectId """
    return str(ObjectId())
aa02c802abf937720119f9843e55395d485b11c1
3,659,330
import lumapi import pathlib import json def gc_sweep( session=None, draw_function=gc2d, dirpath=CONFIG["workspace"], overwrite=False, run=True, base_fsp_path=str(CONFIG["grating_coupler_2D_base"]), **kwargs ): """ grating coupler sweep grating_coupler_2D_base optimizes Transmission and does not calculate Sparameters """ function_name = draw_function.__name__ + "_sweep" filename = kwargs.pop("name", get_function_name(function_name, **kwargs)) dirpath = pathlib.Path(dirpath) / function_name dirpath.mkdir(exist_ok=True) filepath = dirpath / filename filepath_sim_settings = filepath.with_suffix(".settings.json") filepath_json = filepath.with_suffix(".json") filepath_fsp = str(filepath.with_suffix(".fsp")) if filepath_json.exists() and not overwrite and run: return json.loads(open(filepath_json).read()) s = session or lumapi.FDTD(hide=False) simdict = draw_function(session=s, base_fsp_path=base_fsp_path, **kwargs) s.save(filepath_fsp) if not run: return s.run() T = s.getresult("fom", "T") results = dict(wavelength_nm=list(T["lambda"].ravel() * 1e9), T=list(T["T"])) with open(filepath_json, "w") as f: json.dump(results, f) settings = simdict.get("settings") if settings: with open(filepath_sim_settings, "w") as f: json.dump(settings, f) return results
ae22a103adb2d440f25b14eac1d681d591237b67
3,659,331
def stem(word, stemmer=PORTER, **kwargs): """ Returns the base form of the word when counting words in count(). With stemmer=PORTER, the Porter2 stemming algorithm is used. With stemmer=LEMMA, either uses Word.lemma or inflect.singularize(). (with optional parameter language="en", pattern.en.inflect is used). """ if hasattr(word, "string") and stemmer in (PORTER, None): word = word.string if isinstance(word, basestring): word = decode_utf8(word.lower()) if stemmer is None: return word.lower() if stemmer == PORTER: return _stemmer.stem(word, **kwargs) if stemmer == LEMMA: if hasattr(word, "lemma"): # pattern.en.Word w = word.string.lower() if word.lemma is not None: return word.lemma if word.pos == "NNS": return singularize(w) if word.pos.startswith(("VB", "MD")): return conjugate(w, "infinitive") or w if word.pos.startswith(("JJ",)): return predicative(w) if word.pos.startswith(("DT", "PR", "WP")): return singularize(w, pos=word.pos) return w return singularize(word, pos=kwargs.get("pos", "NN")) if hasattr(stemmer, "__call__"): return decode_utf8(stemmer(word)) return word.lower()
71ac3a3ee30a226fcf13b2d4a3288003feeb7c3e
3,659,332
def verify_bounce_message(msg): """ Verify an SES/SNS bounce notification message. """ verifier = BounceMessageVerifier(msg) return verifier.is_verified()
c181e82d5748ed6a310650730bc1fc94cde8e33d
3,659,333
def multiples(a, b): """This function checks if a number is a multiple of another.""" if type(a) != int or type(b) != int: raise Exception('Values must be integers.') elif a == 0: raise Exception('0 is not valid.') elif a == b: raise Exception('Numbers should not be the same.') else: if b > a: check = b % a if not check: return True else: return False else: raise Exception("Error! {0} isn't greater than {1}." .format(b, a))
3f8bccd5429b5d307c0a018b7186bd75a76e996a
3,659,334
from urllib import parse


def unquote_keys(data):
    """Restores initial view of 'quoted' keys in dictionary data

    :param data: is a dictionary
    :return: data with restored keys if they were 'quoted'.
    """
    if isinstance(data, dict):
        # Iterate over a copy of the items: keys are popped and re-inserted below,
        # and mutating a dict while iterating over it raises a RuntimeError.
        for key, value in list(data.items()):
            if isinstance(value, dict):
                unquote_keys(value)
            if key.startswith('%24'):
                k = parse.unquote(key)
                data[k] = data.pop(key)
    return data
de3802fdf0b278fcb39870f49ec7435ae5a63f38
3,659,336
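A quick usage sketch of unquote_keys above, with an illustrative payload:

payload = {"%24filter": {"%24eq": 5}}
print(unquote_keys(payload))
# {'$filter': {'$eq': 5}}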
def retr_amplslen(peri, radistar, masscomp, massstar): """ Calculate the self-lensing amplitude. Arguments peri: orbital period [days] radistar: radius of the star [Solar radius] masscomp: mass of the companion [Solar mass] massstar: mass of the star [Solar mass] Returns amplslen: the fractional amplitude of the self-lensing """ amplslen = 7.15e-5 * radistar**(-2.) * peri**(2. / 3.) * masscomp * (masscomp + massstar)**(1. / 3.) * 1e3 # [ppt] return amplslen
32c0618f0e5965357fbcadd090443d0baf0e65bd
3,659,337
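A rough numerical check of the self-lensing formula above; the input values are illustrative, not from the source:

# Sun-like star (1 R_sun, 1 M_sun) with a 1 M_sun companion on a 10-day orbit.
ampl = retr_amplslen(peri=10., radistar=1., masscomp=1., massstar=1.)
print(ampl)  # ~0.42 ppt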
from datetime import datetime


def calculate_current_teach_week(semester_first_week_date='2021-3-08 08:00:00'):
    """
    Calculate which teaching week the current date falls in. The idea:
    (week-of-year of the current date) - (week-of-year of the semester's first week).
    ----
    param: semester_first_week_date: date of the first week of the semester, e.g. '2021-3-08 08:00:00'
    return: the current teaching week
    """
    # Week of the year that the given date belongs to, returned as a string
    semester_first_week = datetime.strptime(semester_first_week_date, '%Y-%m-%d %H:%M:%S').strftime('%W')
    # Week of the year that the current date belongs to, returned as a string
    current_year_week = datetime.now().strftime('%W')
    # Compute the teaching week of the current date.
    # The "- 1" inside the parentheses is the number of weeks before the first week;
    # the final "+ 1" is needed because week numbering starts at index 00.
    current_teach_week = int(current_year_week) - (int(semester_first_week) - 1) + 1
    return current_teach_week
01a8df84b878e192dae1b1d0d38d78fb5c19f93e
3,659,339
def get_first_model_each_manufacturer(cars=cars): """return a list of matching models (original ordering)""" return [cars[key][0] for key in cars]
639cd912a68149864f4d0ff6c1b2dc7bc911052f
3,659,340
import re def get_sandbox_table_name(dataset_id, rule_name): """ A helper function to create a table in the sandbox dataset :param dataset_id: the dataset_id to which the rule is applied :param rule_name: the name of the cleaning rule :return: the concatenated table name """ return '{dataset_id}_{rule_name}'.format(dataset_id=dataset_id, rule_name=re.sub( r'\W', '_', rule_name))
ee07d40f885cb9d6d0d34cc0215620a2572b6b5f
3,659,341
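For example, non-word characters in the rule name are replaced with underscores by the function above:

print(get_sandbox_table_name('combined_dataset', 'clean-rule #2'))
# combined_dataset_clean_rule__2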
def index(): """for i in range(0, 30): data = QuizQuestions("math", None, "en_US", 7, "normal", "This is placeholder question number " + str(i), "c", "Answer A", "Answer B", "Answer C", "Answer D", True) db.session.add(data) db.session.commit()""" return render_template("quiz_index.html")
5be34099cead47e4d339edf14bdc39519e7242f5
3,659,342
from typing import Any def _get_invoke_function_name() -> Any: """ Get invoke function Name. Returns ------- Function Name. """ props = get_properties() functionName = f"orbit-{props['AWS_ORBIT_ENV']}-{props['AWS_ORBIT_TEAM_SPACE']}-container-runner" return functionName
387ed3d80e3fedcee4662ec9db62622bb8393aba
3,659,343
def sigmaG(a, axis=None, overwrite_input=False, keepdims=False): """ Compute the rank-based estimate of the standard deviation Parameters ---------- a : array_like Array containing numbers whose mean is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the means are computed. The default is to compute the mean of the flattened array. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. Note that, if `overwrite_input` is True and the input is not already an array, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- median : ndarray, see dtype parameter above array containing the median values sigmaG : ndarray, see dtype parameter above. array containing the robust estimator of the standard deviation See Also -------- median_sigmaG : robust rank-based estimate of mean and standard deviation Notes ----- This routine uses a single call to ``np.nanpercentile`` to find the quartiles along the given axis, and uses these to compute the sigmaG, a robust estimate of the standard deviation sigma: sigmaG = 0.7413 * (q75 - q25) where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5)) """ q25, q75 = np.nanpercentile(a, [25, 75], axis=axis, overwrite_input=overwrite_input) sigmaG = sigmaG_factor * (q75 - q25) if keepdims: if axis is None: newshape = a.ndim * (1,) else: newshape = np.asarray(a.shape) newshape[axis] = 1 sigmaG = sigmaG.reshape(newshape) return sigmaG
7731a1ad94f85baf02125479fc96e89a59c8b594
3,659,344
def traverse_tagged_databases( functional_unit, method, label="tag", default_tag="other", secondary_tags=[], fg_databases=None ): """Traverse a functional unit throughout its foreground database(s) or the listed databses in fg_databses, and group impacts by tag label. Contribution analysis work by linking impacts to individual activities. However, you also might want to group impacts in other ways. For example, give individual biosphere exchanges their own grouping, or aggregate two activities together. Consider this example system, where the letters are the tag labels, and the numbers are exchange amounts. The functional unit is one unit of the tree root. .. image:: images/tagged-traversal.png :alt: Example tagged supply chain In this supply chain, tags are applied to activities and biosphere exchanges. If a biosphere exchange is not tagged, it inherits the tag of its producing activity. Similarly, links to other databases are assessed with the usual LCA machinery, and the total LCA score is tagged according to its consuming activity. If an activity does not have a tag, a default tag is applied. We can change our visualization to show the use of the default tags: .. image:: images/tagged-traversal-2.png :alt: Example tagged supply chain And then we can manually calculate the tagged impacts. Normally we would need to know the actual biosphere flows and their respective characterization factors (CF), but in this example we assume that each CF is one. Our result, group by tags, would therefore be: * **A**: :math:`6 + 27 = 33` * **B**: :math:`30 + 44 = 74` * **C**: :math:`5 + 16 + 48 = 69` * **D**: :math:`14` This function will only traverse the foreground database, i.e. the database of the functional unit activity. A functional unit can have multiple starting nodes; in this case, all foreground databases are traversed. Input arguments: * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``. * ``method``: A method name, e.g. ``("foo", "bar")`` * ``label``: The label of the tag classifier. Default is ``"tag"`` * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"`` * ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list. * ``fg_databases``: a list of foreground databases to be traversed, e.g. ['foreground', 'biomass', 'machinery'] It's not recommended to include all databases of a project in the list to be traversed, especially not ecoinvent itself Returns: Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``. """ lca = LCA(functional_unit, method) lca.lci() lca.lcia() method_dict = {o[0]: o[1] for o in Method(method).load()} graph = [ recurse_tagged_database( key, amount, method_dict, lca, label, default_tag, secondary_tags, fg_databases ) for key, amount in functional_unit.items() ] return aggregate_tagged_graph(graph), graph
02edd2f9b33760a730ea7906240b48418059430c
3,659,345
def graft( repo, ctx, base=None, labels=None, keepparent=False, keepconflictparent=False, wctx=None, ): """Do a graft-like merge. This is a merge where the merge ancestor is chosen such that one or more changesets are grafted onto the current changeset. In addition to the merge, this fixes up the dirstate to include only a single parent (if keepparent is False) and tries to duplicate any renames/copies appropriately. ctx - changeset to rebase base - merge base, or ctx.p1() if not specified labels - merge labels eg ['local', 'graft'] keepparent - keep second parent if any keepconflictparent - if unresolved, keep parent used for the merge """ # If we're grafting a descendant onto an ancestor, be sure to pass # mergeancestor=True to update. This does two things: 1) allows the merge if # the destination is the same as the parent of the ctx (so we can use graft # to copy commits), and 2) informs update that the incoming changes are # newer than the destination so it doesn't prompt about "remote changed foo # which local deleted". # We also pass mergeancestor=True when base is the same revision as p1. 2) # doesn't matter as there can't possibly be conflicts, but 1) is necessary. wctx = wctx or repo[None] pctx = wctx.p1() base = base or ctx.p1() mergeancestor = ( repo.changelog.isancestor(pctx.node(), ctx.node()) or pctx.rev() == base.rev() ) stats = _update( repo, ctx.node(), True, True, base.node(), mergeancestor=mergeancestor, labels=labels, wc=wctx, ) if keepconflictparent and stats.unresolvedcount: pother = ctx.node() else: pother = nullid parents = ctx.parents() if keepparent and len(parents) == 2 and base in parents: parents.remove(base) pother = parents[0].node() # Never set both parents equal to each other if pother == pctx.node(): pother = nullid if wctx.isinmemory(): wctx.setparents(pctx.node(), pother) # fix up dirstate for copies and renames copies.graftcopies(wctx, ctx, base) else: with repo.dirstate.parentchange(): repo.setparents(pctx.node(), pother) repo.dirstate.write(repo.currenttransaction()) # fix up dirstate for copies and renames copies.graftcopies(wctx, ctx, base) return stats
b715ead11ea83c61eff52a670396885cd6373739
3,659,346
def GetSiteFilters(filename): """ Reader for a file of reportable sites. The file contains 2 tokens: the site name and a normalization factor. The method returns a hash table with the key being site and the value the normalization factor to use. """ try: #--- process the reportable sites file --- sites = {} fd = open(filename) while 1: filter = fd.readline() if filter == "": # EOF break filter = filter.strip().strip("\n") if filter.startswith("#"): continue if len(filter) == 0: continue site = filter.split() if sites.has_key(site[0]): raise Exception("System error: duplicate - site (%s) already set" % site[0]) factor = 0 if len(site) == 1: raise Exception("System error: No normalization factory was provide for site: %s" % site[0]) elif len(site) > 1: #-- verify the factor is an integer -- try: tmp = int(site[1]) factor = float(site[1])/1000 except: raise Exception("Error in %s file: 2nd token must be an integer (%s" % (filename,filter)) #-- set the factor -- sites[site[0]] = factor else: continue #-- end of while loop -- fd.close() #-- verify there is at least 1 site -- if len(sites) == 0: raise Exception("Error in %s file: there are no sites to process" % filename) return sites except IOError, (errno,strerror): raise Exception("IO error(%s): %s (%s)" % (errno,strerror,filename))
5f5e02b4213a060ca2ea2485d9f4ce4a09e9995f
3,659,347
import warnings def MiniMobileNetV2(input_shape=None, alpha=1.0, expansion_factor=6, depth_multiplier=1, dropout=0., weight_decay=0., include_top=True, weights=None, input_tensor=None, pooling=None, classes=10): """Instantiates the MobileNet architecture. MobileNet V2 is from the paper: - [Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381) Note that only TensorFlow is supported for now, therefore it only works with the data format `image_data_format='channels_last'` in your Keras config at `~/.keras/keras.json`. To load a MobileNet model via `load_model`, import the custom objects `relu6` and `DepthwiseConv2D` and pass them to the `custom_objects` parameter. E.g. model = load_model('mobilenet.h5', custom_objects={ 'relu6': mobilenet.relu6, 'DepthwiseConv2D': mobilenet.DepthwiseConv2D}) # Arguments input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or (3, 224, 224) (with `channels_first` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. alpha: controls the width of the network. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. expansion_factor: controls the expansion of the internal bottleneck blocks. Should be a positive integer >= 1 depth_multiplier: depth multiplier for depthwise convolution (also called the resolution multiplier) dropout: dropout rate weight_decay: Weight decay factor. include_top: whether to include the fully-connected layer at the top of the network. weights: `None` (random initialization) or `imagenet` (ImageNet weights) input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. # Returns A Keras model instance. # Raises ValueError: in case of invalid argument for `weights`, or invalid input shape. RuntimeError: If attempting to run this model with a backend that does not support separable convolutions. """ if K.backend() != 'tensorflow': raise RuntimeError('Only Tensorflow backend is currently supported, ' 'as other backends do not support ' 'depthwise convolution.') if weights not in {'imagenet', None}: raise ValueError('The `weights` argument should be either ' '`None` (random initialization) or `imagenet` ' '(pre-training on ImageNet).') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as ImageNet with `include_top` ' 'as true, `classes` should be 1000') # Determine proper input shape and default size. 
if input_shape is None: default_size = 224 else: if K.image_data_format() == 'channels_first': rows = input_shape[1] cols = input_shape[2] else: rows = input_shape[0] cols = input_shape[1] if rows == cols and rows in [96, 128, 160, 192, 224]: default_size = rows else: default_size = 224 input_shape = _obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=K.image_data_format(), require_flatten=include_top or weights) if K.image_data_format() == 'channels_last': row_axis, col_axis = (0, 1) else: row_axis, col_axis = (1, 2) rows = input_shape[row_axis] cols = input_shape[col_axis] if weights == 'imagenet': if depth_multiplier != 1: raise ValueError('If imagenet weights are being loaded, ' 'depth multiplier must be 1') if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]: raise ValueError('If imagenet weights are being loaded, ' 'alpha can be one of' '`0.35`, `0.50`, `0.75`, `1.0`, `1.3` and `1.4` only.') if rows != cols or rows not in [96, 128, 160, 192, 224]: raise ValueError('If imagenet weights are being loaded, ' 'input must have a static square shape (one of ' '(06, 96), (128,128), (160,160), (192,192), or ' '(224, 224)).Input shape provided = %s' % (input_shape,)) if K.image_data_format() != 'channels_last': warnings.warn('The MobileNet family of models is only available ' 'for the input data format "channels_last" ' '(width, height, channels). ' 'However your settings specify the default ' 'data format "channels_first" (channels, width, height).' ' You should set `image_data_format="channels_last"` ' 'in your Keras config located at ~/.keras/keras.json. ' 'The model being returned right now will expect inputs ' 'to follow the "channels_last" data format.') K.set_image_data_format('channels_last') old_data_format = 'channels_first' else: old_data_format = None if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor x = _conv_block(img_input, 32, alpha, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay) x = _depthwise_conv_block_v2(x, 16, alpha, 1, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=1) x = _depthwise_conv_block_v2(x, 24, alpha, expansion_factor, depth_multiplier, block_id=2, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, strides=(2, 2)) x = _depthwise_conv_block_v2(x, 24, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=3) x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, block_id=4, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay) x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=5) x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=6) x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, block_id=7, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, strides=(2, 2)) x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=8) x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=9) x = _depthwise_conv_block_v2(x, 64, alpha, 
expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, block_id=10) if alpha <= 1.0: penultimate_filters = 1280 else: penultimate_filters = int(1280 * alpha) x = _conv_block(x, penultimate_filters, alpha=1.0, kernel=(1, 1), bn_epsilon=1e-3, bn_momentum=0.99, block_id=18) if include_top: if K.image_data_format() == 'channels_first': shape = (penultimate_filters, 1, 1) else: shape = (1, 1, penultimate_filters) x = GlobalAveragePooling2D()(x) x = Reshape(shape, name='reshape_1')(x) x = Dropout(dropout, name='dropout')(x) x = Conv2D(classes, (1, 1), kernel_initializer=initializers.he_normal(), kernel_regularizer=regularizers.l2(weight_decay), padding='same', name='conv_preds')(x) x = Activation('softmax', name='act_softmax')(x) x = Reshape((classes,), name='reshape_2')(x) else: if pooling == 'avg': x = GlobalAveragePooling2D()(x) elif pooling == 'max': x = GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = get_source_inputs(input_tensor) else: inputs = img_input # Create model. model = Model(inputs, x, name='mobilenetV2_%0.2f_%s' % (alpha, rows)) # load weights if weights == 'imagenet': if K.image_data_format() == 'channels_first': raise ValueError('Weights for "channels_last" format ' 'are not available.') if alpha == 1.0: alpha_text = '1_0' elif alpha == 1.3: alpha_text = '1_3' elif alpha == 1.4: alpha_text = '1_4' elif alpha == 0.75: alpha_text = '7_5' elif alpha == 0.50: alpha_text = '5_0' else: alpha_text = '3_5' if include_top: model_name = 'mobilenet_v2_%s_%d_tf.h5' % (alpha_text, rows) weigh_path = BASE_WEIGHT_PATH_V2 + model_name weights_path = get_file(model_name, weigh_path, cache_subdir='models') else: model_name = 'mobilenet_v2_%s_%d_tf_no_top.h5' % (alpha_text, rows) weigh_path = BASE_WEIGHT_PATH_V2 + model_name weights_path = get_file(model_name, weigh_path, cache_subdir='models') model.load_weights(weights_path) if old_data_format: K.set_image_data_format(old_data_format) return model
695d592f3b8e75f9d416702c3e2b74f8f608e211
3,659,348
import logging


def update_forward_cnt(**kwargs):
    """
    Update the forward count, incrementing it by 1.
    :param kwargs: {'object_id': object_id}
    :return:
    """
    # get_session, SecondHand and RESULT_OK are assumed to come from the surrounding module.
    session = None
    try:
        session = get_session()
        # forward count +1
        session.query(SecondHand).filter(SecondHand.OBJECT_ID == kwargs['object_id']).update(
            {SecondHand.FORWARD_CNT: SecondHand.FORWARD_CNT + 1})
        # commit to the database
        session.commit()
        logging.info('OK : second_hand.py--->update_forward_cnt() succeeded')
        return RESULT_OK
    except Exception as e:
        session.rollback()
        logging.critical('Error : second_hand.py--->update_forward_cnt() failed :{}'.format(e))
        return []
    finally:
        session.close()
f6525dfa61e6fb2a5c39e344a6528d5f581393cd
3,659,349
import copy def recursive_dict_merge(dict1, dict2): """ Merges dictionaries (of dictionaries). Preference is given to the second dict, i.e. if a key occurs in both dicts, the value from `dict2` is used. """ result = copy.deepcopy(dict1) for key in dict2: if key in dict1 and isinstance(dict1[key], dict) and isinstance(dict2[key], dict): result[key] = recursive_dict_merge(dict1[key], dict2[key]) else: result[key] = dict2[key] return result
fbcb51ad47de0dd4d1c95cd59873918187736b63
3,659,350
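A quick usage sketch of recursive_dict_merge above, with illustrative dictionaries:

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}
print(recursive_dict_merge(defaults, overrides))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}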
import torch def define_loss(name, device="cuda"): """ Defines the loss function associated to the name. Supports losses in the LOSSES list, as well as the Lovasz, Softdice and Haussdorf losses. Args: name (str): Loss name. device (str, optional): Device for torch. Defaults to "cuda". Raises: NotImplementedError: Specified loss name is not supported. Returns: torch loss: Loss function """ if name in LOSSES: loss = getattr(torch.nn, name)(reduction="none") elif name == "lovasz": loss = lovasz_loss else: raise NotImplementedError return loss
b3705a116af18dbe7d4a8cf8539c544e057a08d6
3,659,351
def get_atomate_wflows(wf_coll, states, seed_regex=None, project_regex=None) -> pd.DataFrame: """Obtain workflow informaton for atomate jobs""" return get_workflows(wf_coll, ['atomate-relax'], states, seed_regex=seed_regex, project_regex=project_regex)
131609b7360ed6f378235b2b0c34268bbce5b641
3,659,356
def the_H_function(sorted_citations_list, n=1): """from a list of integers [n1, n2 ..] representing publications citations, return the max list-position which is >= integer eg >>> the_H_function([10, 8, 5, 4, 3]) => 4 >>> the_H_function([25, 8, 5, 3, 3]) => 3 >>> the_H_function([1000, 20]) => 2 """ if sorted_citations_list and sorted_citations_list[0] >= n: return the_H_function(sorted_citations_list[1:], n + 1) else: return n - 1
24ad3d85963ef0a9d4531ba552371d7e829f1c2a
3,659,357
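The docstring examples of the_H_function above can be checked directly:

assert the_H_function([10, 8, 5, 4, 3]) == 4
assert the_H_function([25, 8, 5, 3, 3]) == 3
assert the_H_function([1000, 20]) == 2
assert the_H_function([]) == 0  # no citations, h-index 0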
from pydantic import BaseModel # noqa: E0611 import importlib def build_trainer(model: BaseModel, params: Parameters, dataset: Dataset, target_processor: TargetProcessor, batch_processor: BatchProcessor) \ -> BaseTrainer: """ Build a neural network trainer/optimizer based on different backend :param model: Model (inherited from nnimgproc.model.BaseModel) :param params: Parameters (from nnimgproc.util.parameters), training parameter set such as learning rate :param dataset: Dataset (from nnimgproc.dataset), image provider :param target_processor: TargetProcessor (from nnimgproc.processor) :param batch_processor: BatchProcessor (from nnimgproc.processor) :return: Trainer (from nnimgproc.trainer) """ lib = importlib.import_module('nnimgproc.backend.%s' % model.backend) return lib.Trainer(model, params, dataset, target_processor, batch_processor)
b9a1ede91818e024b4a0932f13e37cda4b9b6d28
3,659,358
import random from bs4 import BeautifulSoup def get_random_quote(quotes_list): """Return a random quote to user.""" upper_limit = len(quotes_list)-1 select = random.randint(0, upper_limit) selected_quote = quotes_list[select] soup = BeautifulSoup(selected_quote, 'html.parser') return soup.text
c50f99640da88319c2b643b0fe1c386206c0c00b
3,659,359
from unittest.mock import patch def record_states(hass): """Record some test states. We inject a bunch of state updates from media player, zone and thermostat. """ mp = "media_player.test" mp2 = "media_player.test2" mp3 = "media_player.test3" therm = "thermostat.test" therm2 = "thermostat.test2" zone = "zone.home" script_c = "script.can_cancel_this_one" def set_state(entity_id, state, **kwargs): """Set the state.""" hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) zero = dt_util.utcnow() one = zero + timedelta(seconds=1) two = one + timedelta(seconds=1) three = two + timedelta(seconds=1) four = three + timedelta(seconds=1) states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []} with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one): states[mp].append( set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)}) ) states[mp].append( set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)}) ) states[mp2].append( set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)}) ) states[mp3].append( set_state(mp3, "idle", attributes={"media_title": str(sentinel.mt1)}) ) states[therm].append( set_state(therm, 20, attributes={"current_temperature": 19.5}) ) with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two): # This state will be skipped only different in time set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt3)}) # This state will be skipped because domain is excluded set_state(zone, "zoning") states[script_c].append( set_state(script_c, "off", attributes={"can_cancel": True}) ) states[therm].append( set_state(therm, 21, attributes={"current_temperature": 19.8}) ) states[therm2].append( set_state(therm2, 20, attributes={"current_temperature": 19}) ) with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three): states[mp].append( set_state(mp, "Netflix", attributes={"media_title": str(sentinel.mt4)}) ) states[mp3].append( set_state(mp3, "Netflix", attributes={"media_title": str(sentinel.mt3)}) ) # Attributes changed even though state is the same states[therm].append( set_state(therm, 21, attributes={"current_temperature": 20}) ) return zero, four, states
eb24d3ce56aa2ba4df9423107252bdd9142e0861
3,659,360
def ask_why(doc): """ Ask questions of the form “Why is ..x..?” where x is either a combination of object and adjective or subject and adjective or “Why ..prep.. the ..noun..” """ chunk = find_subj_chunk(doc) if chunk != None and chunk["adjective"] != None: subj = chunk["subject"] adj = chunk["adjective"] respond = "Why is {} {}?".format(subj, adj) return respond chunk = find_obj_chunk(doc) if chunk != None and chunk["adjective"] != None: subj = chunk["objective"] adj = chunk["adjective"] respond = "Why is {} {}?".format(subj, adj) return respond # I had similar experience in high school --> why in high school? chunk = find_prep_chunk(doc) if chunk != None: subj = chunk["full_subject"] prep = chunk["prep"] respond = "Why {} the {}?".format(prep, subj) return respond return None
64ef365f5ebd64dff4384cc558ecfa7856661fdd
3,659,361
def get_extension(fname): """ Get file extension. """ return '.' + fname.split(".")[-1]
9fa6f63d848aa7781b55e9cc384c9a8cb9665c69
3,659,362
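Note that get_extension above returns only the last extension:

print(get_extension("photo.jpeg"))       # .jpeg
print(get_extension("archive.tar.gz"))   # .gz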
import numpy as np


def large_xyz_to_luv_star(large_xyz, white_xyz):
    """
    # Overview
    Compute L*u*v* from XYZ.

    # Reference
    https://en.wikipedia.org/wiki/CIELUV
    """
    # large_xyz_to_uv_dash is assumed to be defined in the surrounding module.
    large_x, large_y, large_z = np.dsplit(large_xyz, 3)
    white_xyz = np.array(white_xyz)
    white_xyz = (white_xyz / white_xyz[1]).reshape((1, 1, 3))
    x_n, y_n, z_n = np.dsplit(white_xyz, 3)

    threshold = (6/29) ** 3
    judge = (large_y / y_n)
    l_lower = (judge <= threshold) * (((29/3) ** 3) * (large_y / y_n))
    l_upper = (judge > threshold) * (116 * ((large_y / y_n) ** (1/3)) - 16)
    l_star = l_lower + l_upper

    u_dash, v_dash = np.dsplit(large_xyz_to_uv_dash(large_xyz), 2)
    u_n_dash, v_n_dash = np.dsplit(large_xyz_to_uv_dash(white_xyz), 2)

    u_star = 13 * l_star * (u_dash - u_n_dash)
    v_star = 13 * l_star * (v_dash - v_n_dash)
    return np.dstack((l_star, u_star, v_star))
689c13d99c7e8b279c6cd718aad33fce8baa5a67
3,659,363
def rotation(new_rotation=0): """Set the display rotation. :param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270 """ global _rotation if new_rotation in [0, 90, 180, 270]: _rotation = new_rotation return True else: raise ValueError("Rotation: 0, 90, 180 or 270 degrees only")
4f12a90e104ef66e50520523d23b3fff421fa991
3,659,364
def parse_url(url: str) -> str:
    """
    Normalize a URL

    -> hello/world
    <- /hello/world
    """
    if url == "":
        url = "/"
    if not url.startswith('/'):
        url = "/" + url  # add a leading slash
    # if not url.endswith("/"): url += "/"  # add a trailing slash
    return url
dd2ace64bd5926f2b20a77c81a1e885e8a4d3d2b
3,659,365
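For example, parse_url above behaves as follows:

print(parse_url("hello/world"))   # /hello/world
print(parse_url("/hello/world"))  # /hello/world
print(parse_url(""))              # /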
def bulk_edit(modeladmin, request, queryset): """ Bulk edit selected items. """ form = None if 'apply' in request.POST: form = BulkEditForm(request.POST) if form.is_valid(): property = form.cleaned_data['property'] cf_value = form.cleaned_data['cf_value'] ff_value = form.cleaned_data['ff_value'] inline_notes = form.cleaned_data['inline_notes'] footnotes = form.cleaned_data['footnotes'] overwrite = form.cleaned_data['overwrite'] delete_only = form.cleaned_data['delete_only'] if queryset.model is Subject: entity_type = 'SO' elif queryset.model is Location: entity_type = 'SL' elif queryset.model is Media: entity_type = 'MP' elif queryset.model is File: entity_type = 'MF' else: entity_type = 'PO' if not delete_only and (property.control_field and not cf_value): modeladmin.message_user(request, 'UPDATE FAILED: If you would like to update a Controlled Property, you must selected a Controlled Term', level=messages.ERROR) return HttpResponseRedirect(request.get_full_path()) elif not delete_only and (not property.control_field and (not ff_value or ff_value == '')): modeladmin.message_user(request, 'UPDATE FAILED: If you would like to update a Free-Form Property, you must provide a Free-Form value', level=messages.ERROR) return HttpResponseRedirect(request.get_full_path()) if property.primary_type != 'AL' and property.primary_type != entity_type: modeladmin.message_user(request, 'UPDATE FAILED: You selected a property which is not available for this Entity. If you would like to make it available, go to the Descriptive Property table and change Primary Type to All', level=messages.ERROR) return HttpResponseRedirect(request.get_full_path()) if cf_value and cf_value.type != property: modeladmin.message_user(request, 'UPDATE FAILED: You selected a Controlled Term that is not a value for the selected Property', level=messages.ERROR) return HttpResponseRedirect(request.get_full_path()) for item in queryset: if queryset.model is Subject: if property.control_field: if overwrite or delete_only: control_props = SubjectControlProperty.objects.filter(subject = item, control_property = property) for cp in control_props: cp.delete() if not delete_only: new_cp = SubjectControlProperty(subject = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_cp.save() else: if overwrite or delete_only: ff_props = SubjectProperty.objects.filter(subject = item, property = property) for fp in ff_props: fp.delete() if not delete_only: new_fp = SubjectProperty(subject = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_fp.save() elif queryset.model is Location: if property.control_field: if overwrite or delete_only: control_props = LocationControlProperty.objects.filter(location = item, control_property = property) for cp in control_props: cp.delete() if not delete_only: new_cp = LocationControlProperty(location = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_cp.save() else: if overwrite or delete_only: ff_props = LocationProperty.objects.filter(location = item, property = property) for fp in ff_props: fp.delete() if not delete_only: new_fp = LocationProperty(location = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_fp.save() elif queryset.model is Media: if 
property.control_field: if overwrite or delete_only: control_props = MediaControlProperty.objects.filter(media = item, control_property = property) for cp in control_props: cp.delete() if not delete_only: new_cp = MediaControlProperty(media = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_cp.save() else: if overwrite or delete_only: ff_props = MediaProperty.objects.filter(media = item, property = property) for fp in ff_props: fp.delete() if not delete_only: new_fp = MediaProperty(media = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_fp.save() elif queryset.model is PersonOrg: if property.control_field: if overwrite or delete_only: control_props = PersonOrgControlProperty.objects.filter(person_org = item, control_property = property) for cp in control_props: cp.delete() if not delete_only: new_cp = PersonOrgControlProperty(person_org = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_cp.save() else: if overwrite or delete_only: ff_props = PersonOrgProperty.objects.filter(person_org = item, property = property) for fp in ff_props: fp.delete() if not delete_only: new_fp = PersonOrgProperty(person_org = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_fp.save() elif queryset.model is File: if property.control_field: if overwrite or delete_only: control_props = FileControlProperty.objects.filter(file = item, control_property = property) for cp in control_props: cp.delete() if not delete_only: new_cp = FileControlProperty(file = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_cp.save() else: if overwrite or delete_only: ff_props = FileProperty.objects.filter(file = item, property = property) for fp in ff_props: fp.delete() if not delete_only: new_fp = FileProperty(file = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user) new_fp.save() modeladmin.message_user(request, _("%s %s." % ('Selected property edited: ', property))) return HttpResponseRedirect(request.get_full_path()) if not form: form = BulkEditForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)}) opts = queryset.model._meta app_label = opts.app_label return render_to_response( 'admin/bulk_edit.html', {'items': queryset, 'bulk_edit_form': form, "opts": opts, "app_label": app_label}, context_instance = RequestContext(request) )
069eeb1a32d91bf7eeb055fa4014210f52e4792b
3,659,366
def smooth_GF(C, S, avg_rad, start_deg):
    """from Wahr et al: Time-variable gravity recovery from space eq. 34.
    This is Jekeli's [1981] smoothing method."""
    C_smooth = C
    S_smooth = S
    Re = 6378.1363  # Radius of Earth in km
    b = np.log(2) / (1 - np.cos(avg_rad / Re))
    W = []
    W.append(1 / (2 * np.pi))
    W.append(1 / (2 * np.pi) * ((1 + np.exp(-2 * b)) / (1 - np.exp(-2 * b)) - 1 / b))
    for j in range(start_deg, C.shape[0]):
        w = (-(2 * (j - 1) + 1) / b * W[j - 1]) + W[j - 2]
        W.append(w)
        if W[j] < 0.:
            W[j] = 0.
        if W[j - 1] == 0.:
            W[j] = 0.
    for i in range(start_deg - 1, C.shape[0]):
        C_smooth[i, :] = C[i, :] * W[i] * 2. * np.pi
        S_smooth[i, :] = S[i, :] * W[i] * 2. * np.pi
    return C_smooth, S_smooth
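A minimal usage sketch for the Gaussian smoothing above, assuming numpy is imported as np and that C and S are square arrays of spherical-harmonic (Stokes) coefficients indexed by degree and order; the array size, the 350 km averaging radius and start_deg=2 are illustrative assumptions, not values from the source.

import numpy as np

# Hypothetical coefficient arrays for degrees/orders 0..60.
C = np.random.randn(61, 61) * 1e-9
S = np.random.randn(61, 61) * 1e-9

# Note that smooth_GF writes into its inputs (C_smooth is an alias of C),
# hence the copies here.
C_s, S_s = smooth_GF(C.copy(), S.copy(), avg_rad=350.0, start_deg=2)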
89c0edfe4af3ef476485517e06363acb09be4fda
3,659,367
def get_vmstat():
    """
    Get and format the content of /proc/vmstat
    """
    buf = open("/proc/vmstat").read()
    buf = [v.replace(' ', ":") for v in buf.split("\n")]
    buf = ";".join(buf)
    return buf
b2db72bbc3b143ff1ba37ee7e2dcc33295d4a4ea
3,659,368
def upcomingIPOs(
    symbol="",
    exactDate="",
    token="",
    version="stable",
    filter="",
    format="json",
):
    """This will return all upcoming estimates, dividends, splits for a given symbol or the market.
    If market is passed for the symbol, IPOs will also be included.

    https://iexcloud.io/docs/api/#upcoming-events

    Args:
        symbol (str): Symbol to look up
        exactDate (str): exactDate Optional. Exact date for which to get data
        token (str): Access token
        version (str): API version
        filter (str): filters: https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Returns:
        dict or DataFrame: result
    """
    _raiseIfNotStr(symbol)
    symbol = _quoteSymbols(symbol)
    if symbol:
        url = "stock/{}/upcoming-ipos".format(symbol)
    else:
        url = "stock/market/upcoming-ipos"
    if exactDate:
        url += "?exactDate={}".format(exactDate)
    return _get(url, token, version, filter)
0e456c36fb4cbb11eb863f22ae06fb01589fc21a
3,659,369
from typing import Iterable
from typing import List


def sort_tokens(tokens: Iterable[Cwf]) -> List[Cwf]:
    """Sort tokens by natural order (sent, offset)"""
    return sorted(tokens, key=lambda t: (t.get_sent(), int(t.get_offset())))
6b9ced6bdb72a1f53c7e721f5212894caa2c4756
3,659,370
def deep_seq_map(xss, fun, keys=None, fun_name=None, expand=False): """Applies fun to list of or dict of lists; adds the results in-place. Usage: Transform a corpus iteratively by applying functions like `tokenize`, `lower`, or vocabulary functions (word -> embedding id) to it. from jtr.sisyphos.vocab import Vocab vocab = Vocab() keys = ['question', 'support'] corpus = deep_map(corpus, lambda x: x.lower(), keys) corpus = deep_map(corpus, tokenize, keys) corpus = deep_map(corpus, vocab, keys) corpus = deep_map(corpus, vocab._normalize, keys=keys) -> through tokenize we go from a dict of sentences to a dict of words (list of lists), thus we now apply deep_seq_map for processing to add start of and end of sentence tags: corpus = deep_seq_map(corpus, lambda xs: ["<SOS>"] + xs + ["<EOS>"], ['question', 'support']) -> From here we can create batches from the corpus and feed it into a model. In case `expand==False` each top-level entry of `xs` to be transformed replaces the original entry. `deep_map` supports `xs` to be a dictionary or a list/tuple: - In case `xs` is a dictionary, its transformed value is also a dictionary, and `keys` contains the keys of the values to be transformed. - In case `xs` is a list/tuple, `keys` contains the indices of the entries to be transformed The function `deep_map` is recursively applied to the values of `xs`; the function `fun` takes a sequence as input, and is applied at the one but deepest level, where the entries are sequences of objects (no longer sequences of sequences). This is the only difference with `deep_map` Args: `xs`: a sequence (list/tuple) of objects or sequences of objects. `fun`: a function to transform sequences `keys`: seq with keys if `xs` is dict; seq with integer indices if `xs` is seq. For entries not in `keys`, the original `xs` value is retained. `fun_name`: default value 'trf'; string with function tag (e.g. 'lengths'), used if '''expand==True''' and '''isinstance(xs,dict)''' Say for example fun_name='count', and `keys` contains 'sentence', then the transformed dict would look like '''{'sentence':[sentences], 'sentence_lengths':[fun(sentences)] ...}''' Returns: Transformed sequence or dictionary. Example: >>> dave = [ ... "All work and no play makes Jack a dull boy", ... "All work and no play makes Jack a dull boy.", ... "All work and no play makes Jack a very dull boy!"] >>> jack = [ ... "I'm sorry Dave, I'm afraid I can't do that!", ... "I'm sorry Dave, I'm afraid I can't do that", ... "I'm sorry Dave, I'm afraid I cannot do that"] >>> support = [ ... ["Play makes really dull", "really dull"], ... ["Dave is human"], ... ["All work", "all dull", "dull"]] >>> data2 = {'dave': dave, 'jack': jack, 'support': support} >>> vocab2 = Vocab() >>> data2_processed = deep_map(data2, lambda x: tokenize(x.lower())) >>> data2_ids = deep_map(data2_processed, vocab2) >>> data2_ids_with_lengths = deep_seq_map(data2_ids, lambda xs: len(xs), keys=['dave','jack','support'], ... 
fun_name='lengths', expand=True) >>> pprint.pprint(data2_ids_with_lengths) {'dave': [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [1, 2, 3, 4, 5, 6, 7, 8, 12, 9, 10, 13]], 'dave_lengths': [10, 11, 12], 'jack': [[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24, 13], [14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24], [14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 25, 23, 24]], 'jack_lengths': [17, 16, 14], 'support': [[[5, 6, 26, 9], [26, 9]], [[18, 27, 28]], [[1, 2], [1, 9], [9]]], 'support_lengths': [[4, 2], [3], [2, 2, 1]]} """ if isinstance(xss, list) and all([not isinstance(xs, list) for xs in xss]): return fun(xss) else: if isinstance(xss, dict): xss_mapped = {} for k, xs in xss.items(): if keys is None or k in keys: if expand: xss_mapped[k] = xs k = '%s_%s' % (str(k), str(fun_name) if fun_name is not None else 'trf') if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]): xss_mapped[k] = fun(xs) else: xss_mapped[k] = deep_seq_map(xs, fun) # fun_name not needed, because expand==False else: xss_mapped[k] = xs else: xss_mapped = [] for k, xs in enumerate(xss): if keys is None or k in keys: if expand: xss_mapped.append(xs) if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]): xss_mapped.append(fun(xs)) else: xss_mapped.append(deep_seq_map(xs, fun)) else: xss_mapped.append(xs) return xss_mapped
59406ae1ee87bfea82f4b22fb3d5fb96c29ccda6
3,659,371
def create_user(steamid, admin):
    """Create a user"""
    steamid = string_to_steamid(steamid)
    if not steamid.is_valid() or not steamid.type == EType.Individual:
        echo('Invalid steam ID')
        return 1
    user = User(steamid64=steamid.as_64, admin=admin)
    user.refresh_name()
    if user.name is not None:
        db.session.add(user)
        db.session.commit()
        echo('added ' + user.name)
    else:
        echo('No such steam user')
        return 1
22f6a63d85d57e7df0ae5dad0e62e41ee7c6388a
3,659,372
import json


def get_vocabularies():
    """
    Return the currently used ontology
    :return:
    """
    vocabs = vocs.get_vocabularies()
    vocabs = [(x, url_for('get_vocabulary', vid=x, _external=True)) for x in vocabs]
    response = make_response(json.dumps(dict(vocabs)))
    response.headers['Content-Type'] = 'application/json'
    return response
3b447b297209e8d6fa238ba8f0cf932f0e3eed84
3,659,373
def do3byte(*args):
    """do3byte(ea_t ea, asize_t length) -> bool"""
    return _idaapi.do3byte(*args)
fa82f7dc5bfa5dcaea7db604ea4c7f1fc5883794
3,659,374
import logging def compute_sap(ground_truth_data, representation_function, random_state, num_train=gin.REQUIRED, num_test=gin.REQUIRED, batch_size=16, continuous_factors=gin.REQUIRED): """Computes the SAP score. Args: ground_truth_data: GroundTruthData to be sampled from. representation_function: Function that takes observations as input and outputs a dim_representation sized representation for each observation. random_state: Numpy random state used for randomness. num_train: Number of points used for training. num_test: Number of points used for testing discrete variables. batch_size: Batch size for sampling. continuous_factors: Factors are continuous variable (True) or not (False). Returns: Dictionary with SAP score. """ logging.info("Generating training set.") mus, ys = utils.generate_batch_factor_code( ground_truth_data, representation_function, num_train, random_state, batch_size) mus_test, ys_test = utils.generate_batch_factor_code( ground_truth_data, representation_function, num_test, random_state, batch_size) logging.info("Computing score matrix.") score_matrix = _compute_score_matrix(mus, ys, mus_test, ys_test, continuous_factors) # Score matrix should have shape [num_latents, num_factors]. assert score_matrix.shape[0] == mus.shape[0] assert score_matrix.shape[1] == ys.shape[0] scores_dict = {} scores_dict["SAP_score"] = _compute_avg_diff_top_two(score_matrix) logging.info("SAP score: %.2g", scores_dict["SAP_score"]) return scores_dict
f9aad7f12597491cb57d5b7180debb76da3bc01f
3,659,375
import re


def clean_cmd(cmd):
    """Removes multiple spaces and whitespace at beginning or end of command.

    Args:
        cmd (str): A string containing the command to clean.

    Returns:
        A cleaned command string.
    """
    # The quantifier must not contain a space: r'\s{2, }' is treated as
    # literal text by Python's re module and would never match.
    return re.sub(r'\s{2,}', ' ', cmd).strip(' \t\n\r')
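A quick check of the behaviour of clean_cmd above (illustrative input only):

>>> clean_cmd('  ls   -la   /tmp\n')
'ls -la /tmp'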
d98f4fea9791cbb5936b306ee74335efc6515902
3,659,376
def multiply_scalar(mat, value):
    """
    Multiplies every element in the matrix by the given scalar value.

    Args:
        mat (Matrix): The input matrix.
        value (int or float): The number that mat will be multiplied by.

    Returns:
        Matrix: The resulting matrix from the multiplication of mat and value.
    """
    mat_1d = flatten(mat.data)
    result = [x * value for x in mat_1d]
    return reshape(Matrix(result), mat.shape())
3b2469213ddb93e06ce210ee082a403f5ed2cc4a
3,659,377
def bin4D(data4D, bin_factor): """ Bin 4D data in spectral dimensions Parameters ---------- data4D: ndarray of shape (4,4) the first two dimensions are Fourier space, while the next two dimensions are real space bin_factor: int Value by which to bin data Returns ------- binned_data: ndarray of shape (4,4) Data binned in the spectral dimensions Notes ----- The data is binned in the first two dimensions - which are the Fourier dimensions using the internal numba functions `resizer2D_numbaopt` and `resizer1D_numbaopt` See Also -------- resizer1D_numbaopt resizer2D_numbaopt """ data4D_flat = np.reshape( data4D, (data4D.shape[0], data4D.shape[1], data4D.shape[2] * data4D.shape[3]) ) datashape = np.asarray(data4D_flat.shape) res_shape = np.copy(datashape) res_shape[0:2] = np.round(datashape[0:2] / bin_factor) data4D_res = np.zeros(res_shape.astype(int), dtype=data4D_flat.dtype) resampled_x = np.zeros((datashape[0], res_shape[1]), data4D_flat.dtype) resampled_f = np.zeros(res_shape[0:2], dtype=data4D_flat.dtype) for zz in range(data4D_flat.shape[-1]): data4D_res[:, :, zz] = resizer2D_numbaopt( data4D_flat[:, :, zz], resampled_x, resampled_f, bin_factor ) binned_data = np.reshape( data4D_res, (resampled_f.shape[0], resampled_f.shape[1], data4D.shape[2], data4D.shape[3]), ) return binned_data
baa00278bb5e4c7fcef6f1a78019f227533de586
3,659,378
def lorentz(x, a, mu, ga):
    """
    Input: x - value and a=I, mu=x_0, ga - Lorentz function coefficients (float)
    Return: value of the function with the desired parameters at x (float)
    Descr.: Calculate an L-type (Lorentzian) function for given x and parameters"""
    return (a * ga ** 2) / ((x - mu) ** 2 + ga ** 2)
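For reference, the function above evaluates the standard Lorentzian peak profile

    L(x) = \frac{a \gamma^2}{(x - \mu)^2 + \gamma^2},

which equals a at the peak position x = \mu and falls to a/2 at |x - \mu| = \gamma, so \gamma is the half-width at half-maximum.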
1af83bdca1ff14f25da86cb0f3dacbd36409f1e1
3,659,379
def main(arguments):
    """
    Create and return the Thunder object for the given arguments.
    :param arguments: a thunder object or a dictionary to initialise the thunder obj
    :return:
    """
    thunder = Thunder(deepcopy(arguments))  # load object
    return thunder
d628d6b1fb3550b1d1056613680437b847ab7102
3,659,380
def make_train_func( model, loss_func, optimizer, dtype=None, device=None, call_model=None, get_train_loss=None, ): """Create a train func for ``ignite``. This function assumes that each batch is of the form ``(x, y)`` with no assumptions placed on ``x`` or ``y``. Each batch will be transformed into a ``torch.Tensor``. :param model: the model to optimize, it will be called with the features :param loss_func: the loss function to optimize, it will be called with the model output and the targets. The return value of the loss function must be compatible with ``get_train_loss``. :param optimizer: the optimizer to use :param dtype: the dtype of the batch, can be a structured object, e.g., a tuple of dtypes :param device: the device to assign to the batch, can be a structured object, e.g., a tuple of devices :param call_model: instead of calling the model directly, ``call_model(model, x)`` it will be used. If not given a default implementation is used, that passes ``x`` as varargs if it is a tuple, as keyword args if it is a dict and directly otherwise. :param get_train_loss: The output of ``loss_func`` will be passed through ``get_train_loss`` before calling backward. If not given a default implementation is used that takes the first item of the loss if it is a tuple and the loss directly otherwise. """ def default_get_train_loss(loss): return loss[0] if isinstance(loss, tuple) else loss if call_model is None: call_model = default_call_model if get_train_loss is None: get_train_loss = default_get_train_loss def train_func(engine, batch): x, y = n2t(batch, dtype=dtype, device=device) optimizer.zero_grad() pred = call_model(model, x) loss = loss_func(pred, y) train_loss = get_train_loss(loss) train_loss.backward() optimizer.step() return t2n(loss) return train_func
5ceb230e7c6fce891a23416883b12bd09c83ccd5
3,659,381
import time


def waitAndLocate(btn_img, params):
    """
    Function to locate a button in the window
    :param btn_img: path to the image of the button to look for
    :param params: options dict; 'no_fullscreen' skips maximizing windows
    :return: coordinates + dimensions of the button
    """
    start = time.time()
    while True:
        if time.time() - start > (3 * 60):
            print("Timeout Error")
            raise TimeoutError(f"wait and locate exceeded {str(time.time() - start)}")
        # Find window and maximize
        if 'no_fullscreen' not in params or params['no_fullscreen'] == False:
            maximizeWindows(params)
        # Make foreground window full screen - replaced with exact window name lookup
        # win32gui.ShowWindow(win32gui.GetForegroundWindow(), win32con.SW_MAXIMIZE)
        # Look for the button on the screen
        res = pyautogui.locateOnScreen(btn_img, confidence=0.75)
        # If button is found, return the location
        if res:
            return res
        # Wait 0.5 seconds before retrying to keep CPU usage low
        time.sleep(0.5)
daf7c32d67f1d958c8dcd15e3721823863b44365
3,659,382
def quick_sort(array):
    """Not in-place, but the standard recursive version."""
    if array == []:
        return []
    else:
        pivot = array[-1]
        smaller = quick_sort([x for x in array[0:-1] if x <= pivot])
        larger = quick_sort([x for x in array[0:-1] if x > pivot])
        return smaller + [pivot] + larger
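A short sanity check of quick_sort above (illustrative values):

>>> quick_sort([5, 3, 8, 1, 9, 2])
[1, 2, 3, 5, 8, 9]
>>> quick_sort([])
[]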
40b969855394600a94ed264f5bffade95c72455e
3,659,384
def calc_laplacian_matrix(D, W):
    """
    Given the degree matrix and the similarity matrix of a graph, compute its Laplacian matrix.
    :param W: similarity (affinity) matrix
    :param D: degree matrix of the graph
    :return: Laplacian matrix
    """
    return D - W
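A small worked example for the unnormalized graph Laplacian above, assuming NumPy arrays; the affinity matrix W here is an arbitrary illustrative choice:

import numpy as np

W = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])
D = np.diag(W.sum(axis=1))       # degree matrix
L = calc_laplacian_matrix(D, W)  # [[ 2., -1., -1.], [-1., 1., 0.], [-1., 0., 1.]]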
542efe382457a34587615d24935c040238098610
3,659,385
def _potrf_mhlo(platform, gpu_solver, dtype, a, lower): """Cholesky decomposition.""" a_type = ir.RankedTensorType(a.type) dims = a_type.shape m, n = dims[-2:] assert m == n batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) batch = _prod(batch_dims) lwork, opaque = gpu_solver.build_potrf_descriptor( np.dtype(dtype), lower, batch, n) layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) info_layout = tuple(range(num_bd - 1, -1, -1)) i32_type = ir.IntegerType.get_signless(32) info_type = ir.RankedTensorType.get(batch_dims, i32_type) work_layout = [0] out = custom_call( f"{platform}solver_potrf", [ a.type, info_type, ir.RankedTensorType.get([lwork], ir.IntegerType.get_signless(8)), ], [a], backend_config=opaque, operand_layouts=[layout], result_layouts=[layout, info_layout, work_layout]) return out[:2]
6f9a8aef2bec2d063ebaf7c739b11879a67fd342
3,659,386
def _bin2bcd(value):
    """Convert a binary value to binary coded decimal.

    :param value: the binary value to convert to BCD. (required, no default)
    """
    return value + 6 * (value // 10)
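The formula works for two-digit values because each tens unit must occupy a full high nibble (worth 16) rather than 10, so adding 6 per ten shifts the tens digit into the upper nibble. A quick illustration:

>>> _bin2bcd(42)        # 42 + 6*4 = 66 == 0x42
66
>>> hex(_bin2bcd(59))   # 59 + 6*5 = 89 == 0x59
'0x59'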
508383fe8964da3a09699ee8e68f36cea4162746
3,659,387
import requests
from bs4 import BeautifulSoup


def osm_get_info(idx):
    """Get information about an administrative territory
    """
    link = 'https://www.openstreetmap.org/api/0.6/relation/' + str(idx)
    response = requests.get(link)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'xml')
        subarea_ids = [member.get('ref') for member in soup.find_all('member', {'role': 'subarea'})]
        name = soup.find('tag', {'k': 'name'})
        name = name.get('v')
        return name, subarea_ids
    return False
0d2ee403c6bcf3a5c5ee37d01b9b0925bfac6081
3,659,388
import sqlite3 def get_test_cases_coverage(session_id): """ coverage by test case """ tc_stats={} tc_stats_list=[] total_executed=0 sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!="null"' params={"sid":session_id} conn=sqlite3.connect(CONNECTION_STRING) c=conn.cursor() c.execute(sql,params) tests=c.fetchall() conn.close() if len(tests)>0: for t in tests: total_executed=0 sql="SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid" params={"sid":session_id,"tid":t[0]} conn=sqlite3.connect(CONNECTION_STRING) c=conn.cursor() c.execute(sql,params) files=c.fetchall() conn.close() for f in files: line_count=get_executable_lines_count_for_file(f[0]) # get executions sql="SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid" params={"sid":session_id,"tid":t[0],"fid":f[0]} conn=sqlite3.connect(CONNECTION_STRING) c=conn.cursor() c.execute(sql,params) executed=c.fetchone() conn.close() total_executed+=executed[0] # save test case and it's executions tc_stats={} tc_stats["test_id"]=t[0] tc_stats["total_executed"]=total_executed tc_stats["total_executed"] tc_stats_list.append(tc_stats) return tc_stats_list
acceee566f53b95316c9ccd2654b89f6a60c7a5a
3,659,389
def can_minimize_file(file_path):
    """Check to see if we support minimization for this file."""
    # If this is not a binary file, we should be able to minimize it in some way.
    if not utils.is_binary_file(file_path):
        return True

    # Attempt to minimize IPC dumps.
    if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
        return supports_ipc_minimization(file_path)

    # Other binary file formats are not supported.
    return False
3d8f5e2ee4f834a353ce8973cd64da66712c8c1c
3,659,390
def generate_new_xen_xml(VIRSH_TEMPLATE, vm_name, disk_img, mac_addr,
                         memory_size=1048576,  # 1GB of memory
                         cpu_count=1):
    """
    Given a name, disk, and mac, this will output the appropriate xml config
    """
    tmp = VIRSH_TEMPLATE
    tmp = tmp.replace(REPLACE_STRINGS.vm_name, vm_name)
    tmp = tmp.replace(REPLACE_STRINGS.disk_img, disk_img)
    tmp = tmp.replace(REPLACE_STRINGS.mac_addr, mac_addr)
    tmp = tmp.replace(REPLACE_STRINGS.memory_size, str(memory_size))
    tmp = tmp.replace(REPLACE_STRINGS.cpu_count, str(cpu_count))
    return tmp
28eec535a5924a847bd887b903014aca4a97dd9b
3,659,391
def literal_query(query):
    """Don't interpret any special query syntax

    SQLite's FTS extensions support special query syntax for AND, OR and
    prefix searches, as well as grouping and negation. These are not of much
    use in the dictionary case, but they break some legitimate queries. So
    let's treat all queries literally by enclosing them in quotes.
    """
    return '"' + query.replace('"', '') + '"'
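Illustrative behaviour of literal_query above; stripping embedded quotes is what keeps the FTS parser from seeing phrase delimiters:

>>> literal_query('cat OR dog')
'"cat OR dog"'
>>> literal_query('say "hello"')
'"say hello"'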
65c5f3215a2d36fb15b54e5420ce52ac27d1b420
3,659,392
def graph_from_string(s):
    """
    Turn a string like "1 2; 1->2" into a graph.
    """
    vertex_string, edge_string = s.split(';')
    vertices = vertex_string.split()

    edge_pairs = []
    for edge_sequence in edge_string.split():
        sequence_nodes = edge_sequence.split('->')
        for tail, head in zip(sequence_nodes[:-1], sequence_nodes[1:]):
            edge_pairs.append((tail, head))

    return DirectedGraph.from_edge_pairs(vertices, edge_pairs)
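A short example of the edge syntax the parser above accepts; chained arrows expand into consecutive edge pairs (DirectedGraph is this project's own class, so only the parsed pieces are shown):

# graph_from_string("1 2 3; 1->2->3 3->1") parses into
#   vertices:   ['1', '2', '3']
#   edge pairs: [('1', '2'), ('2', '3'), ('3', '1')]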
772c876eb4c38fb4d595ee57fb5192622c92e837
3,659,394
def WideResnetBlocknt(channels, strides=(1, 1), channel_mismatch=False, batchnorm='std', parameterization='ntk'):
    """A WideResnet block, with or without BatchNorm."""
    Main = stax_nt.serial(
        _batch_norm_internal(batchnorm),
        stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), strides, padding='SAME', parameterization=parameterization),
        _batch_norm_internal(batchnorm),
        stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), padding='SAME', parameterization=parameterization))
    Shortcut = stax_nt.Identity() if not channel_mismatch else stax_nt.Conv(
        channels, (3, 3), strides, padding='SAME', parameterization=parameterization)
    return stax_nt.serial(stax_nt.FanOut(2), stax_nt.parallel(Main, Shortcut), stax_nt.FanInSum())
d1786bf36703669627807f9bf881630ff1592ef5
3,659,395
import torch


def inverse_pinhole_matrix(pinhole, eps=1e-6):
    """
    Returns the inverted pinhole matrix from a pinhole model
    """
    assert len(pinhole.shape) == 2 and pinhole.shape[1] == 12, pinhole.shape
    # unpack pinhole values
    fx, fy, cx, cy = torch.chunk(pinhole[..., :4], 4, dim=1)  # Nx1

    # create output container
    k = torch.eye(4, device=pinhole.device, dtype=pinhole.dtype)
    k = k.view(1, 4, 4).repeat(pinhole.shape[0], 1, 1)  # Nx4x4

    # fill output with inverse values
    k[..., 0, 0:1] = 1. / (fx + eps)
    k[..., 1, 1:2] = 1. / (fy + eps)
    k[..., 0, 2:3] = -1. * cx / (fx + eps)
    k[..., 1, 2:3] = -1. * cy / (fy + eps)
    return k
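A small usage sketch for the inverse above, assuming (as the unpacking implies) that the first four entries of the 12-element pinhole vector are fx, fy, cx, cy; the remaining entries are unused by this function and the intrinsics are illustrative:

import torch

pinhole = torch.zeros(1, 12)
pinhole[0, :4] = torch.tensor([500., 500., 320., 240.])  # fx, fy, cx, cy
K_inv = inverse_pinhole_matrix(pinhole)                   # shape (1, 4, 4)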
e2fd741598b858f9d8731e4dc2b0c79913941dbf
3,659,396
from unittest.mock import patch


async def init_integration_empty_response(hass) -> MockConfigEntry:
    """Set up the Nightscout integration in Home Assistant."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_URL: "https://some.url:1234"},
    )

    with patch(
        "homeassistant.components.nightscout.NightscoutAPI.get_sgvs", return_value=[]
    ), patch(
        "homeassistant.components.nightscout.NightscoutAPI.get_server_status",
        return_value=SERVER_STATUS,
    ):
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    return entry
890230dabafbfa939433251fb1b4ba2a9b14a7bf
3,659,397
def create_DCT_NETWORK_INFO(networkid: str) -> dict:
    """Computes dictionary DCT_NETWORK_INFO for XML file

    :param networkid: network identifier
    :type networkid: str
    :return: dict
    :rtype: [type]
    """
    DCT_NETWORK_INFO.update({"id": networkid})
    return DCT_NETWORK_INFO
96eeb48e35bebfc4bc1923685f8bb627dfc5f473
3,659,398
def retrieve_question(request, uuid): """ """ try: question = Question.objects.get(pk=uuid) except (Question.DoesNotExist, ValueError): response_data = { "error": { "state": "not found", "details": "Question object with ID {} could not be found.".format(uuid) } } return Response(response_data, status=status.HTTP_404_NOT_FOUND) if question.survey.is_private: if request.user.is_authenticated: if request.user == question.survey.admin or request.user in question.survey.users.all(): serializer = QuestionSerializer(question, context={'request': request}) response_data = serializer.data return Response(response_data, status=status.HTTP_200_OK) else: return Response({"error": "This question is part of a private survey."}, status=status.HTTP_403_FORBIDDEN) else: return Response({"error": "Please login."}, status=status.HTTP_401_UNAUTHORIZED) else: serializer = QuestionSerializer(question, context={'request': request}) response_data = serializer.data return Response(response_data, status=status.HTTP_200_OK)
ee6409c2b724977744d66d3fba7efa17fa75284c
3,659,399
def gamescriptToJson(title: str, version: str = None) -> dict:
    """
    Get game script hierarchy as a dictionary (for saving as json, etc)
    """
    scripts = GameScript.objects.all().filter(title=title)
    if version:
        scripts = scripts.filter(version=version)
    if len(scripts) == 0:
        print("No title with that name and version")
        return
    if len(scripts) > 1:
        print("The following titles with versions were found. Please choose one.")
        print([script.title for script in scripts])
        return
    script = scripts[0]
    return script.toJson()
c76779b76b69fb1816f9e96136fdee903212d831
3,659,400
def is_ignored_faces(faces):
    """Check if the faces are ignored faces.

    Args:
        faces: Encoded face from face_recognition.

    Returns:
        bool: If a not ignored face appeared, return false, otherwise true.
    """
    global ignored_faces
    for face in faces:
        matches = face_recognition.compare_faces(ignored_faces, face)
        if False in matches:
            return False
    return True
bda7703cfb471ac5c95cb6aa30f6d758129ae8a5
3,659,401
from typing import Optional def get_prediction_model_status(hub_name: Optional[str] = None, prediction_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPredictionModelStatusResult: """ The prediction model status. :param str hub_name: The name of the hub. :param str prediction_name: The name of the Prediction. :param str resource_group_name: The name of the resource group. """ __args__ = dict() __args__['hubName'] = hub_name __args__['predictionName'] = prediction_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:customerinsights/v20170426:getPredictionModelStatus', __args__, opts=opts, typ=GetPredictionModelStatusResult).value return AwaitableGetPredictionModelStatusResult( message=__ret__.message, model_version=__ret__.model_version, prediction_guid_id=__ret__.prediction_guid_id, prediction_name=__ret__.prediction_name, signals_used=__ret__.signals_used, status=__ret__.status, tenant_id=__ret__.tenant_id, test_set_count=__ret__.test_set_count, training_accuracy=__ret__.training_accuracy, training_set_count=__ret__.training_set_count, validation_set_count=__ret__.validation_set_count)
14ff24d3f7edf674c5cd29a643ae28a1a3d8ed99
3,659,402
def build_2d_grid(ir):
    """Build simple grid with a column for each gate."""
    grid = []
    for g in ir.gates:
        step = [None] * ir.ngates
        if g.is_single():
            step[g.idx0] = g
        if g.is_ctl():
            step[g.ctl] = g.ctl
            step[g.idx1] = g
        grid.append(step)
    return grid
55c17327fb530301ca505b42cdb8d47426491374
3,659,403
from typing import Union from typing import List from typing import Tuple from typing import Dict from typing import Any import copy def emmental_collate_fn( batch: Union[List[Tuple[Dict[str, Any], Dict[str, Tensor]]], List[Dict[str, Any]]], min_data_len: int = 0, max_data_len: int = 0, ) -> Union[Tuple[Dict[str, Any], Dict[str, Tensor]], Dict[str, Any]]: """Collate function. Args: batch: The batch to collate. min_data_len: The minimal data sequence length, defaults to 0. max_data_len: The maximal data sequence length (0 means no limit), defaults to 0. Returns: The collated batch. """ X_batch: defaultdict = defaultdict(list) Y_batch: defaultdict = defaultdict(list) for item in batch: # Check if batch is (x_dict, y_dict) pair if isinstance(item, dict): x_dict = item y_dict: Dict[str, Any] = dict() else: x_dict, y_dict = item for field_name, value in x_dict.items(): if isinstance(value, list): X_batch[field_name] += value else: X_batch[field_name].append(value) for label_name, value in y_dict.items(): if isinstance(value, list): Y_batch[label_name] += value else: Y_batch[label_name].append(value) field_names = copy.deepcopy(list(X_batch.keys())) for field_name in field_names: values = X_batch[field_name] # Only merge list of tensors if isinstance(values[0], Tensor): item_tensor, item_mask_tensor = list_to_tensor( values, min_len=min_data_len, max_len=max_data_len, ) X_batch[field_name] = item_tensor if item_mask_tensor is not None: X_batch[f"{field_name}_mask"] = item_mask_tensor for label_name, values in Y_batch.items(): Y_batch[label_name] = list_to_tensor( values, min_len=min_data_len, max_len=max_data_len, )[0] if len(Y_batch) != 0: return dict(X_batch), dict(Y_batch) else: return dict(X_batch)
b18c7ebf50f5554055da5de8a2ddce9e758ea1ef
3,659,405
def trap_jac_factory(j, dt):
    """Factory function to return a function for evaluating the Jacobian of
    the trapezoidal formula. This returns a function of x_n (x at this time
    step).

    :param j: Jacobian of the function of x.
    :param dt: time step.
    :returns: trap_jac, callable which takes x_n and evaluates the Jacobian
        of the trapezoidal formula.
    """
    def trap_jac(x_n):
        """Function to compute the Jacobian of the implicit trapezoidal
        equation.
        """
        return np.identity(x_n.shape[0]) - dt / 2 * j(x_n)

    return trap_jac
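A brief sketch of how the factory above might be used inside a Newton iteration for the implicit trapezoidal rule, assuming numpy is imported as np and a simple linear test system x' = A x (A, dt and x_n are illustrative):

import numpy as np

A = np.array([[0., 1.], [-4., -0.4]])
jac = lambda x: A                  # Jacobian of f(x) = A @ x is constant
trap_jac = trap_jac_factory(jac, dt=0.01)

x_n = np.array([1.0, 0.0])
J = trap_jac(x_n)                  # I - dt/2 * A, fed to a Newton solver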
5e6c365b6b92c13577249d34e7580827dc894604
3,659,406
from pathlib import Path def get_position_object(file_path: FilePathType): """ Read position data from .bin or .pos file and convert to pynwb.behavior.SpatialSeries objects. If possible it should always be preferred to read position data from the `.bin` file to ensure samples are locked to ecephys time courses. Parameters: ---------- file_path (Path or Str): Full file_path of Axona file with any extension. Returns: ------- position (pynwb.behavior.Position) """ position = Position() position_channel_names = [ "time(ms)", "X", "Y", "x", "y", "PX", "px", "px_total", "unused", ] if Path(file_path).suffix == ".bin": position_data = read_bin_file_position_data(file_path) else: position_data = read_pos_file_position_data(file_path) position_timestamps = position_data[:, 0] for ichan in range(0, position_data.shape[1]): spatial_series = SpatialSeries( name=position_channel_names[ichan], timestamps=position_timestamps, data=position_data[:, ichan], reference_frame="start of raw acquisition (.bin file)", ) position.add_spatial_series(spatial_series) return position
2d20e5b0a4f7077748650e7a3e3054c79b68185c
3,659,407
import random


def throw_dice(n):
    """Throw `n` dice, returns list of integers"""
    results = []
    while n > 0:
        results += [random.randint(1, 6)]
        n = n - 1
    return results
68c56b468ecd1eff59932099dd4620bae9581f45
3,659,408
import json


def verify_token_signature(token):
    """Verify the signature of the token and return the claims
    such as subject/username on valid signature"""
    key = jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY"))
    try:
        jwttoken = jwt.JWT(key=key, jwt=token, algs=["HS256"])
        return json.loads(jwttoken.claims)
    except jwt.JWTExpired:
        # jwt dependency uses a 60 seconds leeway to check exp
        # it also prints out a stack trace for it, so we handle it here
        raise AuthenticationError(message="Expired token")
d93233acb8a26ba0552ddc26777ccab4e40c4306
3,659,409
def logtime_r2(t, y, ppd): """ Convert y=f(t) data from linear in time to logarithmic in time. Args: t: is the input time vector, linearly spaced y: is the input vector of y values ppd: number of points per decade for the output Returns: A 3-tuple (tout, yout, wt) where tout and yout are logarithimically-spaced versions of t and y and wt is a vector of weights giving the number of points averaged for each yout value. """ zt = len(t) zy = len(y) assert zt == zy # Find the index of t = 0 by taking the index where t^2 is minimum. indzero = np.argmin(np.power(t,2)) if t[indzero] < 0: indzero += 1 # tmin is minimum nonzero value of t after start. tmin = t[indzero] tmax = np.max(t) if tmin == 0: tmin = t[indzero+1] ltmin = np.log10(tmin) ltmax = np.log10(tmax) tt = np.arange(ltmin, ltmax, 1/(2*ppd)) tt = np.power(10, tt) ztt = tt.size # perform resampling from indzero to end, forward in time icount, jcount = indzero, 0 tout, yout, wt = np.zeros(ztt), np.zeros(ztt), np.zeros(ztt) for i in np.arange(1, ztt, 2): # accumulate points until we reach the end of the interval while icount < zt and t[icount] < tt[i]: tout[jcount] = tout[jcount] + t[icount] yout[jcount] = yout[jcount] + y[icount] wt[jcount] += 1 icount += 1 # If we accumulated data points, then average by the number of points. if wt[jcount] > 0: tout[jcount] = tout[jcount] / wt[jcount]; yout[jcount] = yout[jcount] / wt[jcount]; jcount += 1 # Purposely allocated too much space at the start. Trim zeroes from the end. yout = np.trim_zeros(yout, 'b') tout = tout[:yout.size] wt = wt[:yout.size] # If we started at the beginning, then we are done. if indzero == 0: return (tout, yout, wt) # If not, perform resampling from indzero backwards in time. tmp_t, tmp_y = -t[indzero-1::-1], y[indzero-1::-1] tmp_zt = len(tmp_t) icount, jcount = 0, 0 tmp_tout, tmp_yout, tmp_wt = np.zeros(ztt), np.zeros(ztt), np.zeros(ztt) for i in np.arange(1, ztt, 2): while icount < tmp_zt and tmp_t[icount] < tt[i]: tmp_tout[jcount] = tmp_tout[jcount] + tmp_t[icount] tmp_yout[jcount] = tmp_yout[jcount] + tmp_y[icount] tmp_wt[jcount] += 1 icount += 1 if tmp_wt[jcount] > 0: tmp_tout[jcount] = tmp_tout[jcount] / tmp_wt[jcount]; tmp_yout[jcount] = tmp_yout[jcount] / tmp_wt[jcount]; jcount += 1 # Purposely allocated too much space at the start. Trim zeroes from the end. tmp_yout = np.trim_zeros(tmp_yout, 'b') tmp_tout = tmp_tout[:tmp_yout.size] tmp_wt = tmp_wt[:tmp_yout.size] # Concat results and return return (np.concatenate([-tmp_tout[::-1], tout]), np.concatenate([tmp_yout[::-1], yout]), np.concatenate([tmp_wt[::-1], wt]))
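A minimal usage sketch for the log-time resampler above, assuming numpy is imported as np; the linear time grid, the exponential decay and ppd=20 are illustrative values only:

import numpy as np

t = np.linspace(-1.0, 100.0, 10000)        # linear time grid, includes t <= 0
y = np.exp(-t / 10.0)

tout, yout, wt = logtime_r2(t, y, ppd=20)  # ~20 output points per decade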
ed77d7665488d3620d5cb62f4ba443b2361944b4
3,659,410
def parcours_serpentin(n):
    """Return the list of (column, row) indices (!! careful: here row and
    column are swapped !!) of the cells visited by a spiral traversal of an
    n x n array.

    E.g. for T = [ [1,2,3], [4,5,6], [7,8,9] ] the traversal corresponds to
    the cells 1,2,3,6,9,8,7,4,5 and the function will return the index list
    [(0,0),(1,0),(2,0),(2,1) ...]
    """
    return []
189e486ad82d75923244daf51c223254f7b29fcc
3,659,411
def bdev_rbd_unregister_cluster(client, name):
    """Remove Rados cluster object from the system.

    Args:
        name: name of Rados cluster object to unregister
    """
    params = {'name': name}
    return client.call('bdev_rbd_unregister_cluster', params)
03bf70352b8df65044eba1c9ece4b156590e11bc
3,659,412
def get_rndc_secret():
    """Use the singleton from the DesignateBindCharm to retrieve the RNDC secret

    :returns: str or None. Secret if available, None if not.
    """
    return DesignateBindCharm.singleton.get_rndc_secret()
b6fb5aebd272a6bc4db7d6541112566109e28195
3,659,413
def transform_tweet(source_tweet):
    """
    Perform transformation on one tweet, producing a new, transformed tweet.

    :param source_tweet: Tweet text to transform
    :type source_tweet: str
    :return: Transformed tweet text
    :rtype: str
    """
    no_emojis = replace_emojis(source_tweet)
    as_tokens = tokenize_string(no_emojis)
    result = ' '.join(as_tokens)

    if not result:
        return pd.NaT
    else:
        return result
9c4722200c7c85157aefca0c65946b6dd0e264d5
3,659,414
import scipy import time def fitDataBFGSM2(M, val, c_w_l, init=None, nozero=True, k=3e34, lam=1., name='W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave'): #init is the three initial values of the gaussian needed to fit the data """ function for determining the optimal fit given the desired parabolic regularization""" #intialize start position temp = scipy.io.readsav(name) init = temp['abundance'][:,36] reg = gen2Diff(init) bndarray = scipy.ones((len(init),2)) bndarray[:,0] = 1e-10 bndarray[:,1] = 1e10 Te = temp['en'] y = time.time() output = scipy.optimize.minimize(fullObjectiveLog, init, args=(val, c_w_l, M/k), jac=objectiveLogJac2, bounds=bndarray) print(time.time()-y) return output
35ddd0690e2ed60d6271f9be232cea3d808d562f
3,659,416
def set_complete_cfg_spacy(false_or_true: str): """Set all SpaCy configuration parameters to the same logical value.""" return pytest.helpers.backup_config_params( cfg.cls_setup.Setup._DCR_CFG_SECTION_SPACY, [ (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_CLUSTER, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_DEP_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_DOC, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_IOB_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_KB_ID_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_TYPE_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_HEAD, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_I, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IDX, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_ALPHA, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_ASCII, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_BRACKET, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_CURRENCY, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_DIGIT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_LEFT_PUNCT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_LOWER, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_OOV, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_PUNCT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_QUOTE, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_RIGHT_PUNCT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SENT_END, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SENT_START, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SPACE, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_STOP, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_TITLE, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_UPPER, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LANG_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEFT_EDGE, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEMMA_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEX, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEX_ID, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_EMAIL, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_NUM, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_URL, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LOWER_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_MORPH, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_NORM_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ORTH_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_POS_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_PREFIX_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_PROB, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_RANK, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_RIGHT_EDGE, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SENT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SENTIMENT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SHAPE_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SUFFIX_, false_or_true), 
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TAG_, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TENSOR, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TEXT, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TEXT_WITH_WS, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_VOCAB, false_or_true), (cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_WHITESPACE_, false_or_true), ], )
10ac74714e11b8c8492de7ec1d2809323819b8eb
3,659,417
def get_lun_ids(service_instance=None):
    """
    Return a list of LUN (Logical Unit Number) NAA (Network Addressing Authority) IDs.
    """
    if service_instance is None:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    hosts = utils_esxi.get_hosts(service_instance=service_instance, get_all_hosts=True)
    ids = []
    for host in hosts:
        for datastore in host.datastore:
            for extent in datastore.info.vmfs.extent:
                ids.append(extent.diskName)
    return ids
6194a8f73a71730d928391a492d5e8fe0fdb3f50
3,659,419
def parse_midi_file(midi_file, max_notes=float('Inf'), max_time_signatures=1, max_tempos=1, ignore_polyphonic_notes=True, convert_to_drums=False, steps_per_quarter=16): """Summary Parameters ---------- midi_file : TYPE Description max_notes : TYPE, optional Description max_time_signatures : int, optional Description max_tempos : int, optional Description ignore_polyphonic_notes : bool, optional Description convert_to_drums : bool, optional Description steps_per_quarter : int, optional Description Returns ------- TYPE Description """ seq = midi_io.midi_file_to_sequence_proto(midi_file) while len(seq.notes) > max_notes: seq.notes.pop() while len(seq.time_signatures) > max_time_signatures: seq.time_signatures.pop() while len(seq.tempos) > max_tempos: seq.tempos.pop() if convert_to_drums: for note_i in range(len(seq.notes)): seq.notes[note_i].program = 10 if ignore_polyphonic_notes: convert_to_monophonic(seq) seq = sequences_lib.quantize_note_sequence( seq, steps_per_quarter=steps_per_quarter) if seq.tempos: qpm = seq.tempos[0].qpm else: qpm = 120 melody = Melody() melody.from_quantized_sequence( seq, ignore_polyphonic_notes=ignore_polyphonic_notes) seq = melody.to_sequence(qpm=qpm) return seq, qpm
6c3ce0135bf45a8992f94197f5b10ab472407f40
3,659,420
def filter_prediction(disable_valid_filter, disable_extra_one_word_filter, pred_token_2dlist_stemmed): """ Remove the duplicate predictions, can optionally remove invalid predictions and extra one word predictions :param disable_valid_filter: :param disable_extra_one_word_filter: :param pred_token_2dlist_stemmed: :param pred_token_2d_list: :return: """ num_predictions = len(pred_token_2dlist_stemmed) is_unique_mask = check_duplicate_keyphrases(pred_token_2dlist_stemmed) # boolean array, 1=unqiue, 0=duplicate pred_filter = is_unique_mask if not disable_valid_filter: is_valid_mask = check_valid_keyphrases(pred_token_2dlist_stemmed) pred_filter = pred_filter * is_valid_mask if not disable_extra_one_word_filter: extra_one_word_seqs_mask, num_one_word_seqs = compute_extra_one_word_seqs_mask(pred_token_2dlist_stemmed) pred_filter = pred_filter * extra_one_word_seqs_mask filtered_stemmed_pred_str_list = [word_list for word_list, is_keep in zip(pred_token_2dlist_stemmed, pred_filter) if is_keep] num_duplicated_predictions = num_predictions - np.sum(is_unique_mask) return filtered_stemmed_pred_str_list, num_duplicated_predictions, is_unique_mask
8cbeb93c6fdfdc64cfa5819baa903699544ccb3d
3,659,421
def simple_dict_event_extractor(row, condition_for_creating_event, id_field, timestamp_field, name_of_event):
    """
    Takes a row of the data df and returns an event record {id, event, timestamp}
    if the row satisfies the condition (i.e. condition_for_creating_event(row) returns True)
    """
    if condition_for_creating_event(row):
        return {'id': row[id_field], 'event': name_of_event, 'timestamp': row[timestamp_field]}
2195acf5df6f465fdf3160df3abbac54e5ac0320
3,659,422
def split_fused_prelu(input_graph_def: util.GraphDef) -> util.GraphDef: """ This function looks for fused operations that include a 'Prelu'-activation. Matching nodes will be split into individual operations. TFJS uses fused operations for performance. Some fused activations aren't supported by TF (e.g. 'Prelu'), so we need to split the fused ops back into individual ops and replace unsupported functions by equivalent supported constructs later. Args: input_graph_def: TF graph definition to examine Returns: Updated copy of the input graph with matching nodes replaced by individual operations """ def _predicate(node): return (util.is_fused_conv2d(node, b'Prelu') or util.is_fused_matmul(node, b'Prelu')) return util.replace_matching_nodes(input_graph_def, _predicate, _split_fused_op)
36b22afa67dd9259aae9f7be8ec6c4ffdf7c1167
3,659,423