Dataset columns — content: string (35 to 762k chars); sha1: string (40 chars); id: int64 (0 to 3.66M).
def get_flavors():
    """Get Nectar vm flavors in a dict with openstack_id as key."""
    fls = Flavor.query.all()
    results = []
    for fl in fls:
        results.append(repack(fl.json(), {"name": "flavor_name"}, ["id"]))
    return array_to_dict(results)
005ce92fa46689ea639594fd5341f327dc04704d
3,658,139
def _HasTrafficChanges(args):
    """True iff any of the traffic flags are set."""
    traffic_flags = ['to_revision', 'to_latest']
    return _HasChanges(args, traffic_flags)
3d638195f86dc9f383c01c92d475ca90dc4fa60b
3,658,140
def find_credentials(account):
    """
    Check whether credentials exist with that username and return True or False.
    """
    return Credentials.find_credentialls(account)
dc59eec797d606854fa8a668b234a5eb61f8a0f8
3,658,141
import tokenize

def enumerate_imports(tokens):
    """
    Iterates over *tokens* and returns a list of all imported modules.

    .. note:: This ignores imports using the 'as' and 'from' keywords.
    """
    imported_modules = []
    import_line = False
    from_import = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
            from_import = False
        elif token_string == "import":
            import_line = True
        elif token_string == "from":
            from_import = True
        elif import_line:
            # `reserved_words` is assumed to be defined at module level.
            if token_type == tokenize.NAME and tokens[index + 1][1] != 'as':
                if not from_import:
                    if token_string not in reserved_words:
                        if token_string not in imported_modules:
                            imported_modules.append(token_string)
    return imported_modules
0ee4921455899b036eb808262e183a6bc9017ccc
3,658,142
import re

def safe_htcondor_attribute(attribute: str) -> str:
    """Convert input attribute name into a valid HTCondor attribute name.

    HTCondor ClassAd attribute names consist only of alphanumeric characters
    or underscores. It is not clearly documented, but the alphanumeric
    characters are probably restricted to ASCII. Attribute names created from
    multiple words typically capitalize the first letter in each word for
    readability, although all comparisons are case-insensitive.

    e.g., "central-manager" -> "CentralManager"

    Args:
        attribute: a string representing the name of an attribute

    Returns:
        The attribute name stripped of invalid characters and re-capitalized
        in the manner typical of HTCondor ClassAd attributes.

    Raises:
        None
    """
    # Splitting by invalid characters removes them from the resulting array.
    split_attr = re.split(r"[^\w]", attribute, flags=re.ASCII)
    safe_attr = "".join([word.capitalize() for word in split_attr if word])
    return safe_attr
7a4dda539b2379120e68737d72a80226c45f5602
3,658,144
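A quick usage sketch for the sanitizer above (hypothetical inputs; assumes the function is in scope):

# Hyphens and spaces are invalid ClassAd characters, so they split words
# before re-capitalization; empty fragments are dropped.
print(safe_htcondor_attribute("central-manager"))  # -> CentralManager
print(safe_htcondor_attribute("health check!"))    # -> HealthCheck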
import numpy as np

def focal_length_to_fov(focal_length, length):
    """Convert focal length to field-of-view (given length of screen)."""
    fov = 2 * np.arctan(length / (2 * focal_length))
    return fov
2803de559943ce84620ac1130c099438ec1b4b12
3,658,145
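A numeric sanity check of the pinhole field-of-view formula above (assumes numpy is imported as np):

# 35 mm focal length behind a 35 mm-wide sensor -> 2 * arctan(35 / 70)
fov = focal_length_to_fov(35.0, 35.0)
print(fov, np.degrees(fov))  # ~0.9273 rad, ~53.13 degrees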
def make_csv(headers, data):
    """
    Creates a CSV given a set of headers and a list of database query results.

    :param headers: A list containing the first row of the CSV
    :param data: The list of query results from the Database
    :returns: A str containing a csv of the query results
    """
    # Create a list where each entry is one row of the CSV file, starting
    # with the headers.
    csvRows = [','.join(headers)]

    # Iterate through the provided data and create the rest of the CSV's rows.
    for datum in data:
        currentRow = ''
        for header in headers:
            # Get this row's value for the given header.
            val = getattr(datum, header)
            if type(val) is str:
                # Escape the strings.
                currentRow += '"' + val + '",'
            elif type(val) is float:
                # Don't escape the floats.
                currentRow += str(val) + ','
            else:
                # If it is empty, add a placeholder.
                currentRow += ','
        csvRows.append(currentRow[:-1])

    # Combine all of the rows into a single string and return it.
    return "\n".join(csvRows)
5101d53de8dd09d8ebe743d77d71bff9aeb26334
3,658,147
def draw_color_rect(buf, ix, iy, size, wrect, color):
    """Draw a square centered on x,y filled with color."""
    code = """
    int nd = %d;
    int x, y, i, j;
    int ny = 1 + 2 * nd;
    int nx = ny;
    y = iy - nd;
    if (y < 0) { ny += y; y = 0; }
    else if ((y + ny) > dimy) ny -= y + ny - dimy;
    x = ix - nd;
    if (x < 0) { nx += x; x = 0; }
    else if ((x + nx) > dimx) nx -= x + nx - dimx;
    int k = y * dimx * 3 + 3 * x;
    int deltak = 3 * (dimx - nx);
    for (i = 0; i < ny; i++) {
        for (j = 0; j < nx; j++) {
    #if 1
            *(buf+k++) = color[0];
            *(buf+k++) = color[1];
            *(buf+k++) = color[2];
    #else
            *(buf+k) = (*(buf+k) / 2) + (color[0] / 2); k++;
            *(buf+k) = (*(buf+k) / 2) + (color[1] / 2); k++;
            *(buf+k) = (*(buf+k) / 2) + (color[2] / 2); k++;
    #endif
        }
        k += deltak;
    }
    """ % wrect
    (dimx, dimy) = (size[0], size[1])
    if ix < 0 or iy < 0 or ix >= dimx or iy >= dimy:
        return
    weave.inline(code, ['buf', 'ix', 'iy', 'dimx', 'dimy', 'color'])
822bc77d1e6ccb4c802a4a3335c1bba55ba14f04
3,658,148
import numpy as np

def _compute_focus_2d(image_2d, kernel_size):
    """Compute a pixel-wise focus metric for a 2-d image.

    Parameters
    ----------
    image_2d : np.ndarray, np.float
        A 2-d image with shape (y, x).
    kernel_size : int
        The size of the square used to define the neighborhood of each pixel.
        An odd value is preferred.

    Returns
    -------
    focus : np.ndarray, np.float64
        A 2-d tensor with the R(y, x) computed for each pixel of the original
        image.
    """
    # Mean filtered image.
    image_filtered_mean = mean_filter(image_2d, "square", kernel_size)

    # Compute focus metric. Use separate output buffers so the two ratios do
    # not alias the same array.
    ratio_default_1 = np.ones_like(image_2d, dtype=np.float64)
    ratio_default_2 = np.ones_like(image_2d, dtype=np.float64)
    ratio_1 = np.divide(image_2d, image_filtered_mean,
                        out=ratio_default_1, where=image_filtered_mean > 0)
    ratio_2 = np.divide(image_filtered_mean, image_2d,
                        out=ratio_default_2, where=image_2d > 0)
    focus = np.where(image_2d >= image_filtered_mean, ratio_1, ratio_2)

    return focus
67b139fdef8b6501a64699344d80b19012876f86
3,658,149
from typing import Tuple

def extract_value_from_config(
    config: dict,
    keys: Tuple[str, ...],
):
    """
    Traverse a config dictionary to get some hyper-parameter's value.

    Parameters
    ----------
    config
        A config dictionary.
    keys
        The possible names of a hyper-parameter.

    Returns
    -------
    The hyper-parameter value.
    """
    result = []
    for k, v in config.items():
        if k in keys:
            result.append(v)
        elif isinstance(v, dict):
            result += extract_value_from_config(v, keys)
        else:
            pass
    return result
d545d4c9298c74776ec52fb6b2c8d54d0e653489
3,658,150
import numpy

def boundaryStats(a):
    """
    Returns the minimum and maximum values of a only on the boundaries of the
    array.
    """
    amin = numpy.amin(a[0, :])
    amin = min(amin, numpy.amin(a[1:, -1]))
    amin = min(amin, numpy.amin(a[-1, :-1]))
    amin = min(amin, numpy.amin(a[1:-1, 0]))
    amax = numpy.amax(a[0, :])
    amax = max(amax, numpy.amax(a[1:, -1]))
    amax = max(amax, numpy.amax(a[-1, :-1]))
    amax = max(amax, numpy.amax(a[1:-1, 0]))
    return amin, amax
6c007c6cf2c7c5774ca74365be8f63094864d962
3,658,151
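A small check of the boundary scan above (assumes the function and numpy are in scope):

a = np.arange(16).reshape(4, 4)  # interior holds 5, 6, 9, 10; the rest is boundary
print(boundaryStats(a))          # (0, 15)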
from operator import add

def offset_func(func, offset, *args):
    """Offsets inputs by offset.

    >>> double = lambda x: x * 2
    >>> f = offset_func(double, (10,))
    >>> f(1)
    22
    >>> f(300)
    620
    """
    def _offset(*args):
        args2 = list(map(add, args, offset))
        return func(*args2)

    with ignoring(Exception):
        _offset.__name__ = 'offset_' + func.__name__

    return _offset
16526bc8302444a97ea27eb6088fe15604d3cf9e
3,658,152
def get_redshift_schemas(cursor, user):
    """
    Get all the Amazon Redshift schemas on which the user has create
    permissions.
    """
    # NOTE: `user` is interpolated directly into the SQL string; with an
    # untrusted value this is a SQL-injection risk and should be parameterized.
    get_schemas_sql = (
        "SELECT s.schemaname "
        "FROM pg_user u "
        "CROSS JOIN "
        "(SELECT DISTINCT schemaname FROM pg_tables) s "
        "WHERE has_schema_privilege(u.usename,s.schemaname,'create') = true "
        "AND u.usename = '" + user + "' "
        "AND s.schemaname NOT LIKE '%pg_%' "
        "AND s.schemaname NOT LIKE '%information_schema%' ;"
    )
    try:
        cursor.execute(get_schemas_sql)
        schemas = cursor.fetchall()
    except Exception as e:
        logger.error('Error in executing SQL: {}'.format(get_schemas_sql))
        raise e
    return convert_to_list(schemas)
2833205f3e1b863fe8e5a18da723cf1676a65485
3,658,153
import numpy as np

def window_features(idx, window_size=100, overlap=10):
    """
    Generate indexes for a sliding window with overlap.

    :param array idx: The indexes that need to be windowed.
    :param int window_size: The size of the window.
    :param int overlap: How much each window should overlap.
    :return array view: The indexes for the windows with overlap.
    """
    overlap = window_size - overlap  # re-used as the step between window starts
    sh = (idx.size - window_size + 1, window_size)
    st = idx.strides * 2
    view = np.lib.stride_tricks.as_strided(idx, strides=st, shape=sh)[0::overlap]
    return view
e10caae55424134a95c2085e5f54f73d81697e92
3,658,154
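A usage sketch for the strided-window helper above (assumes the function and numpy are in scope):

idx = np.arange(20)
# step between window starts = window_size - overlap = 5
view = window_features(idx, window_size=10, overlap=5)
print(view.shape)   # (3, 10) -> windows starting at 0, 5, 10
print(view[:, 0])   # [ 0  5 10]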
from datetime import date, datetime, timedelta

def create_suburbans_answer(from_code, to_code, for_date, limit=3):
    """
    Creates yandex suburbans answer for date by stations codes.

    :param from_code: `from` yandex station code
    :type from_code: str
    :param to_code: `to` yandex station code
    :type to_code: str
    :param for_date: date for which data should be received
    :type for_date: date
    :param limit: limit of segments in answer
    :type limit: int
    :return: tuple with `answer`, `is_tomorrow` and `is_error` data
    :rtype: tuple
    """
    code, data = get_yandex_raw_data(from_code, to_code, for_date)
    if code != 200:
        return yandex_error_answer, False, True

    from_title = data["search"]["from"]["title"]
    to_title = data["search"]["to"]["title"]

    answer = ""
    for segment in data["segments"]:
        if len(answer.split("\n\n")) > limit:
            break
        if datetime_from_string(segment["departure"]) >= datetime.now():
            answer += parse_yandex_segment(segment)

    if answer:
        answer = "<b>{0}</b> => <b>{1}</b>\n\n".format(from_title, to_title) + answer
        is_tomorrow = False
    else:
        for_date = date.today() + timedelta(days=1)
        answer += create_suburbans_answer(from_code, to_code, for_date, limit=5)[0]
        is_tomorrow = True

    return answer, is_tomorrow, False
47b34617fdcd9fe83d1c0973c420363c05b9f70c
3,658,156
def update_user(usr):
    """
    Update user and return new data.

    :param usr:
    :return object:
    """
    user = session.query(User).filter_by(id=usr['uid']).first()
    user.username = usr['username']
    user.first_name = usr['first_name']
    user.last_name = usr['last_name']
    user.email = usr['email']
    session.commit()
    return user
d6c078c966443c609c29bb4ee046612c748bb192
3,658,157
from datetime import datetime
import pandas as pd

def get_data(date_from=None, date_to=None, location=None):
    """Get covid data.

    Retrieve covid data in pandas dataframe format with the time periods and
    countries provided.

    Parameters
    ----------
    date_from : str, optional
        Start date of the data range with format 'YYYY-MM-DD'. By default
        'None' is used to represent 7 days prior to today's date.
    date_to : str, optional
        End date of data range with format 'YYYY-MM-DD'. By default 'None' is
        used to represent today's date.
    location : list, optional
        List of target country names. By default 'None' is used for all
        countries.

    Returns
    -------
    pandas.DataFrame
        Pandas dataframe of the selected covid data.

    Examples
    --------
    >>> get_data(date_from="2022-01-01", date_to="2022-01-07",
    ...          location=["Canada", "China"])
    """
    query = "@date_from <= date <= @date_to"
    url = "https://covid.ourworldindata.org/data/owid-covid-data.csv"

    if date_from is None:
        date_from = (
            pd.to_datetime("today").normalize() - pd.to_timedelta(7, unit="d")
        ).strftime("%Y-%m-%d")
    if date_to is None:
        date_to = pd.to_datetime("today").normalize().strftime("%Y-%m-%d")

    # strptime raises ValueError for an invalid date and TypeError for a
    # non-string argument; the comparison result itself is discarded.
    try:
        date_from != datetime.strptime(date_from, "%Y-%m-%d").strftime("%Y-%m-%d")
    except ValueError:
        raise ValueError(
            "Invalid argument value: date_from must be in format of "
            "YYYY-MM-DD. Also check if it is a valid date."
        )
    except TypeError:
        raise TypeError(
            "Invalid argument type: date_from must be in string format of "
            "YYYY-MM-DD."
        )
    try:
        date_to != datetime.strptime(date_to, "%Y-%m-%d").strftime("%Y-%m-%d")
    except ValueError:
        raise ValueError(
            "Invalid argument value: date_to must be in format of "
            "YYYY-MM-DD. Also check if it is a valid date."
        )
    except TypeError:
        raise TypeError(
            "Invalid argument type: date_to must be in string format of "
            "YYYY-MM-DD."
        )

    error_msg = (
        "Invalid values: date_from should be smaller or equal"
        " to date_to (or today's date if date_to is not specified)."
    )
    if pd.to_datetime(date_to) < pd.to_datetime(date_from):
        raise ValueError(error_msg)
    if pd.to_datetime(date_to) > pd.to_datetime("today").normalize():
        raise ValueError("Invalid values: date_to should be smaller or equal to today.")

    if location is not None:
        if not isinstance(location, list):
            raise TypeError("Invalid argument type: location must be a list of strings.")
        for item in location:
            if not isinstance(item, str):
                raise TypeError(
                    "Invalid argument type: values inside location list must "
                    "be strings."
                )
        query += " and location in @location"

    try:
        covid_df = pd.read_csv(url, parse_dates=["date"])
    except BaseException:
        return "The link to the data is broken."

    covid_df = covid_df.query(query)
    covid_df = covid_df[~covid_df["iso_code"].str.startswith("OWID")]

    return covid_df
14067432e5b6d51b60312707cc817acbe904ef0b
3,658,158
from typing import Any, Dict, List

def group_by_lambda(array: List[dict], func: GroupFunc) -> Dict[Any, List[dict]]:
    """
    Convert a list of objects to a dict of lists of objects, where the dict
    key is generated by func.

    Example::

        grouped = group_by_lambda(detections, lambda x: x.get(DEVICE_ID))

    :param array: list of objects to group
    :param func: given an object as param, returns a key or None; when the key
        is None the object will be excluded

    The ``func(obj, ret)`` callback provided as arg:

    Args:
        * ``obj``: next element from ``array``
        * ``ret``: dictionary of just grouped objects

    Return effect:
        * ``None``: object will not be added anywhere
        * *some value*: object will be appended to the array under the
          *some value* key

    Note: there are some wrappers for this function like
    ``group_by_device_id()``, ``group_by_timestamp_division()`` and
    ``group_by_resolution()``.

    :return: dict of lists of objects
    """
    ret = {}
    for o in array:
        key = func(o, ret)
        if key is None:
            continue
        os = get_and_set(ret, key, [])
        os.append(o)
    return ret
a92733a21b5e6e932be6d95ff79939ca26e3d429
3,658,159
def update(isamAppliance, is_primary, interface, remote, port,
           health_check_interval, health_check_timeout,
           check_mode=False, force=False):
    """
    Updating HA configuration.
    """
    # Call the check function to see if the configuration already exists.
    update_required = _check_enable(isamAppliance,
                                    is_primary=is_primary,
                                    interface=interface,
                                    remote=remote,
                                    port=port,
                                    health_check_interval=health_check_interval,
                                    health_check_timeout=health_check_timeout)

    if force is True or update_required is True:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_put(
                "Updating HA configuration",
                module_uri,
                {
                    "is_primary": is_primary,
                    "interface": interface,
                    "remote": remote,
                    "port": port,
                    "health_check_interval": health_check_interval,
                    "health_check_timeout": health_check_timeout
                },
                requires_modules=requires_module,
                requires_version=requires_version)
    else:
        return isamAppliance.create_return_object()
b4da64648a46e30e7220d308266e4c4cc68e25ff
3,658,160
import numpy
import scipy
import scipy.linalg

def compare_images(image_file_name1, image_file_name2, no_print=True):
    """Compare two images by calculating Manhattan and Zero norms."""
    # Source: http://stackoverflow.com/questions/189943/
    # how-can-i-quantify-difference-between-two-images
    # `imread` and `IMGTOL` are expected to come from the enclosing module.
    img1 = imread(image_file_name1).astype(float)
    img2 = imread(image_file_name2).astype(float)
    if img1.size != img2.size:
        m_norm, z_norm = 2 * [2 * IMGTOL]
    else:
        # Element-wise difference for NumPy/SciPy arrays.
        diff = img1 - img2
        # Manhattan norm.
        m_norm = scipy.sum(numpy.abs(diff))
        # Zero norm.
        z_norm = scipy.linalg.norm(diff.ravel(), 0)
    result = bool((m_norm < IMGTOL) and (z_norm < IMGTOL))
    if not no_print:
        print(
            'Image 1: {0}, Image 2: {1} -> ({2}, {3}) [{4}]'.format(
                image_file_name1, image_file_name2, m_norm, z_norm, result
            )
        )
    return result
c554750ae94b5925d283e0a9d8ff198e51abe29b
3,658,161
def prepare_update_mutation_classes():
    """
    Prepare the actual update mutation class for each model.

    :return: A tuple of all mutation classes
    """
    _models = get_enabled_app_models()
    _classes = []
    for m in _models:
        _attrs = prepare_update_mutation_class_attributes(model=m)
        # Create a fake base class so that mutate resolves properly.
        _base_class = class_factory(__class_name='Update' + m.__name__,
                                    base_classes=(Mutation,), **_attrs)
        _attrs.update(mutate=prepare_update_mutate(model=m,
                                                   _mutation_class=_base_class))
        _class = class_factory(__class_name='Update' + m.__name__,
                               base_classes=(_base_class,), **_attrs)
        _classes.append(_class)
    return tuple(_classes)
27e450ea81000e81ebbf33db5d860c9a6b0adb23
3,658,162
def vision_matched_template_get_pose(template_match):
    """
    Get the pose of a previously detected template match. Use list operations
    to get specific entries; otherwise the value of the first entry is
    returned.

    Parameters:
        template_match (List[MatchedTemplate3D] or MatchedTemplate3D): The
            template match(es)

    Return (Pose): The pose of the template match
    """
    if isinstance(template_match, list):
        template_match = template_match[0]
    return template_match.pose.pose.pose
b854da7a085934f4f3aba510e76852fb8c0a440a
3,658,164
def create_rotor(model, ring_setting=0):
    """Factory function to create and return a rotor of the given model name."""
    if model in ROTORS:
        data = ROTORS[model]
        return Rotor(model, data['wiring'], ring_setting, data['stepping'])
    raise RotorError("Unknown rotor type: %s" % model)
193ab444c8b5527360498cb1c8911194f04742a3
3,658,165
import tensorflow as tf

def compute_ess(samples):
    """Compute an estimate of the effective sample size (ESS).

    See the [Stan manual](https://mc-stan.org/docs/2_18/reference-manual/effective-sample-size-section.html)
    for a definition of the effective sample size in the context of MCMC.

    Args:
        samples: Tensor, vector (n,), float32 of n sequential observations.

    Returns:
        ess: float, effective sample size, >= 1, <= n.
        efficiency: float, >= 0.0, the relative efficiency obtained compared
            to the naive Monte Carlo estimate which has an efficiency of one.
    """
    ess, efficiency = compute_ess_multidimensional(
        tf.reshape(samples, (1, tf.size(samples))))

    ess = ess[0]
    efficiency = efficiency[0]

    return ess, efficiency
8330c4f6efb4b23c5a25be18d29c07e946731716
3,658,167
import time

def uptime():
    """Returns uptime in milliseconds, starting at first call."""
    if not hasattr(uptime, "t0"):
        # First call: record the reference time on the function object.
        # (The original `if not hasattr(uptime, "t0") is None:` was always
        # true, which reset the timer on every call.)
        uptime.t0 = time.time()
    return int((time.time() - uptime.t0) * 1000)
ff8dbe459cf7f349741cc8ac85b12e4d1dd88135
3,658,168
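A usage sketch for the corrected timer above (timings are approximate):

print(uptime())   # 0, or close to it, on the first call
time.sleep(0.25)
print(uptime())   # roughly 250 milliseconds later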
def load_plot(axis, plot, x_vals, y1=None, y2=None, y3=None, y4=None,
              title="", xlab="", ylab="", ltype=[1, 1, 1, 1],
              marker=['g-', 'r-', 'b-', 'k--']):
    """
    Function to load the matplotlib plots.

    :param matplotlib.Axis axis: the matplotlib axis object.
    :param matplotlib.FigureCanvas plot: the matplotlib plot object.
    :param list x_vals: list of the x values to plot.
    :keyword float y1: list of the first data set y values to plot.
    :keyword float y2: list of the second data set y values to plot.
    :keyword float y3: list of the third data set y values to plot.
    :keyword float y4: list of the fourth data set y values to plot.
    :keyword str title: the title for the plot.
    :keyword str xlab: the x axis label for the plot.
    :keyword str ylab: the y axis label for the plot.
    :keyword int ltype: list of the type of line to plot. Options are:
        1 = step
        2 = plot
        3 = histogram
        4 = date plot
    :keyword str marker: list of the markers to use on the plot. Defaults are:
        g- = green solid line
        r- = red solid line
        b- = blue solid line
        k-- = black dashed line
    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # WARNING: Refactor load_plot; current McCabe Complexity metric = 23.
    axis.cla()
    axis.grid(True, which='both')

    _x_min = min(x_vals)
    _x_max = max(x_vals)
    _y_min = 0.0
    _lst_min = [0.0]
    _lst_max = []

    if y1 is not None:
        if ltype[0] == 1:
            line, = axis.step(x_vals, y1, marker[0], where='mid')
            line.set_ydata(y1)
            _lst_min.append(min(y1))
            _lst_max.append(max(y1))
        elif ltype[0] == 2:
            line, = axis.plot(x_vals, y1, marker[0], linewidth=2)
            line.set_ydata(y1)
            _lst_min.append(min(y1))
            _lst_max.append(max(y1))
        elif ltype[0] == 3:
            axis.grid(False, which='both')
            _values, _edges, __patches = axis.hist(x_vals, bins=y1,
                                                   color=marker[0])
            _x_min = min(_edges)
            _x_max = max(_edges)
            _lst_min.append(min(_values))
            _lst_max.append(max(_values) + 1)
        elif ltype[0] == 4:
            line, = axis.plot_date(x_vals, y1, marker[0], xdate=True,
                                   linewidth=2)
            _lst_min.append(min(y1))
            _lst_max.append(max(y1))
            _y_min = min(y1)

    if y2 is not None:
        if ltype[1] == 1:
            line2, = axis.step(x_vals, y2, marker[1], where='mid')
            line2.set_ydata(y2)
            _lst_min.append(min(y2))
            _lst_max.append(max(y2))
        elif ltype[1] == 2:
            line2, = axis.plot(x_vals, y2, marker[1], linewidth=2)
            line2.set_ydata(y2)
            _lst_min.append(min(y2))
            _lst_max.append(max(y2))
        elif ltype[1] == 3:
            axis.grid(False, which='both')
            _values, _edges, __patches = axis.hist(x_vals, bins=len(y2),
                                                   color=marker[1])
            _x_min = min(_edges)
            _x_max = max(_edges)
            _lst_min.append(min(_values))
            _lst_max.append(max(_values) + 1)
        elif ltype[1] == 4:
            line2, = axis.plot_date(x_vals, y2, marker[1], xdate=True,
                                    linewidth=2)
            _lst_min.append(min(y2))
            _lst_max.append(max(y2))
            _y_min = min(y2)

    if y3 is not None:
        if ltype[2] == 1:
            line3, = axis.step(x_vals, y3, marker[2], where='mid')
            line3.set_ydata(y3)
            _lst_min.append(min(y3))
            _lst_max.append(max(y3))
        elif ltype[2] == 2:
            line3, = axis.plot(x_vals, y3, marker[2], linewidth=2)
            line3.set_ydata(y3)
            _lst_min.append(min(y3))
            _lst_max.append(max(y3))
        elif ltype[2] == 3:
            axis.grid(False, which='both')
            _values, _edges, __patches = axis.hist(x_vals, bins=len(y3),
                                                   color=marker[2])
            _x_min = min(_edges)
            _x_max = max(_edges)
            _lst_min.append(min(_values))
            _lst_max.append(max(_values) + 1)
        elif ltype[2] == 4:
            line3, = axis.plot_date(x_vals, y3, marker[2], xdate=True,
                                    linewidth=2)
            _lst_min.append(min(y3))
            _lst_max.append(max(y3))
            _y_min = min(y3)

    if y4 is not None:
        if ltype[3] == 1:
            line4, = axis.step(x_vals, y4, marker[3], where='mid')
            line4.set_ydata(y4)
            _lst_min.append(min(y4))
            _lst_max.append(max(y4))
        elif ltype[3] == 2:
            line4, = axis.plot(x_vals, y4, marker[3], linewidth=2)
            line4.set_ydata(y4)
            _lst_min.append(min(y4))
            _lst_max.append(max(y4))
        elif ltype[3] == 3:
            axis.grid(False, which='both')
            _values, _edges, __patches = axis.hist(x_vals, bins=len(y4),
                                                   color=marker[3])
            _x_min = min(_edges)
            _x_max = max(_edges)
            _lst_min.append(min(_values))
            _lst_max.append(max(_values) + 1)
        elif ltype[3] == 4:
            line4, = axis.plot_date(x_vals, y4, marker[3], xdate=True,
                                    linewidth=2)
            _lst_min.append(min(y4))
            _lst_max.append(max(y4))
            _y_min = min(y4)

    axis.set_title(title, {'fontsize': 16, 'fontweight': 'bold',
                           'verticalalignment': 'baseline',
                           'horizontalalignment': 'center'})

    # Set the x-axis label.
    _x_pos = (_x_max - _x_min) / 2.0
    _y_pos = _y_min - 0.65
    axis.set_xlabel(xlab, {'fontsize': 14, 'fontweight': 'bold',
                           'verticalalignment': 'center',
                           'horizontalalignment': 'center',
                           'x': _x_pos, 'y': _y_pos})

    # Set the y-axis label.
    axis.set_ylabel(ylab, {'fontsize': 14, 'fontweight': 'bold',
                           'verticalalignment': 'center',
                           'horizontalalignment': 'center',
                           'rotation': 'vertical'})

    # Get the minimum and maximum y-values to set the axis bounds.  If the
    # maximum value is infinity, use the next largest value and so forth.
    _min = min(_lst_min)
    _max = _lst_max[0]
    for i in range(1, len(_lst_max)):
        if _max < _lst_max[i] and _lst_max[i] != float('inf'):
            _max = _lst_max[i]

    axis.set_ybound(_min, _max)

    plot.draw()

    return False
ad7499f357349fde12537c6ceeb061bf6163709d
3,658,169
def _optimize_loop_axis(dim):
    """
    Chooses kernel parameters including CUDA block size, grid size, and number
    of elements to compute per thread for the loop axis. The loop axis is the
    axis of the tensor for which a thread can compute multiple outputs. Uses a
    simple heuristic which tries to get at least 4 warps per block and 8 items
    per thread to hide latencies. Prefers a higher item-per-thread count to
    launching many blocks for very large axes, since blocks are serialized by
    the GPU after all SMs are filled.

    Arguments:
        dim (int): Size of the tensor on the loop axis.

    Returns:
        tuple of grid dimension, block dimension, and items per thread
    """
    sm_count = _get_sm_count()

    griddim = min(sm_count, -((-dim) // 32))
    items_per_block = -((-dim) // griddim)
    items_per_thread = 1
    warps = -((-items_per_block) // (32 * items_per_thread))

    while (warps > 4 and items_per_thread < 8) or (warps > 32):
        items_per_thread = items_per_thread + 1
        warps = -((-items_per_block) // (32 * items_per_thread))

    blockdim = warps * 32

    return (griddim, blockdim, items_per_thread)
8f3e77cc772dcf848de76328832c0546a68c1f09
3,658,170
def no_zero(t):
    """
    Replace all zeros in a tensor with ones, in place. This allows us to take
    the logarithm and then sum over all values in the matrix.

    Args:
        t: tensor to be modified

    Returns:
        t: tensor with ones instead of zeros.
    """
    t[t == 0] = 1.
    return t
8119d1859dc8b248f5bb09b7cc0fc3b492d9b7bd
3,658,171
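A usage sketch for the zero-replacement helper above (works the same on a numpy array as on a tensor; assumes numpy is imported as np):

t = np.array([0.0, 2.0, 0.0, 4.0])
print(no_zero(t))                  # [1. 2. 1. 4.]  (modified in place)
print(np.sum(np.log(t)))           # log(2) + log(4) ~= 2.0794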
def php_implode(*args):
    """
    >>> array = Array('lastname', 'email', 'phone')
    >>> php_implode(",", array)
    'lastname,email,phone'
    >>> php_implode('hello', Array())
    ''
    """
    if len(args) == 1:
        # Single-argument form: implode(array) joins with an empty glue.
        # (The original asserted on `args`, a tuple, and joined the tuple
        # itself rather than the array's values.)
        assert isinstance(args[0], Array)
        return "".join([str(x) for x in args[0].values()])
    assert len(args) == 2
    assert (isinstance(args[0], str) and isinstance(args[1], Array)) or \
           (isinstance(args[1], str) and isinstance(args[0], Array))
    _glue = args[0] if isinstance(args[0], str) else args[1]
    _array = args[1] if isinstance(args[1], Array) else args[0]
    return _glue.join([str(x) for x in _array.values()])
6f7c49ed340610c290d534a0c0edccd920a1e46e
3,658,172
import math

def make_incompressible(velocity: Grid,
                        domain: Domain,
                        obstacles: tuple or list = (),
                        solve_params: math.LinearSolve = math.LinearSolve(None, 1e-3),
                        pressure_guess: CenteredGrid = None):
    """
    Projects the given velocity field by solving for the pressure and
    subtracting its spatial_gradient.

    This method is similar to :func:`field.divergence_free()` but differs in
    how the boundary conditions are specified.

    Args:
        velocity: Vector field sampled on a grid
        domain: Used to specify boundary conditions
        obstacles: List of Obstacles to specify boundary conditions inside the
            domain (Default value = ())
        pressure_guess: Initial guess for the pressure solve
        solve_params: Parameters for the pressure solve

    Returns:
        velocity: divergence-free velocity of type `type(velocity)`
        pressure: solved pressure field, `CenteredGrid`
        iterations: Number of iterations required to solve for the pressure
        divergence: divergence field of input velocity, `CenteredGrid`
    """
    input_velocity = velocity
    active = domain.grid(
        HardGeometryMask(~union(*[obstacle.geometry for obstacle in obstacles])),
        extrapolation=domain.boundaries['active_extrapolation'])
    accessible = domain.grid(
        active, extrapolation=domain.boundaries['accessible_extrapolation'])
    hard_bcs = field.stagger(accessible, math.minimum,
                             domain.boundaries['accessible_extrapolation'],
                             type=type(velocity))
    velocity = layer_obstacle_velocities(velocity * hard_bcs, obstacles).with_(
        extrapolation=domain.boundaries['near_vector_extrapolation'])
    div = divergence(velocity)
    if domain.boundaries['near_vector_extrapolation'] == math.extrapolation.BOUNDARY:
        div -= field.mean(div)

    # Solve pressure

    def laplace(p):
        grad = spatial_gradient(p, type(velocity))
        grad *= hard_bcs
        grad = grad.with_(extrapolation=domain.boundaries['near_vector_extrapolation'])
        div = divergence(grad)
        lap = where(active, div, p)
        return lap

    pressure_guess = pressure_guess if pressure_guess is not None else domain.scalar_grid(0)
    converged, pressure, iterations = field.solve(
        laplace, y=div, x0=pressure_guess,
        solve_params=solve_params, constants=[active, hard_bcs])
    if math.all_available(converged) and not math.all(converged):
        raise AssertionError(
            f"pressure solve did not converge after {iterations} iterations\n"
            f"Result: {pressure.values}")

    # Subtract grad pressure
    gradp = field.spatial_gradient(pressure, type=type(velocity)) * hard_bcs
    velocity = (velocity - gradp).with_(extrapolation=input_velocity.extrapolation)
    return velocity, pressure, iterations, div
73904675b5d0c5b74bd13c029b52f7a6592eddac
3,658,173
def get_y_generator_method(x_axis, y_axis):
    """Return the y-value generator method for the given x-axis.

    Arguments:
    x_axis -- an instance of an XAxis class
    y_axis -- an instance of a YAxis class

    Returns:
    A reference to the y-value generator if it was found; raises ValueError
    otherwise.
    """
    try:
        method_name = AXIS_PAIRS[x_axis.slug][y_axis.slug]
    except KeyError:
        raise ValueError(
            "A %(x)s x-axis cannot be paired with a %(y)s y-axis" % {
                'x': x_axis.__class__.name,
                'y': y_axis.__class__.name  # was x_axis; fixed to report the y-axis
            })

    y_method = getattr(y_axis, method_name, None)
    if not y_method:
        raise ValueError(
            "No method named '%(method)s' exists for the %(axis)s y-axis" % {
                'method': method_name,
                'axis': y_axis.__class__.name
            })

    return y_method
ab0f43743c91cfe9f51e8da3fe976f8c554af5c8
3,658,177
def generate_filename(table_type, table_format):
    """Generate the table's filename given its type and file format."""
    ext = TABLE_FORMATS[table_format]
    return f'EIA_MER_{table_type}.{ext}'
076ef1e77cf4ec3c1be4fb602e5a1972eb75e826
3,658,178
import numpy as np

def rescale_coords(df, session_epochs, maze_size_cm):
    """
    Rescale xy coordinates of each epoch into cm.

    note: automatically detects a linear track by the x-to-y ratio

    input:
        df: [ts,x,y] pandas data frame
        session_epochs: nelpy epoch class with epoch times
        maze_size_cm: list with size of maze in cm for each epoch
    output:
        df: rescaled df
    """
    for i, val in enumerate(session_epochs.data):
        temp_df = df[df['ts'].between(val[0], val[1])]
        x_range = np.nanmax(temp_df.x) - np.nanmin(temp_df.x)
        y_range = np.nanmax(temp_df.y) - np.nanmin(temp_df.y)
        x_y_ratio = x_range / y_range
        # If the ratio of x to y is > 5, it is probably a linear track.
        if x_y_ratio > 5:
            df.loc[df['ts'].between(val[0], val[1]), 'x'] = rescale(temp_df.x, 0, maze_size_cm[i])
            df.loc[df['ts'].between(val[0], val[1]), 'y'] = rescale(temp_df.y, 0, maze_size_cm[i] / x_y_ratio)
        else:
            df.loc[df['ts'].between(val[0], val[1]), 'x'] = rescale(temp_df.x, 0, maze_size_cm[i])
            df.loc[df['ts'].between(val[0], val[1]), 'y'] = rescale(temp_df.y, 0, maze_size_cm[i])
    return df
49da12dca1e3b7e30bf909a73505a129941bd3db
3,658,179
def get_vocabulary(query_tree):
    """Extracts the normalized search terms from the leaf nodes of a parsed
    query to construct the vocabulary for the text vectorization.

    Arguments
    ---------
    query_tree: pythonds.trees.BinaryTree
        The binary tree object representing a parsed search query. Each leaf
        node is a search term and internal nodes represent boolean operations.
        See parse_query() for details.

    Returns
    -------
    vocabulary: list
        List of strings representing unique normalized search terms.
    """

    def _getleafnodes(node):
        terms = []
        if node.isLeaf():
            return terms + [node.normedterm]
        elif node.leftChild and not node.rightChild:
            return terms + _getleafnodes(node.getLeftChild())
        elif node.rightChild and not node.leftChild:
            return terms + _getleafnodes(node.getRightChild())
        else:  # has two children
            return terms + _getleafnodes(node.getLeftChild()) \
                         + _getleafnodes(node.getRightChild())

    # Extract terms from the leaf nodes of the query object.
    terms = _getleafnodes(query_tree)

    # Remove duplicates.
    vocabulary = list(set(terms))
    return vocabulary
bd03f4894cd3f9a7964196bfb163335f84a048d7
3,658,181
def pubkey_to_address(pubkey):
    """Convert a public key (in hex) to a Bitcoin address."""
    return bin_to_b58check(hash_160(changebase(pubkey, 16, 256)))
bbfbe40346681a12d8b71ce8df6ef8670eb3e424
3,658,182
def find_point_in_section_list(point, section_list):
    """Returns the start of the section the given point belongs to.

    The given list is assumed to contain start points of consecutive sections,
    except for the final point, assumed to be the end point of the last
    section. For example, the list [5, 8, 30, 31] is interpreted as the
    following list of sections: [5-8), [8-30), [30-31], so the points -32,
    4.5, 32 and 100 all match no section, while 5 and 7.5 match [5-8) and so
    for them the function returns 5, and 30, 30.7 and 31 all match [30-31].

    Parameters
    ----------
    point : float
        The point for which to match a section.
    section_list : sortedcontainers.SortedList
        A list of start points of consecutive sections.

    Returns
    -------
    float
        The start of the section the given point belongs to. None if no match
        was found.

    Example
    -------
    >>> from sortedcontainers import SortedList
    >>> seclist = SortedList([5, 8, 30, 31])
    >>> find_point_in_section_list(4, seclist)
    >>> find_point_in_section_list(5, seclist)
    5
    >>> find_point_in_section_list(27, seclist)
    8
    >>> find_point_in_section_list(31, seclist)
    30
    """
    if point < section_list[0] or point > section_list[-1]:
        return None
    if point in section_list:
        if point == section_list[-1]:
            return section_list[-2]
        ind = section_list.bisect(point) - 1
        if ind == 0:
            return section_list[0]
        return section_list[ind]
    try:
        ind = section_list.bisect(point)
        return section_list[ind - 1]
    except IndexError:
        return None
47d5cda15b140ba8505ee658fd46ab090b2fda8a
3,658,184
import tensorflow as tf

def all_reduce_sum(t, dim):
    """Like reduce_sum, but broadcasts the sum out to every entry in the
    reduced dim."""
    t_shape = t.get_shape()
    rank = t.get_shape().ndims
    return tf.tile(
        tf.expand_dims(tf.reduce_sum(t, dim), dim),
        [1] * dim + [t_shape[dim].value] + [1] * (rank - dim - 1))
c4048c308ccf2b7550e125b63911183d097959f5
3,658,187
import tensorflow as tf

def get_deltas_from_bboxes_and_landmarks(prior_boxes, bboxes_and_landmarks):
    """Calculating bounding box and landmark deltas for given ground truth
    boxes and landmarks.

    inputs:
        prior_boxes = (total_bboxes, [center_x, center_y, width, height])
        bboxes_and_landmarks = (batch_size, total_bboxes,
            [y1, x1, y2, x2, landmark_x0, landmark_y0, ...,
             landmark_xN, landmark_yN])

    outputs:
        deltas = (batch_size, total_bboxes,
            [delta_bbox_y, delta_bbox_x, delta_bbox_h, delta_bbox_w,
             delta_landmark_x0, delta_landmark_y0, ...,
             delta_landmark_xN, delta_landmark_yN])
    """
    gt_width = bboxes_and_landmarks[..., 3] - bboxes_and_landmarks[..., 1]
    gt_height = bboxes_and_landmarks[..., 2] - bboxes_and_landmarks[..., 0]
    gt_ctr_x = bboxes_and_landmarks[..., 1] + 0.5 * gt_width
    gt_ctr_y = bboxes_and_landmarks[..., 0] + 0.5 * gt_height
    #
    delta_x = (gt_ctr_x - prior_boxes[..., 0]) / prior_boxes[..., 2]
    delta_y = (gt_ctr_y - prior_boxes[..., 1]) / prior_boxes[..., 3]
    delta_w = gt_width / prior_boxes[..., 2]
    delta_h = gt_height / prior_boxes[..., 3]
    #
    total_landmarks = tf.shape(bboxes_and_landmarks[..., 4:])[-1] // 2
    xy_pairs = tf.tile(prior_boxes[..., 0:2], (1, total_landmarks))
    wh_pairs = tf.tile(prior_boxes[..., 2:4], (1, total_landmarks))
    landmark_deltas = (bboxes_and_landmarks[..., 4:] - xy_pairs) / wh_pairs
    #
    return tf.concat([tf.stack([delta_y, delta_x, delta_h, delta_w], -1),
                      landmark_deltas], -1)
4945723b431657b643ef8799eeabacf0a745b8d2
3,658,188
def choose(population, sample):
    """
    Returns ``population`` choose ``sample``, given by:
    n! / k!(n-k)!, where n == ``population`` and k == ``sample``.
    """
    if sample > population:
        return 0
    s = max(sample, population - sample)
    assert s <= population
    assert population > -1
    if s == population:
        return 1
    numerator = 1
    denominator = 1
    for i in range(s + 1, population + 1):
        numerator *= i
        denominator *= (i - s)
    return numerator / denominator
659eac683cae737888df74c0db21aa3ece746b33
3,658,189
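A quick check of the binomial-coefficient helper above (note it returns a float because of the true division):

print(choose(5, 2))    # 10.0
print(choose(52, 5))   # 2598960.0 -- number of 5-card poker hands
print(choose(3, 5))    # 0 -- sample larger than population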
import numpy as np

def _where_cross(data, threshold):
    """Return an array of the indexes where the data first crosses above
    threshold."""
    Is = np.where(data > threshold)[0]
    Is = np.concatenate(([0], Is))
    Ds = Is[:-1] - Is[1:] + 1
    return Is[np.where(Ds)[0] + 1]
85fe8da97210e2eb7e3c9bca7074f0b0b88c425a
3,658,190
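A usage sketch for the crossing detector above (assumes the function and numpy are in scope):

data = np.array([0, 0, 5, 5, 0, 7, 0])
# The signal rises above the threshold at indices 2 and 5.
print(_where_cross(data, 3))  # [2 5]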
import csv
import numpy as np
from scipy import interpolate

def TVD_to_MD(well, TVD):
    """Returns the measured depth position for a well based on a true
    vertical depth.

    Parameters
    ----------
    well : str
        Selected well
    TVD : float
        Desired true vertical depth

    Returns
    -------
    float
        MD : measured depth

    Attention
    ---------
    The input information comes from the files input/ubication.csv and
    input/survey/{well}_MD.dat.

    Note
    ----
    A linear regression is used.

    Examples
    --------
    >>> TVD_to_MD('WELL-1', -100)
    """
    file = "../input/survey/%s_MD.dat" % well
    MD, DeltaY, DeltaX = np.loadtxt(file, skiprows=1, unpack=True, delimiter=',')

    reader = csv.DictReader(open("../input/ubication.csv", 'r'))
    dict_ubication = {}
    for line in reader:
        dict_ubication[line['well']] = line

    z_0 = float(dict_ubication[well]['masl'])
    x_0 = float(dict_ubication[well]['east'])
    y_0 = float(dict_ubication[well]['north'])

    # Initialize the delta z values.
    z_delta = [0 for i in MD]
    x = [0 for i in MD]
    y = [0 for i in MD]
    z = [0 for i in MD]

    # Assuming a straight line between points.
    for j in range(len(MD)):
        if j == 0:
            z_delta[j] = 0
        else:
            z_delta[j] = ((MD[j] - MD[j - 1])**2
                          - (DeltaX[j] - DeltaX[j - 1])**2
                          - (DeltaY[j] - DeltaY[j - 1])**2)**0.5 + z_delta[j - 1]

    # Conversion from delta to absolute.
    for j in range(len(MD)):
        z[j] = z_0 - z_delta[j]

    # Function of X-Y-Z with MD.
    funzmd = interpolate.interp1d(z, MD)

    try:
        MD = funzmd(TVD)
    except ValueError:
        MD = np.nan

    return MD
eadca9f9e5ae22fc7a6d9d31f7f0ee7ba4c26be4
3,658,191
def get_table_b_2_b():
    """表 B.2 居住人数 2 人における照明設備の使用時間率 (b) 休日在宅
    (Table B.2: usage-time rate of lighting equipment for a two-person
    household, (b) at home on holidays)

    Args:

    Returns:
        list: 表 B.2 居住人数 2 人における照明設備の使用時間率 (b) 休日在宅
    """
    table_b_2_b = [
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.25, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.25, 0.00, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00),
        (0.50, 0.25, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00, 0.25, 0.00, 0.25, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00),
        (0.75, 0.25, 0.25, 0.00, 0.25, 0.00, 0.00, 0.75, 0.25, 0.25, 0.25, 0.25, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.50, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25),
        (1.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.50, 0.50, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.75, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.50, 0.25, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.50, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.25, 0.00, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00, 0.00),
    ]
    return table_b_2_b
06d29050c0bc61170eeddc75be76fe8bb8422edd
3,658,192
def eea(m, n):
    """
    Compute numbers a, b such that a*m + b*n = gcd(m, n) using the Extended
    Euclidean algorithm.
    """
    p, q, r, s = 1, 0, 0, 1
    while n != 0:
        k = m // n
        m, n, p, q, r, s = n, m - k*n, q, p - k*q, s, r - k*s
    return (p, r)
56e1c59ac3a51e26d416fe5c65cf6612dbe56b8c
3,658,193
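A verification sketch for the extended Euclidean routine above, checking the Bezout identity against math.gcd:

from math import gcd
a, b = eea(240, 46)
print(a, b)                              # -9 47
assert a * 240 + b * 46 == gcd(240, 46)  # -2160 + 2162 == 2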
import numpy as np

def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    # `vector_norm`, `unit_vector`, and `_EPS` are expected to come from the
    # enclosing module (transformations-style helpers).
    v = np.array(point, dtype=np.float64, copy=True)
    a = np.array(axis, dtype=np.float64, copy=True)
    v -= a * np.dot(a, v)  # project v onto the plane perpendicular to a
    n = vector_norm(v)
    if n > _EPS:
        if v[2] < 0.0:
            v *= -1.0
        v /= n
        return v
    if a[2] == 1.0:
        return np.array([1, 0, 0], dtype=np.float64)
    return unit_vector([-a[1], a[0], 0])
a58a80dd29ba785bd829b33ccb283e7c42993218
3,658,195
from typing import Any, Mapping, MutableMapping

def text(
    node: "RenderTreeNode",
    renderer_funcs: Mapping[str, RendererFunc],
    options: Mapping[str, Any],
    env: MutableMapping,
) -> str:
    """Process a text token.

    Text should always be a child of an inline token. An inline token should
    always be enclosed by a heading or a paragraph.
    """
    text = node.content
    if is_text_inside_autolink(node):
        return text

    # Escape backslash to prevent it from making unintended escapes.
    # This escape has to be first, else we start multiplying backslashes.
    text = text.replace("\\", "\\\\")

    text = escape_asterisk_emphasis(text)    # Escape emphasis/strong marker.
    text = escape_underscore_emphasis(text)  # Escape emphasis/strong marker.
    text = text.replace("[", "\\[")  # Escape link label enclosure
    text = text.replace("]", "\\]")  # Escape link label enclosure
    text = text.replace("<", "\\<")  # Escape URI enclosure
    text = text.replace("`", "\\`")  # Escape code span marker

    # Escape "&" if it starts a sequence that can be interpreted as
    # a character reference.
    for char_refs_found, char_ref in enumerate(RE_CHAR_REFERENCE.finditer(text)):
        start = char_ref.start() + char_refs_found
        text = text[:start] + "\\" + text[start:]

    # The parser can give us consecutive newlines which can break
    # the markdown structure. Replace two or more consecutive newlines
    # with newline character's decimal reference.
    text = text.replace("\n\n", "&#10;&#10;")

    # If the last character is a "!" and the token next up is a link, we
    # have to escape the "!" or else the link will be interpreted as image.
    next_sibling = node.next_sibling
    if text.endswith("!") and next_sibling and next_sibling.type == "link":
        text = text[:-1] + "\\!"

    return text
21b39fcdd21cba692a185e4de2c6f648c210e54b
3,658,196
from importlib.util import find_spec

def patch_importlib_util_find_spec(name, package=None):
    """
    Function used to temporarily redirect the search for loaders to the
    hickle_loader directory in the test directory, for testing the loading of
    new loaders.
    """
    return find_spec("hickle.tests." + name.replace('.', '_', 1), package)
7a0082c0af92b4d79a93ae6bbd6d1be6ec0ec357
3,658,197
def format_msg_controller(data):
    """Return a formatted message from a controller.

    :param data: The bytes from the controller message
    :type data: bytes
    """
    return format_message(data, 13, "Controller")
4d1f262fd673eb3948fbc46866931ab6bd7205ee
3,658,198
def initialize_window(icon, title, width, height, graphical):  # pragma: no cover
    """
    Initialize the graphical environment and the window.

    Parameters
    ----------
    icon : Surface
        Window icon
    title : str
        Window name
    width : int
        Window width
    height : int
        Window height
    graphical : bool
        Whether the window should be displayed

    Returns
    -------
    Surface * Surface
        A pair (game surface, display surface)
    """
    game = pygame.Surface((width, height))
    if graphical:
        pygame.display.set_icon(load_image(icon))
        pygame.display.set_caption(title)
        return (game,
                pygame.display.set_mode((width, height),
                                        flags=pygame.RESIZABLE))
    return (game, None)
dbc15729b0cb9548ff229ac69dd5d1f2e76c85e5
3,658,199
import numpy as np

def get_contour_verts(cn):
    """Unpack the SVM contour values."""
    contours = []
    # for each contour line
    for cc in cn.collections:
        paths = []
        # for each separate section of the contour line
        for pp in cc.get_paths():
            xy = []
            # for each segment of that section
            for vv in pp.iter_segments():
                xy.append(vv[0])
            paths.append(np.vstack(xy))
        contours.append(paths)
    return contours
93dc98e758aca4390adf75afa7ef9bede2d2ac1a
3,658,200
import numpy as np
import scipy.linalg as sl

def _compute_covariances(precisions_chol):
    """Compute covariances from the Cholesky decomposition of the precision
    matrices.

    Parameters
    ----------
    precisions_chol : array-like, shape (n_components, n_features, n_features)
        The Cholesky decomposition of the sample precisions.

    Returns
    -------
    covariances : array-like
        The covariance matrices corresponding to the given precision matrices.
    """
    n_components, n_features, _ = precisions_chol.shape
    covariances = np.empty((n_components, n_features, n_features))
    for k, prec_chol in enumerate(precisions_chol):
        cov_chol = sl.solve_triangular(prec_chol, np.eye(n_features), lower=True).T
        covariances[k] = np.dot(cov_chol, cov_chol.T)
    return covariances
4268a807ed2d6a61e69bd2f07ebbdbf332e030da
3,658,202
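A round-trip check of the precision-to-covariance conversion above: if prec_chol is the lower Cholesky factor of the precision, the function should recover the original covariance.

cov = np.array([[2.0, 0.3], [0.3, 1.0]])
prec_chol = np.linalg.cholesky(np.linalg.inv(cov))[None, ...]  # shape (1, 2, 2)
print(np.allclose(_compute_covariances(prec_chol)[0], cov))    # True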
def rad2deg(angle):
    """
    Convert radian to degree.

    Parameters
    ----------
    angle : float
        Angle in radians

    Returns
    -------
    degree : float
        Angle in degrees
    """
    return (180. / PI) * angle
dea3270a96cf82bb136ce4f6e873617245a4bac3
3,658,203
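A trivial usage sketch; PI is assumed to be a module-level constant, so we define one here for the example:

PI = 3.141592653589793  # stand-in for the module's own constant
print(rad2deg(PI))      # 180.0
print(rad2deg(PI / 6))  # ~30.0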
import csv

def parse_kinetics_splits(level):
    """Parse Kinetics-400 dataset into "train", "val", "test" splits.

    Args:
        level (int): Directory level of data. 1 for the single-level
            directory, 2 for the two-level directory.

    Returns:
        list: "train", "val", "test" splits of Kinetics-400.
    """

    def convert_label(s, keep_whitespaces=False):
        """Convert label name to a formal string.

        Remove redundant '"' and convert whitespace to '_'.

        Args:
            s (str): String to be converted.
            keep_whitespaces(bool): Whether to keep whitespace. Default: False.

        Returns:
            str: Converted string.
        """
        if not keep_whitespaces:
            return s.replace('"', '').replace(' ', '_')
        else:
            return s.replace('"', '')

    def line_to_map(x, test=False):
        """A function to map line string to vid and label.

        Args:
            x (str): A single line from Kinetics-400 csv file.
            test (bool): Indicate whether the line comes from test
                annotation file.

        Returns:
            tuple[str, str]: (vid, label), vid is the video id, label is
                the video label.
        """
        if test:
            # vid = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
            vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
            label = -1  # label unknown
            return vid, label
        else:
            vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
            if level == 2:
                vid = f'{convert_label(x[0])}/{vid}'
            else:
                assert level == 1
            label = class_mapping[convert_label(x[0])]
            return vid, label

    train_file = 'data/kinetics400/annotations/kinetics_train.csv'
    val_file = 'data/kinetics400/annotations/kinetics_val.csv'
    test_file = 'data/kinetics400/annotations/kinetics_test.csv'

    csv_reader = csv.reader(open(train_file))
    # skip the first line
    next(csv_reader)

    labels_sorted = sorted(set([convert_label(row[0]) for row in csv_reader]))
    class_mapping = {label: i for i, label in enumerate(labels_sorted)}

    csv_reader = csv.reader(open(train_file))
    next(csv_reader)
    train_list = [line_to_map(x) for x in csv_reader]

    csv_reader = csv.reader(open(val_file))
    next(csv_reader)
    val_list = [line_to_map(x) for x in csv_reader]

    csv_reader = csv.reader(open(test_file))
    next(csv_reader)
    test_list = [line_to_map(x, test=True) for x in csv_reader]

    splits = ((train_list, val_list, test_list), )
    return splits
ee2521919f9f9c3f499cd28bc6003528eb402d2b
3,658,204
def rotateContoursAbout(contours, about, degrees=90, ccw=True):
    """\
    Rotate the given contours the given number of degrees about the point
    `about`, in a clockwise or counter-clockwise direction.
    """
    rt = Transform.rotationAbout(about, degrees, ccw)
    return rt.applyToContours(contours)
c929a8c412f4b3fe9b70c21dde62b0672f575abc
3,658,205
import torch

def coordinate_addition(v, b, h, w, A, B, psize):
    """
    Shape:
        Input:  (b, H*W*A, B, P*P)
        Output: (b, H*W*A, B, P*P)
    """
    assert h == w
    v = v.view(b, h, w, A, B, psize)
    coor = torch.arange(h, dtype=torch.float32) / h
    coor_h = torch.cuda.FloatTensor(1, h, 1, 1, 1, psize).fill_(0.)
    coor_w = torch.cuda.FloatTensor(1, 1, w, 1, 1, psize).fill_(0.)
    coor_h[0, :, 0, 0, 0, 0] = coor
    coor_w[0, 0, :, 0, 0, 1] = coor
    v = v + coor_h + coor_w
    v = v.view(b, h * w * A, B, psize)
    return v
9eeb906539a61b887216c59faf3ac2928e999d6c
3,658,206
import uuid

def ticket() -> str:
    """Generate the ticket parameter required for requests to the Ebai (饿百) API."""
    return str(uuid.uuid1()).upper()
aaf1135d6ef5e61aa65960c5c38007848cbd0b17
3,658,207
import numpy as np
import qiskit

def create_WchainCNOT_layered_ansatz(qc: qiskit.QuantumCircuit,
                                     thetas: np.ndarray,
                                     num_layers: int = 1):
    """Create WchainCNOT layered ansatz.

    Args:
        - qc (qiskit.QuantumCircuit): init circuit
        - thetas (np.ndarray): parameters
        - num_layers (int): number of layers

    Returns:
        - qiskit.QuantumCircuit
    """
    n = qc.num_qubits
    if not isinstance(num_layers, int):
        num_layers = num_layers['num_layers']
    if len(thetas) != num_layers * (n * 3):
        raise Exception(
            'Number of parameters must be equal n_layers * num_qubits * 3')

    for i in range(0, num_layers):
        phis = thetas[i * (n * 3):(i + 1) * (n * 3)]
        qc = create_WchainCNOT(qc)
        qc.barrier()
        qc = create_rz_nqubit(qc, phis[:n])
        qc = create_rx_nqubit(qc, phis[n:n * 2])
        qc = create_rz_nqubit(qc, phis[n * 2:n * 3])
    return qc
488950214a26cdcf1812524511561d19baa9dfc9
3,658,208
import itertools

def birth_brander():
    """
    This pipeline operator will add or update a "birth" attribute for passing
    individuals.

    If the individual already has a birth, just let it float by with the
    original value. If it doesn't, assign the individual the current birth ID,
    and then increment the global, stored birth count.

    We don't increment a birth ID in the ctor because that overall birth count
    will bloat due to clone operations. Inserting this operator into the
    pipeline will ensure that each individual that passes through is properly
    "branded" with a unique birth ID. However, care must be taken to ensure
    that the initial population is similarly branded.

    Provides:

    * brand_population() to brand an entire population all at once, which is
      useful for branding initial populations.
    * brand() for explicitly branding a single individual

    :param next_thing: preceding individual in the pipeline
    :return: branded individual
    """
    # incremented with each birth
    num_births = itertools.count()

    # Sometimes next_thing is a population, so we need this to track
    # the next individual in the population.
    iterator = None

    def brand(individual):
        """Brand the given individual.

        :param individual: to be branded
        :return: branded individual
        """
        if not hasattr(individual, "birth"):
            # Only assign a birth ID if they don't already have one.
            individual.birth = next(num_births)
        return individual

    def brand_population(population):
        """Brand an entire population in one go.

        Usually used to brand an initial population in one shot.

        :param population: to be branded
        :return: branded population
        """
        return [brand(i) for i in population]

    def do_birth_branding(next_thing):
        """
        This has the flexibility of being inserted in a pipeline such that the
        preceding pipeline is a population or a generator that provides an
        individual. It'll flexibly handle either situation.

        :param next_thing: either the next individual in the pipeline or a
            population of individuals to be branded
        :return: branded individual
        """
        nonlocal num_births
        nonlocal iterator

        while True:
            if is_iterable(next_thing):
                # We're being passed in a single individual in a pipeline.
                next_thing = next(next_thing)
            else:
                # We're being passed a test_sequence/population.
                if iterator is None:
                    iterator = iter(next_thing)
                next_thing = next(iterator)

            next_thing = brand(next_thing)
            yield next_thing

    do_birth_branding.brand_population = brand_population

    return do_birth_branding
dd2c1ef2e9ac2f56436e10829ca9c0685439ce6d
3,658,209
import numpy as np

def random_fit_nonnegative(values, n):
    """
    Generates n random values using a normal distribution fitted from the
    values passed as argument. Returns only non-negative values.

    :param values: array/list to use as model for the random data
    :param n: number of random elements to return
    :returns: an array of n random non-negative numbers
    """
    values = np.array(values)
    mean = np.mean(values)
    sd = np.std(values)

    random_values = np.empty(0)
    offset = 0.05  # 5% oversampling to compensate for values less than 0
    while len(random_values) < n:
        random_values = np.round(np.random.normal(mean, sd, round(n * (1 + offset))))
        random_values = random_values[random_values >= 0]
        # If the while-loop check fails, the next pass tries a larger offset.
        offset *= 2

    # Slice the first n elements and cast the array to int.
    return random_values[:n].astype("int")
9591b87b36c668681873fda1969d1710e7a2dd8b
3,658,211
import numpy as np

def channel_values(channel_freqs, channel_samples, dt, t):
    """Computes the value of channels with given frequencies, samples, sample
    size and current time.

    Args:
        channel_freqs (array): 1d array of channel frequencies
        channel_samples (array): 2d array of channel samples, the first index
            being time step and the second index indexing channel
        dt (float): size of each sample
        t (float): current time

    Returns:
        array: array of channel values at the given time
    """
    sample_idx = int(t // dt)
    if sample_idx >= len(channel_samples):
        sample_idx = len(channel_samples) - 1

    sample_vals = channel_samples[sample_idx]

    return np.real(sample_vals * np.exp(1j * 2 * np.pi * channel_freqs * t))
cdc555a5ab2d21c0dba71f2f24386144796898c1
3,658,213
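A usage sketch for the channel evaluator above (hypothetical values; assumes the function and numpy are in scope):

freqs = np.array([0.0, 1.0])                  # a DC channel and a 1 Hz channel
samples = np.array([[1.0, 1.0], [0.5, 0.5]])  # two time steps, two channels
# t=0.5, dt=1.0 selects the first sample row; exp(2*pi*j*1*0.5) = -1 for
# the 1 Hz channel, so the result is [ 1. -1.].
print(channel_values(freqs, samples, 1.0, 0.5))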
def get_small_corpus(num=10000):
    """
    Get a small corpus, used for debugging the network model.

    :param num: the first num/2 couplets of the corpus
    :return: by default, a list of the first 500 couplets (1,000 sentences)
    """
    corpus = getFile('/total_list.json')
    return corpus[:num]
032ea34eaa6b5e1478e3770c91fa3da3214d907b
3,658,215
import pandas as pd
from tqdm import tqdm_notebook

def groupby_apply2(df_1, df_2, cols, f, tqdn=True):
    """Apply a function `f` that takes two dataframes and returns a dataframe.

    Groups inputs by `cols`, evaluates for each group, and concatenates the
    result.
    """
    d_1 = {k: v for k, v in df_1.groupby(cols)}
    d_2 = {k: v for k, v in df_2.groupby(cols)}

    progress = tqdm_notebook if tqdn else (lambda x: x)

    arr = []
    for k in progress(d_1):
        arr.append(f(d_1[k], d_2[k]))
    return pd.concat(arr)
082ce61477c116ac421ab086e68b040dfc04ffff
3,658,216
import datetime
import pytz

def login(request):
    """Logs in the user if the given credentials are valid."""
    username = request.data['username']
    password = request.data['password']

    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        user = None

    if user is not None:
        encoded = user.password
        hasher = PBKDF2PasswordHasher()
        login_valid = hasher.verify(password, encoded)

        if login_valid:
            key = username + str(datetime.datetime.now())
            key = hasher.encode(key, 'key', 10)
            life = datetime.datetime.now() + datetime.timedelta(hours=14)
            timezone = pytz.timezone("America/Bogota")
            life_aware = timezone.localize(life)
            loginsession = LoginSession(key=key, life=life_aware, user=user)
            loginsession.save()
            request.session['loginsession'] = key
            data = {
                'success': True,
                'key': key
            }
            return Response(data, status=status.HTTP_200_OK,
                            content_type='application/json')

    data = {
        'success': False,
        'message': "Nombre de usuario o contraseña incorrectos"
    }
    return Response(data, status=status.HTTP_200_OK,
                    content_type='application/json')
79add2a805a36cd3339aeecafb7d0af95e42d2e5
3,658,217
def replace_data_in_gbq_table(project_id, table_id, complete_dataset):
    """Replace data in a Google Cloud BigQuery table."""
    complete_dataset.to_gbq(
        destination_table=table_id,
        project_id=project_id,
        credentials=credentials,
        if_exists="replace",
    )
    return None
1de5464cdce77f94857abe46a93f7b64f5e2dd1e
3,658,218
def default_pubkey_inner(ctx):
    """Default expression for "pubkey_inner": tap.inner_pubkey."""
    return get(ctx, "tap").inner_pubkey
9333a62c0111f28e71c202b5553d7f2a8c4f71ce
3,658,219
import tensorflow as tf

def quantize_8(image):
    """Converts and quantizes an image to 2^8 discrete levels in [0, 1]."""
    q8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
    return tf.cast(q8, tf.float32) * (1.0 / 255.0)
d822ff34b9941c6a812a69766de3483c2348e7da
3,658,220
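A usage sketch for the quantizer above (assumes TensorFlow eager mode):

img = tf.constant([[0.0, 0.5, 1.0]])
# 0.5 maps to uint8 128 and back to 128/255 ~= 0.502; 0.0 and 1.0 round-trip.
print(quantize_8(img).numpy())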
def get_clients(wlc, *vargs, **kvargs):
    """
    Create a single dictionary containing information about all associated
    stations.
    """
    rsp = wlc.rpc.get_stat_user_session_status()
    ret_data = {}
    for session in rsp.findall('.//USER-SESSION-STATUS'):
        locstat = session.find('.//USER-LOCATION-MEMBER')
        ret_data[session.get('mac-addr')] = dict(session.attrib)
        ret_data[session.get('mac-addr')].update(locstat.attrib)
    return ret_data
c4ab5941033632d7f2b95bc23878f0464d12adb7
3,658,221
def coalmine(eia923_dfs, eia923_transformed_dfs):
    """Transforms the coalmine_eia923 table.

    Transformations include:

    * Remove fields implicated elsewhere.
    * Drop duplicates with MSHA ID.

    Args:
        eia923_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a page from the EIA923 form, as reported in the
            Excel spreadsheets they distribute.
        eia923_transformed_dfs (dict): A dictionary of DataFrame objects in
            which pages from EIA923 form (keys) correspond to normalized
            DataFrames of values from that page (values).

    Returns:
        dict: eia923_transformed_dfs, a dictionary of DataFrame objects in
        which pages from EIA923 form (keys) correspond to normalized
        DataFrames of values from that page (values).
    """
    # These are the columns that we want to keep from FRC for the
    # coal mine info table.
    coalmine_cols = ['mine_name', 'mine_type', 'state', 'county_id_fips',
                     'mine_id_msha']

    # Make a copy so we don't alter the FRC data frame... which we'll need
    # to use again for populating the FRC table (see below).
    cmi_df = eia923_dfs['fuel_receipts_costs'].copy()
    cmi_df = _coalmine_cleanup(cmi_df)

    # Keep only the columns listed above:
    cmi_df = cmi_df[coalmine_cols]

    # If we actually *have* an MSHA ID for a mine, then we have a totally
    # unique identifier for that mine, and we can safely drop duplicates and
    # keep just one copy of that mine, no matter how different all the other
    # fields associated with the mine info are... Here we split out all the
    # coalmine records that have an MSHA ID, remove them from the CMI
    # data frame, drop duplicates, and then bring the unique mine records
    # back into the overall CMI dataframe...
    cmi_with_msha = cmi_df[cmi_df['mine_id_msha'] > 0]
    cmi_with_msha = cmi_with_msha.drop_duplicates(subset=['mine_id_msha', ])
    # drop() and append() return new frames, so assign the results back
    # (the original code discarded them, leaving cmi_df unchanged).
    cmi_df = cmi_df.drop(cmi_df[cmi_df['mine_id_msha'] > 0].index)
    cmi_df = cmi_df.append(cmi_with_msha)

    cmi_df = cmi_df.drop_duplicates(subset=['mine_name', 'state',
                                            'mine_id_msha', 'mine_type',
                                            'county_id_fips'])

    # Drop null values if they occur in vital fields....
    cmi_df.dropna(subset=['mine_name', 'state'], inplace=True)

    # We need a mine id to associate this coalmine table with the frc table.
    # In order to do that, we need to create a clean index, like an
    # auto-incremented id column in a db, which will later be used as a
    # primary key in the coalmine table and a foreign key in the frc table.
    # First we reset the index to get a clean index.
    cmi_df = cmi_df.reset_index()
    # Then we get rid of the old index.
    cmi_df = cmi_df.drop(labels=['index'], axis=1)
    # Then name the index id.
    cmi_df.index.name = 'mine_id_pudl'
    # Then make the id index a column for simpler transferability.
    cmi_df = cmi_df.reset_index()

    cmi_df = PUDL_META.get_resource("coalmine_eia923").encode(cmi_df)

    eia923_transformed_dfs['coalmine_eia923'] = cmi_df

    return eia923_transformed_dfs
eb420428dcb2dceeeab1c5bbdceee7c7da2e5c11
3,658,222
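A toy sketch of the MSHA-dedup step in isolation (mine names and IDs are made up; pd.concat stands in for the frame's append call, since DataFrame.append was removed in pandas 2.0): rows sharing an MSHA ID collapse to one record, while rows without an ID are handled by the later multi-column dedup.

import pandas as pd

# Two spellings of the same mine share MSHA ID 100; ID 0 means "no MSHA ID".
cmi_df = pd.DataFrame({
    "mine_name": ["Alpha", "Alpha Mine", "Beta", "Beta"],
    "state": ["WV", "WV", "KY", "KY"],
    "mine_id_msha": [100, 100, 0, 0],
})
with_msha = cmi_df[cmi_df["mine_id_msha"] > 0].drop_duplicates(subset=["mine_id_msha"])
without_msha = cmi_df[cmi_df["mine_id_msha"] <= 0]
print(pd.concat([without_msha, with_msha], ignore_index=True))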
import random


def _throw_object_x_at_y():
    """
    Interesting interactions:
    * If anything is breakable

    :return:
    """
    all_pickupable_objects_x = env.all_objects_with_properties({'pickupable': True})
    x_weights = [10.0 if (x['breakable'] or x['mass'] > 4.0) else 1.0
                 for x in all_pickupable_objects_x]
    if len(all_pickupable_objects_x) == 0:
        raise ValueError('No pickupable objects')
    all_objects_y = env.all_objects_with_properties({'pickupable': True})
    y_weights = [10.0 if (y['breakable'] and not y['pickupable']) else (
        4.0 if y['breakable'] else 1.0) for y in all_objects_y]
    object_x = all_pickupable_objects_x[_weighted_choice(x_weights)]
    object_y = all_objects_y[_weighted_choice(y_weights)]
    if object_x['objectId'] == object_y['objectId']:
        raise ValueError('objects are the same?')

    #####################
    hardness_options = {'softly': 10.0, 'normally': 100.0, 'aggressively': 1000.0}
    hardness = random.choice(sorted(hardness_options.keys()))

    renv = RecordingEnv(env,
                        text=f'Throw $1 at $2 {hardness}.',
                        main_object_ids=(object_x['objectId'], object_y['objectId'])
                        )
    s_a = pickup_object(renv, object_x['objectId'], navigate=True)
    print("Pickup {} succeeds".format(object_x['objectId']), flush=True)

    path2use = path_to_object(renv.env, object_y, angle_noise=0,
                              dist_to_obj_penalty=0.1)
    while len(path2use) > 0 and path2use[-1]['action'].startswith(('Rotate', 'Look')):
        path2use.pop(-1)
    for p in path2use:
        renv.step(p)

    # Teleport, throw, then snap back to grid
    # Face object
    old_pos = renv.env.get_agent_location()
    new_pos = {k: v for k, v in old_pos.items()}
    new_pos['rotation'] = rotation_angle_to_object(object_y, renv.env.get_agent_location())
    new_pos['horizon'] = horizon_angle_to_object(object_y, renv.env.get_agent_location())
    renv.env.teleport_agent_to(**new_pos, ignore_y_diffs=True,
                               only_initially_reachable=False)
    if not renv.env.last_action_success:
        raise ValueError("teleport failed")
    if renv.env.get_agent_location()['y'] < -10:
        raise ValueError("negative coords")

    s_b = renv.step(dict(action='ThrowObject',
                         moveMagnitude=hardness_options[hardness],
                         forceAction=True))
    # If something broke then things are interesting
    is_interesting = s_b and any([(x['isBroken'] or 'Cracked' in x['objectType'])
                                  for x in renv.new_items.values()])
    renv.env.teleport_agent_to(**old_pos, ignore_y_diffs=True)
    return renv, is_interesting
03f6c6a99754d79d94df5a4f857ae358db663081
3,658,223
def plot(
    X,
    color_by=None,
    color_map="Spectral",
    colors=None,
    edges=None,
    axis_limits=None,
    background_color=None,
    marker_size=1.0,
    figsize_inches=(8.0, 8.0),
    savepath=None,
):
    """Plot an embedding, in one, two, or three dimensions.

    This function plots embeddings. The input embedding's dimension should
    be at most 3. The embedding is visualized as a scatter plot. The points
    can optionally be colored according to categorical or continuous values,
    or according to a pre-defined sequence of colors. Additionally, edges can
    optionally be superimposed.

    Arguments
    ---------
    X: array-like
        The embedding to plot, of shape ``(n_items, embedding_dim)``. The
        second dimension should be 1, 2, or 3.
    color_by: array-like, optional
        A sequence of values, one for each item, which should be used to
        color each embedding vector. These values may either be categorical
        or continuous. For example, if ``n_items`` is 4,

        .. code:: python3

            np.array(['dog', 'cat', 'zebra', 'cat'])
            np.array([0, 1, 1, 2])
            np.array([0.1, 0.5, 0.31, 0.99])

        are all acceptable. The first two are treated as categorical, the
        third is continuous. A finite number of colors is used when the
        values are categorical, while a spectrum of colors is used when the
        values are continuous.
    color_map: str or matplotlib colormap instance
        Color map to use when resolving ``color_by`` to colors; ignored when
        ``color_by`` is None.
    colors: array-like, optional
        A sequence of colors, one for each item, specifying the exact color
        each item should be colored. Each row must represent an RGBA value.

        Only one of ``color_by`` and ``colors`` should be non-None.
    edges: array-like, optional
        List of edges to superimpose over the scatter plot, shape ``(any, 2)``
    axis_limits: tuple, optional
        tuple ``(limit_low, limit_high)`` of axis limits, applied to both
        the x and y axis.
    background_color: str, optional
        color of background
    marker_size: float, optional
        size of each point in the scatter plot
    figsize_inches: tuple
        size of figures in inches: ``(width_inches, height_inches)``
    savepath: str, optional
        path to save the plot.

    Returns
    -------
    matplotlib.Axes:
        Axis on which the embedding is plotted.
    """
    if color_by is not None and colors is not None:
        raise ValueError("Only one of `color_by` and `colors` can be non-None")
    ax = _plot(
        X=X,
        color_by=color_by,
        cmap=color_map,
        colors=colors,
        edges=edges,
        lim=axis_limits,
        background_color=background_color,
        s=marker_size,
        figsize=figsize_inches,
    )
    if savepath is not None:
        plt.savefig(savepath)
    return ax
f6c5ef6084278bcd3eb81c9286af53594aca4a1e
3,658,224
import numpy as np


def CausalConvIntSingle(val, time, kernel):
    """
    Compute the causal convolution of time-varying data with a given
    kernel function, using trapezoidal quadrature.
    """
    ntime = time.size
    dt_temp = np.diff(time)
    dt = np.r_[time[0], dt_temp]
    out = np.zeros_like(val)
    # out[0] stays zero: the causal integral over an empty interval vanishes.
    # (The original carried an unreachable i == 0 branch inside this loop.)
    for i in range(1, ntime):
        temp = 0.
        for k in range(1, i + 1):
            temp += val[k - 1] * kernel(time[i] - time[k - 1]) * dt[k] * 0.5
            temp += val[k] * kernel(time[i] - time[k]) * dt[k] * 0.5
        out[i] = temp
    return out
a4e94bfe2213c428042df4e561f584ffede3f9ab
3,658,225
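A quick numerical sanity check (the signal and kernel here are illustrative choices): convolving a constant signal with exp(-t) should approach the kernel's integral, i.e. 1 - e^{-t}, up to quadrature error.

import numpy as np

time = np.linspace(0.0, 10.0, 201)
out = CausalConvIntSingle(np.ones_like(time), time, lambda t: np.exp(-t))
print(out[-1])  # ~1.0, the trapezoid approximation of 1 - e^{-10}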
def sbox1(v):
    """AES inverse S-Box."""
    w = mpc.to_bits(v)
    z = mpc.vector_add(w, B)
    y = mpc.matrix_prod([z], A1, True)[0]
    x = mpc.from_bits(y)**254
    return x
c10e9d440e1c1149c8d2b0f9fbd3fd5d4868596c
3,658,226
def _get_photon_info_COS(tag, x1d, traceloc='stsci'):
    """
    Add spectral units (wavelength, cross dispersion distance, energy/area)
    to the photon table in the fits data unit "tag".

    For G230L, you will get several 'xdisp' columns -- one for each segment.
    This allows for the use of overlapping background regions.

    Parameters
    ----------
    tag : photon event list ("tag") fits data unit
    x1d : corresponding extracted-spectrum ("x1d") fits data unit, or None
    traceloc : method for locating the spectral trace: 'stsci', 'median',
        'lya', or an explicit y value

    Returns
    -------
    xdisp, order
    """
    if x1d is not None:
        xd, xh = x1d[1].data, x1d[1].header

    det = tag[0].header['detector']
    segment = tag[0].header['segment']

    data_list = []
    for i, t in enumerate(tag):
        if t.name != 'EVENTS':
            continue

        td, th = t.data, t.header
        """
        Note: How STScI extracts the spectrum is unclear. Using
        'y_lower/upper_outer' from the x1d reproduces the x1d gross array,
        but this results in an extraction ribbon that has a varying height
        and center -- not the parallelogram that is described in the Data
        Handbook as of 2015-07-28. The parameters in the xtractab reference
        file differ from those populated in the x1d header. So, I've punted
        and stuck with using the x1d header parameters because it is easy and
        I think it will make little difference for most sources. The largest
        slope listed in the xtractab results in a 10% shift in the spectral
        trace over the length of the detector. In general, I should just
        check to be sure the extraction regions I'm using are reasonable.
        """
        data = [td[s] for s in ['time', 'wavelength', 'epsilon', 'dq', 'pha']]

        if det == 'NUV':
            # all "orders" (segments) of the NUV spectra fall on the same
            # detector and are just offset in y; I'll just duplicate the
            # events for each spectrum
            segs = [s[-1] for s in xd['segment']]
            orders = list(range(len(segs)))
        else:
            seg = segment[-1]
            segs = [seg]
            orders = [0 if seg == 'A' else 1]
        for order, seg in zip(orders, segs):
            if not (traceloc == 'stsci' or type(traceloc) in [int, float]) and det == 'NUV':
                raise NotImplementedError('NUV detector has multiple traces on the same '
                                          'detector, so custom traceloc has not been '
                                          'implemented.')
            if traceloc == 'stsci':
                yspec = xh['SP_LOC_' + seg]
            elif traceloc == 'median':
                Npixx = th['talen2']
                x, y = td['xfull'], td['yfull']
                yspec = _median_trace(x, y, Npixx, 8)
            elif traceloc == 'lya':
                Npixy = th['talen3']
                yspec = _lya_trace(td['wavelength'], td['yfull'], Npixy)
            elif type(traceloc) in [int, float]:
                yspec = float(traceloc)
            else:
                raise ValueError('traceloc={} not recognized.'.format(traceloc))

            xdisp = td['yfull'] - yspec
            order_vec = _np.ones_like(xdisp, 'i2') * order

            if det == 'NUV':
                w = data[1]
                keep = (xdisp > -15.) & (xdisp < 15.)
                x = td['xfull']
                xref, wref = x[keep], w[keep]
                isort = _np.argsort(xref)
                xref, wref = xref[isort], wref[isort]
                wnew = _np.interp(x, xref, wref)
                data_list.append(data[:1] + [wnew] + data[2:] + [xdisp, order_vec])
            else:
                data_list.append(data + [xdisp, order_vec])

    data = list(map(_np.hstack, list(zip(*data_list))))
    return data
d1f06d6b8b26894a3297471c74c291cd15b3cb22
3,658,227
def maximum_value(tab):
    """
    brief: return maximum value of the list
    args:
        tab: a list of numeric values; expects at least one positive value
    return:
        the max value of the list
        the index of the max value
    raises:
        ValueError if the input is not a list
        ValueError if no positive value is found
    """
    if not isinstance(tab, list):
        raise ValueError('Expected a list as input')

    valMax = 0.0
    valMaxIndex = -1
    nPositiveValues = 0
    for i in range(len(tab)):
        if tab[i] > 0:
            nPositiveValues += 1
        if tab[i] > valMax:
            valMax = float(tab[i])
            valMaxIndex = i

    if nPositiveValues <= 0:
        raise ValueError('No positive value found')

    return valMax, valMaxIndex
1c31daf3a953a9d781bc48378ef53323313dc22a
3,658,228
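Usage examples (values chosen for illustration); note the all-negative case raises rather than returning a value:

print(maximum_value([3, -1, 7.5, 2]))  # (7.5, 2)
print(maximum_value([0.5]))            # (0.5, 0)
maximum_value([-1, -2])                # raises ValueError: No positive value found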
import mpmath


def pdf(x, k, loc, scale):
    """
    Probability density function for the Weibull distribution (for minima).

    This is a three-parameter version of the distribution.  The more typical
    two-parameter version has just the parameters k and scale.
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        k, loc, scale = _validate_params(k, loc, scale)
        if x == loc:
            if k < 1:
                return mpmath.mp.inf
            elif k == 1:
                return 1/scale
            else:
                return mpmath.mp.zero
        if x < loc:
            return mpmath.mp.zero
        return mpmath.exp(logpdf(x, k, loc, scale))
77efc3f7ceda57377a412dc7641114cea3562953
3,658,229
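Since pdf() relies on the module-private helpers _validate_params and logpdf (not shown), a direct evaluation of the standard three-parameter Weibull-minimum density, f(x) = (k/scale) * z^(k-1) * exp(-z^k) with z = (x - loc)/scale, serves as an independent cross-check (parameter values here are arbitrary):

import mpmath

k, loc, scale = mpmath.mpf(2), mpmath.mpf(0), mpmath.mpf('1.5')
x = mpmath.mpf(1)
z = (x - loc) / scale
print((k / scale) * z**(k - 1) * mpmath.exp(-z**k))  # should equal pdf(1, 2, 0, 1.5)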
def rescale_column_test(img, img_shape, gt_bboxes, gt_label, gt_num):
    """Rescale operation for evaluation images."""
    img_data, scale_factor = mmcv.imrescale(img, (config.img_width, config.img_height),
                                            return_scale=True)
    if img_data.shape[0] > config.img_height:
        img_data, scale_factor2 = mmcv.imrescale(img_data, (config.img_height, config.img_height),
                                                 return_scale=True)
        scale_factor = scale_factor * scale_factor2

    pad_h = config.img_height - img_data.shape[0]
    pad_w = config.img_width - img_data.shape[1]
    assert pad_h >= 0 and pad_w >= 0

    pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype)
    pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data

    img_shape = np.append(img_shape, (scale_factor, scale_factor))
    img_shape = np.asarray(img_shape, dtype=np.float32)

    return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num)
11c7be91988aba926e5d9934443545a5112d2525
3,658,231
import numpy as np


def resolvability_query(m, walks_):
    """
    :param m: cost matrix
    :param walks_: list of 0-percolations, each followed by its index of
        redundancy, as returned by percolation_finder
    :return: m again untouched, followed by the list of 0-percolations with
        minimal index of redundancy, and a flag: True if the minimal index
        is 0 (so we already have our solution), False otherwise.
    """
    min_redundancy = np.min(walks_[1::2])
    filtered_walks = [walks_[i] for i in range(0, len(walks_), 2)
                      if walks_[i + 1] == min_redundancy]
    flag = (min_redundancy == 0)
    return [m, filtered_walks, flag]
968acb44a94952187cd91ed50ee2f7c1d1f0f54f
3,658,233
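A toy call (the walks and cost matrix are placeholders; walks_ alternates a percolation with its redundancy index, mimicking the elided percolation_finder output):

import numpy as np

m = np.zeros((2, 2))                     # placeholder cost matrix
walks_ = [["a", "b"], 1, ["c", "d"], 0]  # walk, redundancy, walk, redundancy
_, filtered, flag = resolvability_query(m, walks_)
print(filtered, flag)  # [['c', 'd']] True -- redundancy 0 means we are done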
import math


def dsh(
    incidence1: float, solar_az1: float, incidence2: float, solar_az2: float
):
    """Returns the Shadow-Tip Distance (dsh) as detailed in
    Becker et al. (2015).

    The input angles are assumed to be in radians.

    This is defined as the distance between the tips of the shadows in the
    two images for a hypothetical vertical post of unit height.  The "shadow
    length" describes the shadow of a hypothetical pole so it applies whether
    there are actually shadows in the image or not.  It's a simple and
    consistent geometrical way to quantify the difference in illumination.
    This quantity is computed analogously to dp.
    """

    def shx(inc: float, sunazgnd: float):
        return -1 * math.tan(inc) * math.cos(sunazgnd)

    def shy(inc: float, sunazgnd: float):
        return math.tan(inc) * math.sin(sunazgnd)

    shx1 = shx(incidence1, solar_az1)
    shx2 = shx(incidence2, solar_az2)
    shy1 = shy(incidence1, solar_az1)
    shy2 = shy(incidence2, solar_az2)

    return math.sqrt(math.pow(shx1 - shx2, 2) + math.pow(shy1 - shy2, 2))
5aef1c9d7ffeb3e8534568a53cf537d26d97324a
3,658,235
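A worked example with arbitrary geometry: 30° vs 45° incidence with sun azimuths 90° apart gives shadow tips at (-tan 30°, 0) and (0, tan 45°), hence a tip distance of sqrt(1/3 + 1) ≈ 1.155.

import math

d = dsh(math.radians(30), math.radians(0), math.radians(45), math.radians(90))
print(d)  # ~1.155 shadow-tip units for a unit-height post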
import numpy as np


def similarity(vec1, vec2):
    """Cosine similarity."""
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
22a97fc08b4a8d7b662d0ba38eb6338aad587ca2
3,658,236
import _warnings


def epv00(date1, date2):
    """
    Earth position and velocity, heliocentric and barycentric, with respect
    to the Barycentric Celestial Reference System.

    :param date1, date2: TDB as a two-part Julian date.
    :type date1, date2: float

    :returns: a tuple of two items:

        * heliocentric Earth position/velocity as a numpy.matrix of shape 2x3.
        * barycentric Earth position/velocity as a numpy.matrix of shape 2x3.

    :raises: :exc:`UserWarning` if the date falls outside the range 1900-2100.

    .. seealso:: |MANUAL| page 79
    """
    pvh = _np.asmatrix(_np.zeros(shape=(2, 3), dtype=float, order='C'))
    pvb = _np.asmatrix(_np.zeros(shape=(2, 3), dtype=float, order='C'))
    s = _sofa.iauEpv00(date1, date2, pvh, pvb)
    if s != 0:
        _warnings.warn(_epv00_msg[s], UserWarning, 2)
    return pvh, pvb
bb2c97517966168beb07e1732231bb0388eca0f3
3,658,237
def algorithm_conflict(old_config, new_config):
    """Generate an algorithm configuration conflict"""
    return conflicts.AlgorithmConflict(old_config, new_config)
8f9a1dcbf90b38efd69e028a35591d4d424d72c4
3,658,238
def nin():
    """
    Build and train a NiN (network in network) model on Fashion-MNIST.

    :return: None
    """
    def nin_block(num_channels, kernel_size, strides, padding):
        blk = nn.Sequential()
        blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),
                nn.Conv2D(num_channels, kernel_size=1, activation='relu'),
                nn.Conv2D(num_channels, kernel_size=1, activation='relu'))
        return blk

    net = nn.Sequential()
    net.add(nin_block(96, kernel_size=11, strides=4, padding=0),
            nn.MaxPool2D(pool_size=3, strides=2),
            nin_block(256, kernel_size=5, strides=1, padding=2),
            nn.MaxPool2D(pool_size=3, strides=2),
            nin_block(384, kernel_size=3, strides=1, padding=1),
            nn.MaxPool2D(pool_size=3, strides=2),
            nn.Dropout(0.5),
            # there are 10 label classes
            nin_block(10, kernel_size=3, strides=1, padding=1),
            # the global average pooling layer automatically sets the window
            # shape to the height and width of the input
            nn.GlobalAvgPool2D(),
            # convert the four-dimensional output into two dimensions with
            # shape (batch size, 10)
            nn.Flatten())

    X = nd.random.uniform(shape=(1, 1, 224, 224))
    net.initialize()
    for layer in net:
        X = layer(X)
        print(layer.name, 'output shape:\t', X.shape)

    lr, num_epochs, batch_size, ctx = 0.1, 5, 128, d2l.try_gpu()
    net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
    d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
8641d6b34256168d09d2ab3c6fa0d4a0aff71410
3,658,239
def factorial(n):
    """
    Return the product of the integers 1 through n.

    n must be a nonnegative integer.
    """
    return product(range(2, n + 1))
132b772d27e661816979ea9a5f2fa3b53114b55c
3,658,240
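A sanity check against the standard library; product() is an elided helper assumed to multiply an iterable and return 1 for an empty range, as math.prod does:

import math

assert all(factorial(n) == math.factorial(n) for n in range(10))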
def get_status_lines(frames, check_transposed=True):
    """
    Extract status lines from the given frames.

    `frames` can be a 2D array (one frame), a 3D array (stack of frames,
    first index is frame number), or a list of arrays.
    Automatically check if the status line is present; return ``None``
    if it's not.
    If ``check_transposed==True``, check for the case where the image is
    transposed (i.e., the line becomes a column).
    """
    if isinstance(frames, list):
        return [get_status_lines(f, check_transposed=check_transposed) for f in frames]
    if frames.shape[-1] >= 4:
        lines = _extract_line(frames, True)
        if _check_magic(lines):
            return lines
        lines = _extract_line(frames, False)
        if _check_magic(lines):
            return lines
    if check_transposed:
        tframes = frames.T if frames.ndim == 2 else frames.transpose((0, 2, 1))
        return get_status_lines(tframes, check_transposed=False)
    return None
8bc68246b0c4987836414810a0308a2034b16368
3,658,241
def quote():
    """Get stock quote."""
    if request.method == "POST":
        quote = lookup(request.form.get("symbol"))
        if quote is None:
            return apology("invalid symbol", 400)
        return render_template("quoted.html", quote=quote)
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("quote.html")
fb9d4b54e97a4d7b104f3c0d361347b99db68195
3,658,242
import json


def stats(api, containers=None, stream=True):
    """Get container stats.

    When stream is set to true, the raw HTTPResponse is returned.
    """
    path = "/containers/stats"
    params = {'stream': stream}
    if containers is not None:
        params['containers'] = containers
    try:
        response = api.get(path, params=params)
        if stream:
            return response
        return json.loads(str(response.read(), 'utf-8'))
    except errors.NotFoundError as e:
        api.raise_not_found(e, e.response, errors.ContainerNotFound)
8e8da5ab96ab14871e3a5de363d8cae66fba5701
3,658,243
import tensorflow as tf


def add_engineered(features):
    """Add engineered features to features dict.

    Args:
        features: dict, dictionary of input features.

    Returns:
        features: dict, dictionary with engineered features added.
    """
    features["londiff"] = features["dropofflon"] - features["pickuplon"]
    features["latdiff"] = features["dropofflat"] - features["pickuplat"]
    features["euclidean"] = tf.math.sqrt(
        features["londiff"]**2 + features["latdiff"]**2)
    return features
56efe3ad922f5068c91ac702366416210e95dd74
3,658,244
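A minimal sketch (assuming TensorFlow 2.x eager mode; the coordinates are hypothetical pickup/dropoff points): the engineered "euclidean" feature is the straight-line distance in degrees.

import tensorflow as tf

features = {
    "pickuplon": tf.constant([-73.99]), "pickuplat": tf.constant([40.75]),
    "dropofflon": tf.constant([-73.98]), "dropofflat": tf.constant([40.76]),
}
print(add_engineered(features)["euclidean"].numpy())  # ~[0.01414]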
def test_3tris():
    """3 triangles"""
    conv = ToPointsAndSegments()
    polygons = [
        [[(0, 0), (1, 0), (0.5, -0.5), (0, 0)]],
        [[(1, 0.5), (2, 0.5), (1.5, 1), (1, 0.5)]],
        [[(2, 0), (3, 0), (2.5, -0.5), (2, 0)]],
    ]
    for polygon in polygons:
        conv.add_polygon(polygon)
    return conv, 24, 16, 8
fc9504e9c3ca0ae251ed67f8c99530ac6a1de73c
3,658,245
def program_modules_with_functions(module_type, function_templates):
    """ list the programs implementing a given set of functions """
    prog_lsts = [program_modules_with_function(module_type, function_template)
                 for function_template in function_templates]
    # get the intersection of all of them
    progs = _reduce(set.intersection, map(set, prog_lsts))
    return tuple(sorted(progs))
c3cfd6ee6c9fdcca3926015016e5d28a2a1f599d
3,658,246
def tasmax_below_tasmin(
    tasmax: xarray.DataArray,
    tasmin: xarray.DataArray,
) -> xarray.DataArray:
    """Check if tasmax values are below tasmin values for any given day.

    Parameters
    ----------
    tasmax : xarray.DataArray
    tasmin : xarray.DataArray

    Returns
    -------
    xarray.DataArray, [bool]

    Examples
    --------
    To gain access to the flag_array:

    >>> from xclim.core.dataflags import tasmax_below_tasmin
    >>> ds = xr.open_dataset(path_to_tas_file)
    >>> flagged = tasmax_below_tasmin(ds.tasmax, ds.tasmin)
    """
    tasmax_lt_tasmin = _sanitize_attrs(tasmax < tasmin)
    description = "Maximum temperature values found below minimum temperatures."
    tasmax_lt_tasmin.attrs["description"] = description
    tasmax_lt_tasmin.attrs["units"] = ""
    return tasmax_lt_tasmin
c112dcf5b1a89f20151daaea8d56ea8b08262886
3,658,247
import torch
import torch.nn.functional as F


def laplace_attention(q, k, v, scale, normalize):
    """ Laplace exponential attention

    Parameters
    ----------
    q : torch.Tensor
        Shape (batch_size, m, k_dim)
    k : torch.Tensor
        Shape (batch_size, n, k_dim)
    v : torch.Tensor
        Shape (batch_size, n, v_dim)
    scale : float
        scale in the L1 distance
    normalize : bool
        do the weights sum to 1?

    Returns
    -------
    r : torch.Tensor
        Shape (batch_size, m, v_dim)
    """
    k = k.unsqueeze(1)  # shape [B, 1, n, k_dim]
    q = q.unsqueeze(2)  # shape [B, m, 1, k_dim]
    unnorm_weights = - torch.abs((k - q) / scale)  # shape [B, m, n, k_dim]
    # note: reduce the tensor computed above, not the yet-unassigned `weights`
    unnorm_weights = torch.mean(unnorm_weights, dim=-1)  # shape [B, m, n]
    if normalize:
        weight_fn = lambda x: F.softmax(x, dim=-1)
    else:
        weight_fn = lambda x: 1 + torch.tanh(x)
    weights = weight_fn(unnorm_weights)  # shape [B, m, n]
    r = torch.einsum('bij,bjk->bik', weights, v)  # shape [B, m, v_dim]
    return r
600ac2f75e5396dfe6e169776425229ffedbc884
3,658,248
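A shape demo with random tensors (batch 2, m=3 queries, n=5 context points; dimensions arbitrary), using the corrected function above:

import torch

q, k, v = torch.randn(2, 3, 4), torch.randn(2, 5, 4), torch.randn(2, 5, 2)
r = laplace_attention(q, k, v, scale=1.0, normalize=True)
print(r.shape)  # torch.Size([2, 3, 2]) -- one v_dim vector per query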
def instantiate(class_name, *args, **kwargs):
    """Helper to dynamically instantiate a class from a name."""
    split_name = class_name.split(".")
    module = __import__(split_name[0])
    # Walk the remaining dotted path one attribute at a time, so names
    # like "package.module.Class" resolve instead of failing on a single
    # getattr with a dotted string.
    class_ = module
    for name in split_name[1:]:
        class_ = getattr(class_, name)
    return class_(*args, **kwargs)
d5906c835de9c2e86fbe3c15a9236662d6c7815d
3,658,249
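Example call using a standard-library class (any importable dotted name works the same way):

od = instantiate("collections.OrderedDict", [("a", 1)])
print(od)  # OrderedDict([('a', 1)])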
def fawa(pv_or_state, grid=None, levels=None, interpolate=None):
    """Finite-Amplitude Wave Activity according to Nakamura and Zhu (2010).

    - If the first parameter is not a `barotropic.State`, `grid` must be
      specified.
    - `levels` specifies the number of contours generated for the equivalent
      latitude zonalization.
    - By default, FAWA is returned on the computed equivalent latitudes. To
      obtain FAWA interpolated to a specific set of latitudes, specify these
      with the `interpolate` parameter.

    Returns a tuple containing FAWA and its latitude coordinates.
    """
    grid, pv = _get_grid_vars(["pv"], grid, pv_or_state)
    # Compute zonalized background state of PV
    qq, yy = grid.zonalize_eqlat(pv, levels=levels, interpolate=None, quad="sptrapz")
    # Use formulation that integrates PV over areas north of PV
    # contour/equivalent latitude and then computes difference
    q_int = np.vectorize(lambda q: grid.quad_sptrapz(pv, pv - q))
    y_int = np.vectorize(lambda y: grid.quad_sptrapz(pv, grid.lat - y))
    # Normalize by zonal circumference at each latitude
    fawa = (q_int(qq) - y_int(yy)) / grid.circumference(yy)
    # Interpolate to a given set of latitudes if specified
    if interpolate is not None:
        fawa = np.interp(interpolate, yy, fawa, left=0, right=0)
        yy = interpolate
    return fawa, yy
91ca98bcb5abf71100ec9716f11c5cd38688836d
3,658,250
def bmm_update(context, bmm_id, values, session=None):
    """
    Updates Bare Metal Machine record.
    """
    if not session:
        session = get_session_dodai()
        session.begin()
    bmm_ref = bmm_get(context, bmm_id, session=session)
    bmm_ref.update(values)
    bmm_ref.save(session=session)
    return bmm_ref
71c73582c9f6b96ffc5021598c8ef017ccb5af83
3,658,251
import datetime


def to_unified(entry):
    """ Convert to a unified entry """
    assert isinstance(entry, StatementEntry)
    # import the module (not just the class), since the call below is
    # datetime.datetime.strptime
    date = datetime.datetime.strptime(entry.Date, '%d/%m/%Y').date()
    return UnifiedEntry(date, entry.Reference, method=entry.Transaction_Type,
                        credit=entry.Money_In, debit=entry.Money_Out)
d6eca8cbd970931569a2ad740298578c1106e7c9
3,658,252
def edit_profile():
    """ POST endpoint that edits the student profile. """
    user = get_current_user()
    json = g.clean_json
    user.majors = Major.objects.filter(id__in=json['majors'])
    user.minors = Minor.objects.filter(id__in=json['minors'])
    user.interests = Tag.objects.filter(id__in=json['interests'])
    user.save()
    return _fetch_user_profile(user)
4548c4621f31bbd159535b7ea0768167655b4f5b
3,658,253
import six


def _stringcoll(coll):
    """
    Predicate function to determine whether COLL is a non-empty
    collection (list/tuple) containing only strings.

    Arguments:
    - `coll`: the candidate collection

    Return: bool

    Exceptions: None
    """
    if isinstance(coll, (list, tuple)) and coll:
        return len([s for s in coll if isinstance(s, six.string_types)]) == len(coll)
    return False
9490a973900e230f70fea112f250cfe29be3a8bc
3,658,254
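The predicate in action; note that a bare string and an empty list are both rejected:

print(_stringcoll(["a", "b"]))  # True
print(_stringcoll(("a", 1)))    # False -- mixed types
print(_stringcoll([]))          # False -- empty collections are rejected
print(_stringcoll("abc"))       # False -- a bare string is not a collection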
import contextlib


def create_user_db_context(
        database=Database(),
        *args, **kwargs):
    """
    Create a context manager for an auto-configured
    :func:`msdss_users_api.tools.create_user_db_func` function.

    Parameters
    ----------
    database : :class:`msdss_base_database:msdss_base_database.core.Database`
        Database to use for managing users.
    *args, **kwargs
        Additional arguments passed to
        :func:`msdss_users_api.tools.create_user_db_func`.

    Return
    ------
    dict
        Returns a dictionary with the following keys:

        * ``get_user_db_context`` (:func:`contextlib.asynccontextmanager`):
          function returned from :func:`contextlib.asynccontextmanager`
          created from an auto-configured
          :func:`msdss_users_api.tools.create_user_db_func` function
        * ``get_user_db`` (func): user db function from
          :func:`msdss_users_api.tools.create_user_db_func`
        * ``async_database`` (:class:`databases:databases.Database`):
          auto-configured :class:`databases:databases.Database` from env vars
        * ``database_engine`` (:class:`sqlalchemy:sqlalchemy.engine.Engine`):
          auto-configured :class:`sqlalchemy:sqlalchemy.engine.Engine`
          from env vars

    Author
    ------
    Richard Wen <rrwen.dev@gmail.com>

    Example
    -------
    .. jupyter-execute::

        from msdss_users_api.tools import *

        results = create_user_db_context()
        get_user_db_context = results['get_user_db_context']
        async_database = results['async_database']
    """
    # (create_user_db_func_db) Create databases
    database_engine = database._connection
    async_database = databases.Database(str(database_engine.url))

    # (get_user_db_context_return) Return user db context
    get_user_db = create_user_db_func(database_engine=database_engine,
                                      async_database=async_database,
                                      *args, **kwargs)
    out = dict(
        get_user_db_context=contextlib.asynccontextmanager(get_user_db),
        get_user_db=get_user_db,
        async_database=async_database,
        database_engine=database_engine
    )
    return out
2bafe1f31f19c2b115d54e61c124f06368694b6b
3,658,255
from pathlib import Path
import textwrap


def config(base_config):
    """:py:class:`nemo_nowcast.Config` instance from YAML fragment to use
    as config for unit tests."""
    config_file = Path(base_config.file)
    with config_file.open("at") as f:
        f.write(
            textwrap.dedent(
                """\
                file group: allen

                vhfr fvcom runs:
                  host: arbutus.cloud
                  run types:
                    nowcast x2:
                      results: /nemoShare/MEOPAR/SalishSea/fvcom-nowcast-x2/
                    forecast x2:
                      results: /nemoShare/MEOPAR/SalishSea/fvcom-forecast-x2/
                    nowcast r12:
                      results: /nemoShare/MEOPAR/SalishSea/fvcom-nowcast-r12/
                  results archive:
                    nowcast x2: /opp/fvcom/nowcast-x2/
                    forecast x2: /opp/fvcom/forecast-x2/
                    nowcast r12: /opp/fvcom/nowcast-r12/
                """
            )
        )
    config_ = nemo_nowcast.Config()
    config_.load(config_file)
    return config_
3ed4253f660a87e8b24392b4eb926b387067010f
3,658,256
def __check_complete_list(list_, nb_max, def_value):
    """
    Make sure the list is long enough; complete it with a default value if not.

    :param list_: list to check
    :param nb_max: maximum length of the list
    :param def_value: if the list is too small, complete it with this value
    :return: boolean, False if the list is too long
    """
    if len(list_) <= nb_max:
        list_.extend([def_value] * (nb_max - len(list_)))
        return True
    else:
        return False
9d439cd3eeea04e7a3e0e59aa4fe0bbb875bdfe4
3,658,257
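Usage sketch (called from within its own module; the double-underscore prefix is only a naming convention here, not class-level name mangling). Note the list is padded in place:

values = [1, 2]
print(__check_complete_list(values, 5, 0), values)  # True [1, 2, 0, 0, 0]
print(__check_complete_list([1] * 7, 5, 0))         # False -- already too long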
import pickle


def _fill_function(func, globals, defaults, closure, dct):
    """ Fills in the rest of the function data into the skeleton function
    object that was created via _make_skel_func(). """
    func.func_globals.update(globals)
    func.func_defaults = defaults
    func.func_dict = dct

    if len(closure) != len(func.func_closure):
        raise pickle.UnpicklingError("closure lengths don't match up")
    for i in range(len(closure)):
        _change_cell_value(func.func_closure[i], closure[i])

    return func
7ac454b7d6c43f49da1adf32522c03d28d88e6b7
3,658,258