Columns: content (string, length 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
def encrypt(key, pt, Nk=4):
    """Encrypt a plain text block."""
    assert Nk in {4, 6, 8}
    rkey = key_expand(key, Nk)
    ct = cipher(rkey, pt, Nk)
    return ct
41d94f1c050d89e85c6e9f3c74de1cb3cae7a899
1,000
import requests
import logging


def upload(filename, url, token=None):
    """Upload a file to a URL"""
    headers = {}
    if token:
        headers['X-Auth-Token'] = token
    try:
        with open(filename, 'rb') as file_obj:
            response = requests.put(url, data=file_obj, timeout=120,
                                    headers=headers, verify=False)
    except requests.exceptions.RequestException as err:
        logging.warning('RequestException when trying to upload file %s: %s',
                        filename, err)
        return None
    except IOError as err:
        logging.warning('IOError when trying to upload file %s: %s',
                        filename, err)
        return None
    if response.status_code == 200 or response.status_code == 201:
        return True
    return None
eb8a8060294322bd9df187c8076d8f66b4dc775c
1,001
import torch
from torch.autograd import Variable  # needed for the goal tensor below


def cost(states, sigma=0.25):
    """Pendulum-v0: Same as OpenAI-Gym"""
    l = 0.6
    goal = Variable(torch.FloatTensor([0.0, l]))  # .cuda()

    # Cart position
    cart_x = states[:, 0]
    # Pole angle
    thetas = states[:, 2]
    # Pole position
    x = torch.sin(thetas) * l
    y = torch.cos(thetas) * l
    positions = torch.stack([cart_x + x, y], 1)

    squared_distance = torch.sum((goal - positions) ** 2, 1)
    squared_sigma = sigma ** 2
    cost = 1 - torch.exp(-0.5 * squared_distance / squared_sigma)
    return cost
fdbf3105ff04437b05b5914aac43c61706f87287
1,002
def flatmap(fn, seq):
    """
    Map the fn to each element of seq and append the results of the
    sublists to a resulting list.
    """
    result = []
    for lst in map(fn, seq):
        for elt in lst:
            result.append(elt)
    return result
c42d07f712a29ece76cd2d4cec4f91ec2562a1c0
1,003
def the_test_file():
    """the test file."""
    filename = 'tests/resources/grype.json'
    script = 'docker-grype/parse-grype-json.py'
    return {
        'command': f'{script} {filename}',
        'host_url': 'local://'
    }
d97d621d05f3844053b42c878dc8189fc8d264d0
1,004
import csv


def build_stations() -> tuple[dict, dict]:
    """Builds the station dict from source file"""
    stations, code_map = {}, {}
    data = csv.reader(_SOURCE["airports"].splitlines())
    next(data)  # Skip header
    for station in data:
        code = get_icao(station)
        if code and station[2] in ACCEPTED_STATION_TYPES:
            stations[code] = format_station(code, station)
            code_map[station[0]] = code
    return stations, code_map
773d34c7d33585611dfb79fc4beaf8702a2c57df
1,005
def vox_mesh_iou(voxelgrid, mesh_size, mesh_center, points, points_occ,
                 vox_side_len=24, pc=None):
    """LeoZDong addition: Compare iou between voxel and mesh (represented as
    points sampled uniformly inside the mesh). Everything is a single element
    (i.e. no batch dimension).
    """
    # Un-rotate voxels to pointcloud orientation
    voxelgrid = voxelgrid.copy()
    voxelgrid = np.flip(voxelgrid, 1)
    voxelgrid = np.swapaxes(voxelgrid, 0, 1)
    # voxelgrid = np.swapaxes(voxelgrid, 0, 2)

    # Find voxel centers as if they are in a [-0.5, 0.5] bbox
    vox_center = get_vox_centers(voxelgrid)

    # Rescale points so that the mesh object is 0-centered and has longest side
    # to be 0.75 (vox_side_len=24 / 32)
    points += vox_center - mesh_center
    scale = (vox_side_len / voxelgrid.shape[0]) / mesh_size
    points *= scale

    # import ipdb; ipdb.set_trace()
    cond = np.stack((points.min(1) > -0.5, points.max(1) < 0.5), 0)
    in_bounds = np.all(cond, 0)
    vox_occ = np.zeros_like(points_occ)
    vox_occ[in_bounds] = points_occ_in_voxel(voxelgrid, points[in_bounds, :])

    # Find occupancy in voxel for the query points
    # vox_occ = points_occ_in_voxel(voxelgrid, points)
    iou = occ_iou(points_occ, vox_occ)

    #### DEBUG ####
    # vox_occ_points = points[vox_occ > 0.5]
    # gt_occ_points = points[points_occ > 0.5]
    # int_occ_points = points[(vox_occ * points_occ) > 0.5]
    # save_dir = '/viscam/u/leozdong/shape2prog/output/chair/GA_24/meshes/table/cd5f235344ff4c10d5b24cafb84903c7'
    # save_ply(vox_occ_points, os.path.join(save_dir, 'vox_occ_points.ply'))
    # save_ply(gt_occ_points, os.path.join(save_dir, 'gt_occ_points.ply'))
    # save_ply(int_occ_points, os.path.join(save_dir, 'int_occ_points.ply'))
    # print("iou:", iou)
    return iou
a720701dadb6321e402048425224cbaf91f507aa
1,006
def qhxl_attr_2_bcp47(hxlatt: str) -> str:
    """qhxl_attr_2_bcp47

    Convert HXL attribute part to BCP47

    Args:
        hxlatt (str):

    Returns:
        str:
    """
    resultatum = ''
    tempus1 = hxlatt.replace('+i_', '')
    tempus1 = tempus1.split('+is_')
    resultatum = tempus1[0] + '-' + tempus1[1].capitalize()
    # @TODO: test better cases with +ix_
    resultatum = resultatum.replace('+ix_', '-x-')
    return resultatum
a44a0c09345176104e7b7c1d26a920620157ec67
1,007
def _(output):
    """Handle the output of a bash process."""
    logger.debug('bash handler: subprocess output: {}'.format(output))
    if output.returncode == 127:
        raise exceptions.ScriptNotFound()
    return output
e71ce6c566c09e100ae3463109f5bcc6d676b494
1,008
def process_row(row, fiscal_fields):
    """Add and remove appropriate columns."""
    surplus_keys = set(row) - set(fiscal_fields)
    missing_keys = set(fiscal_fields) - set(row)
    for key in missing_keys:
        row[key] = None
    for key in surplus_keys:
        del row[key]
    assert set(row) == set(fiscal_fields)
    return row
1c55fe628b53be72633d2fcae7cc1fbac91d04ae
1,009
def DefaultTo(default_value, msg=None):
    """Sets a value to default_value if none provided.

    >>> s = Schema(DefaultTo(42))
    >>> s(None)
    42
    """
    def f(v):
        if v is None:
            v = default_value
        return v
    return f
10401d7214d15c2b0bf28f52430ef71b5df0a116
1,010
import pandas as pd


def load_files(file_list, inputpath):
    """
    Load the data from potentially multiple files into one pandas DataFrame.
    """
    df = None
    # loop through files and append
    for i, file in enumerate(file_list):
        path = f"{inputpath}/{file}"
        print(path)
        df_i = pd.read_csv(path)
        if i == 0:
            df = df_i
        else:
            df = pd.concat([df, df_i], axis=0, ignore_index=True)
    return df
2f1ec9519c4ff1cb9d8a2f492e80cc05ecb968db
1,011
def list_all():
    """
    List all systems

    List all transit systems that are installed in this Transiter instance.
    """
    return systemservice.list_all()
21efc81b1312f01d6b016fa10cdf675b0e22655f
1,012
def putText(image: np.ndarray, text: str, org=(0, 0),
            font=_cv2.FONT_HERSHEY_PLAIN, fontScale=1, color=(0, 0, 255),
            thickness=1, lineType=_cv2.LINE_AA,
            bottomLeftOrigin=False) -> np.ndarray:
    """Add text to `cv2` image, with default values.

    :param image: image array
    :param text: text to be added
    :param org: origin of text, from top left by default
    :param font: font choice
    :param fontScale: font size
    :param color: BGR color, red by default
    :param thickness: font thickness
    :param lineType: line type of text
    :param bottomLeftOrigin: True to start from bottom left, default False
    :return: image with text added
    """
    return _cv2.putText(image, text, org, font, fontScale, color, thickness,
                        lineType, bottomLeftOrigin)
37fd20c2afb70a59f78f35741c235e9793721dab
1,013
def gaussFilter(fx: int, fy: int, sigma: int):
    """Gaussian Filter"""
    x = tf.range(-int(fx / 2), int(fx / 2) + 1, 1)
    Y, X = tf.meshgrid(x, x)
    sigma = -2 * (sigma**2)
    z = tf.cast(tf.add(tf.square(X), tf.square(Y)), tf.float32)
    k = 2 * tf.exp(tf.divide(z, sigma))
    k = tf.divide(k, tf.reduce_sum(k))
    return k
b83bcadba782f16f6932c081b9f20ad9bd71828b
1,014
def do_something(param=None):
    """
    Several routes for the same function
    FOO and BAR have different documentation
    ---
    """
    return "I did something with {}".format(request.url_rule), 200
7a50206c27b66d2b3ff588777ea95927b527a719
1,015
import re
from typing import Literal


def extract_text(
    pattern: re.Pattern[str] | str,
    source_text: str,
) -> str | Literal[False]:
    """Match the given pattern and extract the matched text as a string."""
    match = re.search(pattern, source_text)
    if not match:
        return False
    match_text = match.groups()[0] if match.groups() else match.group()
    return match_text
a6f762cfd26dd1231db4b6e88247e2566d186212
1,016
import os


def _checksum_paths():
    """Returns dict {'dataset_name': 'path/to/checksums/file'}."""
    dataset2path = {}
    for dir_path in _CHECKSUM_DIRS:
        for fname in _list_dir(dir_path):
            if not fname.endswith(_CHECKSUM_SUFFIX):
                continue
            fpath = os.path.join(dir_path, fname)
            dataset_name = fname[:-len(_CHECKSUM_SUFFIX)]
            dataset2path[dataset_name] = fpath
    return dataset2path
5685ad37a6b38355a59f24bcd02f90db265b0714
1,017
def get_merged_message_df(messages_df, address_book, print_debug=False):
    """
    Merges a message dataframe with the address book dataframe to return a
    single dataframe that contains all messages with detailed information
    (e.g. name, company, birthday) about the sender.

    Args:
        messages_df: a dataframe containing all transmitted messages
        address_book: a dataframe containing the address book as loaded via this module
        print_debug: true if we should print out the first row of each intermediary table as it's created

    Returns:
        a dataframe that contained all messages with info about their senders
    """
    phones_with_message_id_df = __get_address_joined_with_message_id(address_book)

    if print_debug:
        print('Messages Dataframe')
        display(messages_df.head(1))
        print('Address Book Dataframe')
        display(address_book.head(1))
        print('Phones/emails merged with message IDs via chats Dataframe')
        display(phones_with_message_id_df.head(1))

    return messages_df.merge(phones_with_message_id_df,
                             how='left',
                             suffixes=['_messages_df', '_other_join_tbl'],
                             left_index=True,
                             right_on='message_id',
                             indicator='merge_chat_with_address_and_messages')
be7f98c2b2415f02795e54d8c9b627b5f5a037cd
1,018
def nodal_distribution_factors_v2(topo: ndarray, volumes: ndarray):
    """The j-th factor of the i-th row is the contribution of element i to the
    j-th node. Assumes a regular topology."""
    ndf = nodal_distribution_factors(topo, volumes)
    return ndf
b805b9fa2617bc9501910bc43cb623cd15d3aea5
1,019
def game_core_binary(number_to_guess):
    """Binary search approach.
    Set the first predict value as the middle of interval, i.e. 50.
    Then decrease or increase the predict number by step.
    The step is calculated using the check interval divided by 2, i.e. 25, 13 ... 1
    The minimum step is always 1.
    The function return count of guesses"""
    count_guesses = 1
    predict = step = round(MAX_NUMBER / 2)
    while number_to_guess != predict:
        count_guesses += 1
        step = round(step / 2) if step > 1 else 1
        if number_to_guess > predict:
            predict += step
        elif number_to_guess < predict:
            predict -= step
    return count_guesses
909322bda51c25175c372708896bc6aca5e9753b
1,020
def linear_trend(series, return_line=True):
    """
    USAGE
    -----
    line = linear_trend(series, return_line=True)

    OR

    b, a, x = linear_trend(series, return_line=False)

    Returns the linear fit (line = b*x + a) associated with the 'series' array.

    Adapted from pylab.detrend_linear.
    """
    series = np.asanyarray(series)
    x = np.arange(series.size, dtype=np.float_)

    C = np.cov(x, series, bias=1)   # Covariance matrix.
    b = C[0, 1]/C[0, 0]             # Angular coefficient.
    a = series.mean() - b*x.mean()  # Linear coefficient.
    line = b*x + a

    if return_line:
        return line
    else:
        return b, a, x
129b63dd9f194dd0a6506e2645e330fe92ea6a1c
1,021
import torch


def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
    """Gradcheck wrapper for masked operations.

    When mask is specified, replaces masked-out elements with zeros.

    Use for operations that produce non-finite masked-out elements,
    for instance, for minimum and maximum reductions.
    """
    output = op(input, *args, **kwargs)
    mask = kwargs.get('mask')
    if mask is not None:
        output_mask = torch._masked._output_mask(op, input, *args, **kwargs)
        output = torch.where(output_mask, output, output.new_zeros([]))
    return output
fa0d3433a8cf3d60c81c96dc154d8f0e82acd791
1,022
import numpy as np
from PIL import Image


def classify(neural_net, image_file):
    """
    Using the given model and image file, returns the model's
    prediction for the image as an array.
    """
    img = Image.open(image_file)
    img.load()
    img_array = np.asarray(img)
    img_array.shape = (1, 100, 100, 3)
    prediction = neural_net.predict(img_array)[0][0]
    return prediction
3d8b301b3f41b5cad04233228198424670f06506
1,023
def delete(job):
    """Delete a job."""
    # Initialise variables.
    jobid = job["jobid"]

    try:
        shellout = shellwrappers.sendtossh(job, ["qdel " + jobid])
    except exceptions.SSHError:
        raise exceptions.JobdeleteError("Unable to delete job.")

    return shellout[0]
c870e07210063136ac3651691d1e54dc292f0830
1,024
import itertools def optimum_simrank(x_p, x_n, alpha): """Intermediary function to the one below.""" pos_pair_1 = itertools.combinations(x_p, 2) pos_pair_2 = itertools.combinations(x_n, 2) neg_pair = itertools.product(x_p, x_n) def get_val_from_pair(x): # Transforms each pair into one minus the minimum of its l1 distance to (0,0) or (1,1). distance_to_lower_corner = max(abs(x[0]), abs(x[1])) distance_to_upper_corner = max(abs(1. - x[0]), abs(1. - x[1])) return 1 - min(distance_to_lower_corner, distance_to_upper_corner) x_p = (np.array(list(map(get_val_from_pair, pos_pair_1)) + list(map(get_val_from_pair, pos_pair_2)))) x_n = np.array(list(map(get_val_from_pair, neg_pair))) def opt_fun(i_p, i_n): if float(i_n) / x_n.shape[0] <= alpha: return i_p / x_p.shape[0] return - float("inf") X = np.hstack([x_p, x_n]) Y = np.array([+1]*len(x_p) + [-1]*len(x_n)) f_opt, crit_opt, _ = ut.bipart_partition(X, Y, opt_fun) return 1-f_opt, crit_opt
bc4f451dc2ae5f9fe653e9330241202b5f470e49
1,025
from enaml.core.import_hooks import imports from contextlib import contextmanager from enaml.core.operators import operator_context def imports(operators=None, union=True): """ Lazily imports and returns an enaml imports context. Parameters ---------- operators : dict, optional An optional dictionary of operators to push onto the operator stack for the duration of the import context. If this is not provided, the default Enaml operators will be used. Unless a custom model framework is being used (i.e. not Atom), custom operators will typically not be needed. union : bool, optional Whether to union the operators with the operators on the top of the operator stack. The default is True and is typically the correct choice to allow overriding a subset of the default Enaml operators. Returns ------- result : context manager A context manager which will install the Enaml import hook (and optional operators) for the duration of the context. """ if operators is None: return imports() @contextmanager def imports_context(): with imports(): with operator_context(operators, union): yield return imports_context()
c0068c39a4c9c39c8789fd79ed651ecf2e50c3b7
1,026
import io
import tokenize
from typing import cast


def apply_job_security(code):
    """Treat input `code` like Python 2 (implicit strings are byte literals).

    The implementation is horribly inefficient but the goal is to be compatible
    with what Mercurial does at runtime.
    """
    buf = io.BytesIO(code.encode("utf8"))
    tokens = tokenize.tokenize(buf.readline)
    # NOTE: by setting the fullname to `mercurial.pycompat` below, we're
    # ensuring that hg-specific pycompat imports aren't inserted to the code.
    data = tokenize.untokenize(replacetokens(list(tokens), "mercurial.pycompat"))
    return cast(str, data.decode("utf8"))
8dd7e0f6ad91f9c98ea50ac76fb30616d9d8f266
1,027
def fetch(gpname: str):
    """Gives gunpowder

    Parameters
    ----------
    gpname: str
        Gunpowder name

    Returns
    -------
    gpowder: dict
        Gunpowder in dictionary form
    """
    gpowders = _load_many()
    return gpowders[gpname]
e880a62c92937d564ff84af33c7c0e1dd2383d9d
1,028
def _kc_frequency_features(time_data, times, sfreq): """ Calculate absolute power of delta and alpha band before (on a 3 seconds windows) and after K-complexes""" exp = [('before', -2.5, -0.5), ('after', 1, 3)] res = {} for m in exp: kc_matrix_temp = time_data[:, np.bitwise_and(times > m[1], times < m[2])] absol_power = compute_absol_pow_freq_bands(sfreq, kc_matrix_temp, psd_method='multitaper', psd_params={'mt_adaptive': True, 'mt_bandwidth': 3, 'mt_low_bias': True}, freq_bands=[0.5, 4, 8, 12]) delta = absol_power[:, 0] alpha = absol_power[:, 2] res[m[0]] = (delta, alpha) delta_before, alpha_before, delta_after, alpha_after = res['before'][0], res['before'][1],\ res['after'][0], res['after'][1] return delta_before, alpha_before, delta_after, alpha_after
0e0df2c3f2b0baa8e6fb8118fa01a89b62c2656c
1,029
import numpy as np import pandas.io.data as pd from matplotlib.pyplot import plot, grid, show, figure def gentrends(x, window=1/3.0, charts=True): """ Returns a Pandas dataframe with support and resistance lines. :param x: One-dimensional data set :param window: How long the trendlines should be. If window < 1, then it will be taken as a percentage of the size of the data :param charts: Boolean value saying whether to print chart to screen """ x = np.array(x) if window < 1: window = int(window * len(x)) max1 = np.where(x == max(x))[0][0] # find the index of the abs max min1 = np.where(x == min(x))[0][0] # find the index of the abs min # First the max if max1 + window > len(x): max2 = max(x[0:(max1 - window)]) else: max2 = max(x[(max1 + window):]) # Now the min if min1 - window < 0: min2 = min(x[(min1 + window):]) else: min2 = min(x[0:(min1 - window)]) # Now find the indices of the secondary extrema max2 = np.where(x == max2)[0][0] # find the index of the 2nd max min2 = np.where(x == min2)[0][0] # find the index of the 2nd min # Create & extend the lines maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline a_min = x[min1] - (minslope * min1) # y-intercept for min trendline b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's minline = np.linspace(a_min, b_min, len(x)) # Y values between min's # OUTPUT trends = np.transpose(np.array((x, maxline, minline))) trends = pd.DataFrame(trends, index=np.arange(0, len(x)), columns=['Data', 'Max Line', 'Min Line']) if charts is True: figure() plot(trends) grid() show() return trends, maxslope, minslope
236ca4e206619da83b9f4dea92655c80714e062f
1,030
import functools
from collections import defaultdict
from operator import add


def gen_cand_keyword_scores(phrase_words, word_score):
    """
    Computes the score for the input phrases.

    :param phrase_words: phrases to score
    :type phrase_words: list
    :param word_score: calculated word scores
    :type word_score: list
    :return: dict *{phrase: score, ...}*
    """
    keyword_candidates = defaultdict(int)
    for phrase, word_list in phrase_words:
        if not word_list:
            continue
        candidate_score = functools.reduce(
            add, [word_score[word] for word in word_list]
        )
        keyword_candidates[phrase] = candidate_score
    return keyword_candidates
d219256938ab2538214cbc075451f7da5a253b06
1,031
def analyze_network(directed=False, base_url=DEFAULT_BASE_URL): """Calculate various network statistics. The results are added to the Node and Edge tables and the Results Panel. The summary statistics in the Results Panel are also returned by the function as a list of named values. Args: directed (bool): If True, the network is considered a directed graph. Default is False. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://127.0.0.1:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: dict: Named list of summary statistics Raises: requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> analyze_network() {'networkTitle': 'galFiltered.sif (undirected)', 'nodeCount': '330', 'edgeCount': '359', 'avNeighbors': '2.379032258064516', 'diameter': '27', 'radius': '14', 'avSpl': '9.127660963823953', 'cc': '0.06959203036053131', 'density': '0.009631709546819902', 'heterogeneity': '0.8534500004035027', 'centralization': '0.06375695335900727', 'ncc': '26'} >>> analyze_network(True) {'networkTitle': 'galFiltered.sif (directed)', 'nodeCount': '330', 'edgeCount': '359', 'avNeighbors': '2.16969696969697', 'diameter': '10', 'radius': '1', 'avSpl': '3.4919830756382395', 'cc': '0.03544266191325015', 'density': '0.003297411808050106', 'ncc': '26', 'mnp': '1', 'nsl': '0'} """ res = commands.commands_post(f'analyzer analyze directed={directed}', base_url=base_url) return res
0edd9e848e3b3060055e6845aa5fbb2792c7a1f4
1,032
def create_user():
    """Create new user"""
    # request.get_json(): extract the JSON from the request and return it as
    # a Python structure.
    data = request.get_json() or {}

    # Validate mandatory fields
    if 'username' not in data or 'email' not in data or \
            'password' not in data:
        return bad_request('must include username, email and password fields')
    if User.query.filter_by(username=data['username']).first():
        return bad_request('please use a different username')
    if User.query.filter_by(email=data['email']).first():
        return bad_request('please use a different email address')

    # Create user
    user = User()
    user.from_dict(data, new_user=True)
    db.session.add(user)
    db.session.commit()

    # Make response
    response = jsonify(user.to_dict())
    # The status code for a POST request that creates a resource should be 201
    response.status_code = 201
    response.headers['Location'] = url_for('api.get_user', id=user.id)
    return response
a416e0d5bbb6539cee3ce5174ab3cf1186680ee9
1,033
import hashlib
import base64


def hash_long_to_short(long_url):
    """
    turn a long input url into a short url's url-safe 5 character hash
    this is deterministic and the same long_url will always have the same hash
    """
    encoded = long_url.encode("utf-8")
    md5_hash = hashlib.md5(encoded).digest()
    return base64.urlsafe_b64encode(md5_hash)[:SHORT_URL_HASH_LENGTH]
050de3e30feeac46f98b152890d82dd8e416f2d0
1,034
import os def cutout_vstAtlas(ra, dec, bands=["u","g","r","i","z"], database="ATLASDR3",\ psfmags=None, imDir="/data/vst-atlas/", input_filename=[], saveFITS=False,\ width_as=20., smooth=False, cmap="binary", minmax="MAD", origin="lower", figTitle=True, \ return_val=False, saveDir=None): """ Plot all the bands cutouts on one plot for an input source position ## Cutouts parameters width_as: size of the cutout box; default is 20arcsec smooth: gaussian smoothing with sigma=1.0; defaul is False cmap: image colour map minmax: Defined the min-max scale of the image; default is from sigma_MAD(image) (SEE def cutout_scale) origin: where to place the [0,0] index of the image; default is "lower" figTitle: add a title to the final figure (ex: VISTA cutout 20"x20" ra=, dec= (Jradec); default is True) ## VISTA parameters ra, dec: position of the source in deg (single object, not an array) bands: filters for which to do the cutouts psfmags: magnitudes of the source. Should be an array of the same size than bands or None (default) Will be added to band cutout title if not None imDir: directory of the fits file if already save on disk input_filename: name of the input file if save on disk database: ATLAS database used = ATLAS + DataRealease saveFITS: save fits tile file on disk (to imDir) ## Output parameters return_val: return image data, min-max(image); default is False saveDir: output directory to save the final figure. If None do not save; default is None """ print("VST-ATLAS cutout(s), band(s):", "".join(bands)) ### radec: HHMMSSsDDMMSS radec_str = radecStr(ra, dec, precision=1) ### Figure: defined fig and gs figWidth = len(bands) * 8./3. fig = plt.figure(figsize=(figWidth, 4)) fig.subplots_adjust(left = 0.05, right = 0.95, top = 0.90, bottom = 0, wspace = 0) gs = gridspec.GridSpec(1, len(bands)) datas = [] for i, band in enumerate(bands): print("{}-band".format(band)) ### Filename of fits image if save of the disk if len(input_filename) == 0: input_filename = "" else: input_filename = input_filename[i] filename = imDir + input_filename ### If filename does nor exists -> get file from url if not os.path.exists(filename) or input_filename == "": filename = cdl.vstAtlas_dl(ra, dec, band, database=database, width_as=width_as,\ FitsOutputPath=imDir, saveFITS=saveFITS) print(" ", filename) ### Read fits file: cutout size = width_as ### filename could be a system path or an url or "" print(" Try to read the fits file ...") image,wcs = rd_fits(filename, ra, dec, hdrNum=1, width_as=width_as, pixelscale=0.21, smooth=smooth) ### Plot image: cutout size = width_as print(" Plot the cutout ...") ax = fig.add_subplot(gs[0,i]) if psfmags is not None: psfmags = psfmags[i] vmin, vmax = plt_image(band, image, fig, ax, psfmags=psfmags, cmap=cmap, minmax=minmax, origin=origin) datas.append((image, vmin, vmax, wcs)) ## Add a title to the figure if figTitle: fig.suptitle('VST-ATLAS cutouts ({:.0f}"x{:.0f}") \n ra: {:.4f}, dec: {:.4f} (J{})'.format(width_as, width_as,\ ra, dec, radec_str), fontsize=15) ### Output if return_val: print(" Return image data") plt.close(fig) return datas if saveDir is not None: print(" Save the figure to", saveDir) allBands = "".join(bands) plt.savefig(saveDir + "Cutouts_VISTA-{}_{}_{}_{:.0f}arcsec.png".format(survey, radec_str, allBands, width_as),\ bbox_inches="tight") plt.close() else: print(" Return the figure") return fig
7bf0d0ba8d7bcad847206e2cee1f386616939b66
1,035
def has_prefix(sub_s):
    """
    Test possibility of sub_s before doing recursion.
    :param sub_s: sub_string of input word from its head.
    :return: (boolean) whether word starts with sub_s.
    """
    for word in DATABASE:
        if word.startswith(sub_s):
            return True
2dde507f7b0b3c56f8a5a9a582d52b784607dd5d
1,036
def transform_results(search_result, user, department_filters): """ Transform podcast and podcast episode, and userlist and learning path in aggregations Add 'is_favorite' and 'lists' fields to the '_source' attributes for learning resources. Args: search_result (dict): The results from ElasticSearch user (User): the user who performed the search Returns: dict: The Elasticsearch response dict with transformed aggregates and source values """ for aggregation_key in [ "type", "topics", "offered_by", "audience", "certification", "department_name", "level", "course_feature_tags", "resource_type", ]: if f"agg_filter_{aggregation_key}" in search_result.get("aggregations", {}): if aggregation_key == "level": levels = ( search_result.get("aggregations", {}) .get(f"agg_filter_{aggregation_key}", {}) .get("level", {}) .get("level", {}) ) if levels: search_result["aggregations"]["level"] = { "buckets": [ { "key": bucket["key"], "doc_count": bucket["courses"]["doc_count"], } for bucket in levels.get("buckets", []) if bucket["courses"]["doc_count"] > 0 ] } else: search_result["aggregations"][aggregation_key] = search_result[ "aggregations" ][f"agg_filter_{aggregation_key}"][aggregation_key] search_result["aggregations"].pop(f"agg_filter_{aggregation_key}") types = search_result.get("aggregations", {}).get("type", {}) if types: type_merges = dict( zip( (PODCAST_EPISODE_TYPE, LEARNING_PATH_TYPE), (PODCAST_TYPE, USER_LIST_TYPE), ) ) for child_type, parent_type in type_merges.items(): child_type_bucket = None parent_type_bucket = None for type_bucket in search_result["aggregations"]["type"]["buckets"]: if type_bucket["key"] == child_type: child_type_bucket = type_bucket elif type_bucket["key"] == parent_type: parent_type_bucket = type_bucket if child_type_bucket and parent_type_bucket: parent_type_bucket["doc_count"] = ( child_type_bucket["doc_count"] + parent_type_bucket["doc_count"] ) search_result["aggregations"]["type"]["buckets"].remove( child_type_bucket ) elif child_type_bucket: child_type_bucket["key"] = parent_type search_result["aggregations"]["type"]["buckets"].sort( key=lambda bucket: bucket["doc_count"], reverse=True ) if not user.is_anonymous: favorites = ( FavoriteItem.objects.select_related("content_type") .filter(user=user) .values_list("content_type__model", "object_id") ) for hit in search_result.get("hits", {}).get("hits", []): object_type = hit["_source"]["object_type"] if object_type in LEARNING_RESOURCE_TYPES: if object_type == LEARNING_PATH_TYPE: object_type = USER_LIST_TYPE object_id = hit["_source"]["id"] hit["_source"]["is_favorite"] = (object_type, object_id) in favorites hit["_source"]["lists"] = get_list_items_by_resource( user, object_type, object_id ) search_result = _transform_search_results_suggest(search_result) if len(department_filters) > 0: _transform_search_results_coursenum(search_result, department_filters) return search_result
93bbb9cb3effa4b0f602e42549a961f4fd53faeb
1,037
def kl_div_loss(inputs: Tensor, targets: Tensor) -> Tensor:
    """Computes the Kullback–Leibler divergence loss between two probability
    distributions."""
    return F.kl_div(F.log_softmax(inputs, dim=-1),
                    F.softmax(targets, dim=-1),
                    reduction="none")
9a45dacfe8fd529893cf7fa813869a97da562f65
1,038
from typing import List

import psycopg2
import psycopg2.extras


def get_schema_names(connection: psycopg2.extensions.connection) -> List[psycopg2.extras.RealDictRow]:
    """Function for getting the schema information from the given connection

    :param psycopg2.extensions.connection connection: The connection
    :return: List of rows using key-value pairs for the data
    :rtype: List[psycopg2.extras.RealDictRow]
    """
    with connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
        query = """SELECT * FROM information_schema.schemata"""
        cursor.execute(query)
        results = cursor.fetchall()
    return results
69a4e0b70ef443c2480f0fbb1e1e859bbf6f69bd
1,039
def parse(string):
    """Returns a list of specs from an input string.

    For creating one spec, see Spec() constructor.
    """
    return SpecParser().parse(string)
788849ebaa29b4dab5e4babcb13573acbc8b8525
1,040
def get_provider_idx(provider_type):
    """Return the index associated to the type."""
    try:
        return PROVIDERS_TYPE[provider_type]['idx']
    except KeyError as error:
        raise ProviderError(
            "Provider type (%s) is not supported yet." % (provider_type, )
        )
47272903415825c870222b3531fddc11129d62c0
1,041
import collections def file_based_convert_examples_to_features( examples, slot_label_list, intent_label_list, max_seq_length, tokenizer, output_file): """ 将InputExamples转成tf_record,并写入文件 Convert a set of InputExample to a TFRecord file. :param examples: [(text, CRF_label, class_label), ...] :param slot_label_list: CRF标签列表(String) :param intent_label_list: 触发词类别列表(String) :param max_seq_length: :param tokenizer: :param output_file: TFRecord file :return: """ writer = tf.io.TFRecordWriter(output_file) for ex_index, example in enumerate(examples): def create_int_feature(values): return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) if ex_index % 10000 == 0: logger.info("Writing example %d of length %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, max_seq_length, tokenizer) # convert to tensorflow format features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["slot_ids"] = create_int_feature(feature.slot_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features['is_value_ids'] = create_int_feature(feature.is_value_ids) features["is_real_example"] = create_int_feature([int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) # 写入一个样本到tf_record writer.close()
b5d4a9228af4169307a8a22f4c56a0c3eb6e8f27
1,042
def create_readme(df): """Retrieve text from README.md and update it.""" readme = str categories = pd.unique(df["category"]) categories.sort() with open('README.md', 'r', encoding='utf-8') as read_me_file: read_me = read_me_file.read() splits = read_me.split('<!---->') # Initial project description text_intro = splits[0] # Contribution and contacts text_contributing = splits[3] text_contacts = splits[4] # TOC toc = "\n\n- [Awesome Citizen Science Projects](#awesome-citizen-science-projects)\n" # Add categories for cat in range(len(categories)): toc += f" - [{categories[cat]}](#{categories[cat]})" + "\n" # Add contributing and contact to TOC toc += "- [Contributing guidelines](#contributing-guidelines)\n" toc += "- [Contacts](#contacts)\n" # Add first part and toc to README readme = text_intro + "<!---->" + toc + "\n<!---->\n" # Add projects subtitle readme += "\n## Projects\n" # Add individual categories to README list_blocks = "" for cat in range(len(categories)): block = f"\n### {categories[cat]}\n\n" filtered = df[df["category"] == categories[cat]] list_items = "" for i, r in filtered.iterrows(): try: start_date = int(r['start_date']) except: start_date = "NA" if not pd.isna(r['icon']): project = f"- {r['icon']} [{r['name']}]({r['main_source']}) - {r['description']} (`{start_date}` - `{str(r['end_date'])}`)\n" list_items = list_items + project else: project = f"- [{r['name']}]({r['main_source']}) - {r['description']} (`{start_date}` - `{str(r['end_date'])}`)\n" list_items = list_items + project list_blocks = list_blocks + block + list_items # Add to categories to README.md readme += list_blocks + "\n" # Add contribution and contacts readme += '<!---->' + text_contributing readme += '<!---->' + text_contacts return readme
5e0d207baa3d5c1e1f68b6f2e1a347bffece901a
1,043
async def get_leaderboard_info_by_id(
    # ScoreSaber leaderboardId
    leaderboardId: float
):
    """
    GET /api/leaderboard/by-id/{leaderboardId}/info
    """
    # request
    request_url = f'{SERVER}/api/leaderboard/by-id/{leaderboardId}/info'
    response_dict = await request.get(request_url)
    return LeaderboardInfo.gen(response_dict)
ab081d17b462a0738c578c9caed93c7b4a1ec9a6
1,044
def distance(lat1, lon1, lat2, lon2):
    """Input 2 points in Lat/Lon degrees.
    Calculates the great circle distance between them in radians
    """
    rlat1 = radians(lat1)
    rlon1 = radians(lon1)
    rlat2 = radians(lat2)
    rlon2 = radians(lon2)

    dlat = rlat1 - rlat2
    dlon = rlon1 - rlon2

    a = pow(sin(dlat/2.0), 2) + cos(rlat1)*cos(rlat2)*pow(sin(dlon/2.0), 2)
    c = 2 * atan2(sqrt(a), sqrt(1-a))
    return c
2c6b1692843db3f69c750f4b2acda43d49227e7a
1,045
def minimumSwaps(arr):
    """
    O(nlogn)
    """
    len_arr = len(arr)
    arr_dict = {key+1: value for key, value in enumerate(arr)}
    arr_checked = [False]*len_arr
    total_count = 0
    for key, value in arr_dict.items():
        count = 0
        while key != value and arr_checked[key-1] is False:
            arr_checked[value-1] = True
            count += 1
            value = arr_dict.get(value)
        arr_checked[key-1] = True
        total_count += count
    return total_count
d5251297fd52f99aefce69986bd5c8c126b7e6b6
1,046
def store_user_bot(user_id, intended_user, bot_id): """Store an uploaded bot in object storage.""" if user_id != intended_user: raise api_util.user_mismatch_error( message="Cannot upload bot for another user.") if bot_id != 0: raise util.APIError( 400, message="Sorry, only one bot allowed per user.") uploaded_file = validate_bot_submission() with model.engine.connect() as conn: team = conn.execute(model.team_leader_query(user_id)).first() if team: user_id = intended_user = team["leader_id"] bot_where_clause = (model.bots.c.user_id == user_id) & \ (model.bots.c.id == bot_id) bot = conn.execute(model.bots.select(bot_where_clause)).first() if not bot: raise util.APIError(404, message="Bot not found.") # Check if the user already has a bot compiling if bot["compile_status"] == model.CompileStatus.IN_PROGRESS.value: raise util.APIError(400, message="Cannot upload new bot until " "previous one is compiled.") blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id), model.get_compilation_bucket(), chunk_size=262144) blob.upload_from_file(uploaded_file) # Flag the user as compiling update = model.bots.update() \ .where(bot_where_clause) \ .values( compile_status=model.CompileStatus.UPLOADED.value, update_time=sqlalchemy.sql.func.now(), timeout_sent=False, ) conn.execute(update) return util.response_success({ "user_id": user_id, "bot_id": bot["id"], })
2b19e4092df3cb93fdadf5f06176ec4ec9300f63
1,047
def dispatch(methods, request, notification_errors=False): """Dispatch JSON-RPC requests to a list of methods:: r = dispatch([cat], {'jsonrpc': '2.0', 'method': 'cat', 'id': 1}) The first parameter can be either: - A *list* of functions, each identifiable by its ``__name__`` attribute. - Or a *dictionary* of name:method pairs. When using a **list**, the methods must be identifiable by a ``__name__`` attribute. Functions already have a ``__name__`` attribute:: >>> def cat(): ... return 'meow' ... >>> cat.__name__ 'cat' >>> dispatch([cat], ...) Lambdas require setting it:: >>> cat = lambda: 'meow' >>> cat.__name__ = 'cat' >>> dispatch([cat], ...) As do partials:: >>> max_ten = partial(min, 10) >>> max_ten.__name__ = 'max_ten' >>> dispatch([max_ten], ...) Alternatively, consider using a **dictionary** instead:: >>> dispatch({'cat': cat, 'max_ten': max_ten}, ...) See the `Methods`_ module for another easy way to build the list of methods. :param methods: List or dict of methods to dispatch to. :param request: JSON-RPC request. This can be in dict or string form. Byte arrays should be `decoded <https://docs.python.org/3/library/codecs.html#codecs.decode>`_ first. :param notification_errors: Should `notifications <http://www.jsonrpc.org/specification#notification>`_ get error responses? Typically notifications don't receive any response, except for "Parse error" and "Invalid request" errors. Enabling this will include all other errors such as "Method not found". A notification is then similar to many unix commands - *"There was no response, so I can assume the request was successful."* :returns: A `Response`_ object - either `RequestResponse`_, `NotificationResponse`_, or `ErrorResponse`_ if there was a problem processing the request. In any case, the return value gives you ``body``, ``body_debug``, ``json``, ``json_debug``, and ``http_status`` values. """ # Process the request r = None error = None try: # Log the request request_log.info(str(request)) # Create request object (also validates the request) r = Request(request) # Call the requested method result = _call(methods, r.method_name, r.args, r.kwargs) # Catch any JsonRpcServerError raised (Invalid Request, etc) except JsonRpcServerError as e: error = e # Catch uncaught exceptions, respond with ServerError except Exception as e: # pylint: disable=broad-except # Log the uncaught exception logger.exception(e) # Create an exception object, used to build the response error = ServerError(str(e)) # Now build a response. # Error if error: # Notifications get a non-response - see spec if r and r.is_notification and not notification_errors: response = NotificationResponse() else: # Get the 'id' part of the request, to include in error response request_id = r.request_id if r else None response = ErrorResponse( error.http_status, request_id, error.code, error.message, error.data) # Success else: # Notifications get a non-response if r and r.is_notification: response = NotificationResponse() else: response = RequestResponse(r.request_id, result) # Log the response and return it response_log.info(response.body, extra={ 'http_code': response.http_status, 'http_reason': HTTP_STATUS_CODES[response.http_status]}) return response
3c086f864740086f611b702b1ad7f228fff4031f
1,048
def parse_conv(weights_file, cfg_parser, section, layer_dict): """ parse conv layer Args: weights_file (file object): file object of .weights file cfg_parser (ConfigParser object): ConfigParser object of .cfg file for net section (str): name of conv layer layer_dict (dictionary): dict storing layer info Returns: dict storing layer info and weights values """ prev_layer_channel = layer_dict['prev_layer_channel'] count = layer_dict['count'] filters = int(cfg_parser[section]['filters']) size = int(cfg_parser[section]['size']) stride = int(cfg_parser[section]['stride']) pad = int(cfg_parser[section]['pad']) activation = cfg_parser[section]['activation'] batch_normalize = 'batch_normalize' in cfg_parser[section] weights_shape = (size, size, prev_layer_channel, filters) darknet_w_shape = (filters, weights_shape[2], size, size) weights_size = np.product(weights_shape) prev_layer_channel = filters print('conv2d', 'bn' if batch_normalize else ' ', activation, weights_shape) bn_weight_list = [] conv_bias = [] if batch_normalize: bn_weights = np.ndarray( shape=(4, filters), dtype='float32', buffer=weights_file.read(filters * 16)) count += 4 * filters bn_weight_list = [ bn_weights[1], # scale gamma bn_weights[0], # shift beta bn_weights[2], # running mean bn_weights[3] # running var ] else: conv_bias = np.ndarray( shape=(filters, ), dtype='float32', buffer=weights_file.read(filters * 4)) count += filters conv_weights = np.ndarray( shape=darknet_w_shape, dtype='float32', buffer=weights_file.read(weights_size * 4)) count += weights_size # DarkNet conv_weights are serialized Caffe-style: # (out_dim, in_dim, height, width) # We would like to set these to Tensorflow order: # (height, width, in_dim, out_dim) conv_weights = np.transpose(conv_weights, [2, 3, 1, 0]) layer_dict['prev_layer_channel'] = prev_layer_channel layer_dict['count'] = count layer_dict['conv_weights'] = conv_weights layer_dict['conv_bias'] = conv_bias layer_dict['bn_weight_list'] = bn_weight_list return layer_dict
6e7cc1d2b4115dc44eaf2ad90240144f7157b30b
1,049
def generate_format_spec(num_vals, sep, dtypes, decimals=None): """ Generate a format specifier for generic input. -------------------------------------------------------------- Input num_vals : number of wild-cards sep : separator string (could be '_', '-', '--' ...) used to separate wild-cards dtypes : data types of the wildcards ('str', 'float', 'int') decimals : number of decimals (only relevant for floats) -------------------------------------------------------------- Output String of the form: "{0:<dtype>}<sep>{1:<dtype>}<sep>...", where each occurrence of <dtype> is replaced by the dtype value of the current wild-card and <sep> is replaced by the separator string. """ assert type(num_vals) is int # dictionary of identifiers for supported data types dident = dict([(str, 's'), (int, 'd'), \ (float, ''), #'.1f'\ (np.float64, '') #'.1f' ] ) if decimals is not None: assert type(decimals) is int dident[float] = '.{}f'.format(decimals) dident[np.float64] = '.{}f'.format(decimals) if not hasattr(dtypes, '__iter__'): dtypes = [dtypes,] * num_vals elif type(dtypes) is str: dtypes = [dtypes,] * num_vals elif len(dtypes) < num_vals: dtypes = [dtypes[0],] * num_vals for dt in dtypes: assert dt in dident.keys(), dt # construct actual output out = "" for i in range(num_vals): out += "{" + str(i) + ":" + dident[dtypes[i]] + "}" out += sep # remove additional separator from output return out[:-len(sep)]
3b65ad3b436b6c578fa2504a2ea4a475700432ce
1,050
from typing import Optional


def products_with_low_stock(threshold: Optional[int] = None):
    """Return queryset with stock lower than given threshold."""
    if threshold is None:
        threshold = settings.LOW_STOCK_THRESHOLD
    stocks = (
        Stock.objects.select_related("product_variant")
        .values("product_variant__product_id", "warehouse_id")
        .annotate(total_stock=Sum("quantity"))
    )
    return stocks.filter(total_stock__lte=threshold).distinct()
29bbdd3236b42bf3cef17f84a919ab201946c084
1,051
def robust_topological_sort(deps):
    """
    A topological sorting algorithm which is robust enough to handle cyclic graphs.

    First, we bucket nodes into strongly connected components (we use Tarjan's
    linear algorithm for that). Then, we topologically sort these buckets
    grouping sibling buckets into sets.

    :param deps: a dictionary representing the dependencies between nodes
    :return: groups of buckets (a bucket is a strongly connected component) sorted bottom-up

    >>> deps1 = {'S':{'S','X', 'A'}, 'X':{'Y', 'B'}, 'Y':{'Z'}, 'Z':{'X'}, 'A':{'B'}, 'B':{}}
    >>> expected = [frozenset({frozenset({'B'})}), frozenset({frozenset({'A'}), frozenset({'Y', 'X', 'Z'})}), frozenset({frozenset({'S'})})]
    >>> order = robust_topological_sort(deps1)
    >>> order == expected
    True
    """
    # correspondences between nodes and buckets (strongly connected components)
    n2c = defaultdict(None)
    components = tarjan(deps)
    for i, component in enumerate(components):
        for v in component:
            n2c[v] = i

    # find the dependencies between strongly connected components
    cdeps = defaultdict(set)
    for head, tail in deps.items():
        hc = n2c[head]
        for t in tail:
            tc = n2c[t]
            if hc != tc:
                cdeps[hc].add(tc)

    # topsort buckets and translate bucket ids back into nodes
    return deque(frozenset(components[c] for c in group) for group in topological_sort(cdeps))
fb2b70f21ccb97880767e73362b46e27804c2d17
1,052
import inspect import functools import warnings def deprecated(reason): """ This is a decorator which can be used to mark functions and classes as deprecated. It will result in a warning being emitted when the function is used. From https://stackoverflow.com/a/40301488 """ string_types = (type(b""), type(u"")) if isinstance(reason, string_types): # The @deprecated is used with a 'reason'. # # .. code-block:: python # # @deprecated("please, use another function") # def old_function(x, y): # pass def decorator(func1): if inspect.isclass(func1): fmt1 = "Call to deprecated class {name} ({reason})." else: fmt1 = "Call to deprecated function {name} ({reason})." @functools.wraps(func1) def new_func1(*args, **kwargs): warnings.simplefilter("always", DeprecationWarning) warnings.warn( fmt1.format(name=func1.__name__, reason=reason), category=DeprecationWarning, stacklevel=2, ) warnings.simplefilter("default", DeprecationWarning) return func1(*args, **kwargs) return new_func1 return decorator elif inspect.isclass(reason) or inspect.isfunction(reason): # The @deprecated is used without any 'reason'. # # .. code-block:: python # # @deprecated # def old_function(x, y): # pass func2 = reason if inspect.isclass(func2): fmt2 = "Call to deprecated class {name}." else: fmt2 = "Call to deprecated function {name}." @functools.wraps(func2) def new_func2(*args, **kwargs): warnings.simplefilter("always", DeprecationWarning) warnings.warn( fmt2.format(name=func2.__name__), category=DeprecationWarning, stacklevel=2, ) warnings.simplefilter("default", DeprecationWarning) return func2(*args, **kwargs) return new_func2 else: raise TypeError(repr(type(reason)))
1b75306b9b712caf3cd6c8425d2344b8ca170fcb
1,053
import torch


def rotate_tensor(l: torch.Tensor, n: int = 1) -> torch.Tensor:
    """Rotate tensor by n positions to the right

    Args:
        l (torch.Tensor): input tensor
        n (int, optional): positions to rotate. Defaults to 1.

    Returns:
        torch.Tensor: rotated tensor
    """
    return torch.cat((l[n:], l[:n]))
9cdaa7be718f0676ad85e05b01ee918459697c60
1,054
def generate_all_fish( n_fish, n_replica_fish, channel, interaction, k_coh, k_ar, alpha, lim_neighbors, weights = [1], neighbor_weights=None, fish_max_speeds=None, clock_freqs=None, verbose=False, names=None ): """Generate both replica and regular fish Arguments: n_fish {int} -- Number of ideal fish to generate n_replica_fish {int} -- Number of replica fish to generate channel {Channel} -- Channel instance interaction {Interaction} -- Interaction instance k_coh {float} -- Parameter to Delight Fish k_ar {float} -- Weighting of neighbors in Delight Fish alpha {int} -- Goal distance from neighbor for Delight Fish lim_neighbors {list} -- Tuple of min and max neighbors weights {float|list} -- List of weights for replica fish learned function neighbor_weight {float|list} -- List of neighbor weights fish_max_speeds {float|list} -- List of max speeds clock_freqs {int|list} -- List of clock speeds names {list} -- List of names for your replica fish """ n = n_fish + n_replica_fish if neighbor_weights is None: neighbor_weights = [1.0] * n elif not isinstance(neighbor_weights, list): neighbor_weights = [neighbor_weights] * n if fish_max_speeds is None: fish_max_speeds = [1.0] * n elif not isinstance(fish_max_speeds, list): fish_max_speeds = [fish_max_speeds] * n if clock_freqs is None: clock_freqs = [1] * n elif not isinstance(clock_freqs, list): clock_freqs = [clock_freqs] * n if names is None: names = ['Unnamed'] * n all_fish = [] for i in range(n_fish): all_fish.append(Fish( id=i, channel=channel, interaction=interaction, k_coh = k_coh, k_ar = k_ar, alpha = alpha, lim_neighbors=lim_neighbors, neighbor_weight=neighbor_weights[i], fish_max_speed=fish_max_speeds[i], clock_freq=clock_freqs[i], verbose=verbose, name=names[i] )) for i in range(n_fish, n_fish + n_replica_fish): all_fish.append(ReplicaFish( id=i, channel=channel, interaction=interaction, weights = weights, fish_max_speed=fish_max_speeds[i], clock_freq=clock_freqs[i], name=names[i], verbose=verbose )) return all_fish
3924235d7bdcf25a91dcb1ec40220b761b85f15f
1,055
def allclose(a, b):
    """close to machine precision"""
    return np.allclose(a, b, rtol=1e-14, atol=1e-14)
ad7ee29d7432947aec0030936985b456a5919eaa
1,056
def check_pwhash(pwhash, password): """Check a password against a given hash value. Since many forums save md5 passwords with no salt and it's technically impossible to convert this to an sha hash with a salt we use this to be able to check for plain passwords:: plain$$default md5 passwords without salt:: md5$$c21f969b5f03d33d43e04f8f136e7682 md5 passwords with salt:: md5$123456$7faa731e3365037d264ae6c2e3c7697e sha passwords:: sha$123456$118083bd04c79ab51944a9ef863efcd9c048dd9a Note that the integral passwd column in the table is only 60 chars long. If you have a very large salt or the plaintext password is too long it will be truncated. >>> check_pwhash('plain$$default', 'default') True >>> check_pwhash('sha$$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'password') True >>> check_pwhash('sha$$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'wrong') False >>> check_pwhash('md5$xyz$bcc27016b4fdceb2bd1b369d5dc46c3f', u'example') True >>> check_pwhash('sha$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'password') False >>> check_pwhash('md42$xyz$bcc27016b4fdceb2bd1b369d5dc46c3f', 'example') False """ if isinstance(password, unicode): password = password.encode('utf-8') if pwhash.count('$') < 2: return False method, salt, hashval = pwhash.split('$', 2) if method == 'plain': return hashval == password elif method == 'md5': h = md5() elif method == 'sha': h = sha1() else: return False h.update(salt) h.update(password) return h.hexdigest() == hashval
618cdc8a9f7f7d7062e1e0ae26cf81157a8dbba7
1,057
def make_markov_model(tweets):
    """Wrapper around making Markov Chain"""
    return markovify.Text(" ".join(tweets))
0bd98d1a2f3a5aae37591389b06d402073f1a7ec
1,058
def slice_image(sitk_image, start=(0, 0, 0), end=(-1, -1, -1)):
    """Returns the `sitk_image` sliced from the `start` index (x,y,z) to the
    `end` index.
    """
    size = sitk_image.GetSize()
    assert len(start) == len(end) == len(size)

    # replace -1 dim index placeholders with the size of that dimension
    end = [size[i] if end[i] == -1 else end[i] for i in range(len(end))]

    slice_filter = sitk.SliceImageFilter()
    slice_filter.SetStart(start)
    slice_filter.SetStop(end)
    return slice_filter.Execute(sitk_image)
eda4477c016d1130bb185a5793409ff95b9cd44c
1,059
def MakeGlyphs(src, reverseNormals):
    """
    Glyph the normals on the surface.

    You may need to adjust the parameters for maskPts, arrow and glyph for a
    nice appearance.

    :param: src - the surface to glyph.
    :param: reverseNormals - if True the normals on the surface are reversed.
    :return: The glyph object.
    """
    # Sometimes the contouring algorithm can create a volume whose gradient
    # vector and ordering of polygon (using the right hand rule) are
    # inconsistent. vtkReverseSense cures this problem.
    reverse = vtk.vtkReverseSense()

    # Choose a random subset of points.
    maskPts = vtk.vtkMaskPoints()
    maskPts.SetOnRatio(5)
    maskPts.RandomModeOn()
    if reverseNormals:
        reverse.SetInputData(src)
        reverse.ReverseCellsOn()
        reverse.ReverseNormalsOn()
        maskPts.SetInputConnection(reverse.GetOutputPort())
    else:
        maskPts.SetInputData(src)

    # Source for the glyph filter
    arrow = vtk.vtkArrowSource()
    arrow.SetTipResolution(16)
    arrow.SetTipLength(0.3)
    arrow.SetTipRadius(0.1)

    glyph = vtk.vtkGlyph3D()
    glyph.SetSourceConnection(arrow.GetOutputPort())
    glyph.SetInputConnection(maskPts.GetOutputPort())
    glyph.SetVectorModeToUseNormal()
    glyph.SetScaleFactor(1)
    glyph.SetColorModeToColorByVector()
    glyph.SetScaleModeToScaleByVector()
    glyph.OrientOn()
    glyph.Update()
    return glyph
0bb28c943a2c371f5e536851208ac0d4b09cd51a
1,060
def get_tags_categorys(self):
    """02 Return the variables for adding a document."""
    tags = Tag.all()
    categorys = Category.all()
    return tags, categorys
557e5182dd3dbf3571e005c4e105a20e2cdd3dd1
1,061
import sys


def main():
    """Operations executed when calling this script from the command line"""
    args = ArgparseUserOptions(
        description=parser_description,
        args_dict_list=[required_args_dict, optional_args_dict],
        epilog=__doc__,
    ).parse_args(sys.argv[1:])
    return args
95f99464384ba08b0ac5b1295f1562493f8efcbf
1,062
import pprint import warnings def single_mode_constant_rotation(**kwargs): """Return WaveformModes object a single nonzero mode, with phase proportional to time The waveform output by this function will have just one nonzero mode. The behavior of that mode will be fairly simple; it will be given by exp(i*omega*t). Note that omega can be complex, which gives damping. Parameters ---------- s : int, optional Spin weight of the waveform field. Default is -2. ell, m : int, optional The (ell, m) values of the nonzero mode in the returned waveform. Default value is (abs(s), -abs(s)). ell_min, ell_max : int, optional Smallest and largest ell values present in the output. Default values are abs(s) and 8. data_type : int, optional Default value is whichever psi_n corresponds to the input spin. It is important to choose these, rather than `h` or `sigma` for the analytical solution to translations, which doesn't account for the direct contribution of supertranslations (as opposed to the indirect contribution, which involves moving points around). t_0, t_1 : float, optional Beginning and end of time. Default values are -20. and 20. dt : float, optional Time step. Default value is 0.1. omega : complex, optional Constant of proportionality such that nonzero mode is exp(i*omega*t). Note that this can be complex, which implies damping. Default is 0.5. """ s = kwargs.pop("s", -2) ell = kwargs.pop("ell", abs(s)) m = kwargs.pop("m", -ell) ell_min = kwargs.pop("ell_min", abs(s)) ell_max = kwargs.pop("ell_max", 8) data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)]) t_0 = kwargs.pop("t_0", -20.0) t_1 = kwargs.pop("t_1", 20.0) dt = kwargs.pop("dt", 1.0 / 10.0) t = np.arange(t_0, t_1 + dt, dt) n_times = t.size omega = complex(kwargs.pop("omega", 0.5)) data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex) data[:, sf.LM_index(ell, m, ell_min)] = np.exp(1j * omega * t) if kwargs: warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}") return scri.WaveformModes( t=t, data=data, ell_min=ell_min, ell_max=ell_max, frameType=scri.Inertial, dataType=data_type, r_is_scaled_out=True, m_is_scaled_out=True, )
cc31bf0587ff397cb79c42863efd3d8173cddc72
1,063
def get_file(file_pattern: list, sub_type: str = None) -> list:
    """Get a subset from file patterns that belong to a sub-type.

    If no sub-type is specified, return all file patterns.

    Args:
        file_pattern (list): The input file patterns
        sub_type (str, optional): A string to search in file patterns. Defaults to None.

    Raises:
        ValueError: No file pattern matches the sub-type provided.

    Returns:
        list: A filtered sub list of file patterns.
    """
    if sub_type is None:
        return file_pattern
    result = []
    for entry in file_pattern:
        if sub_type in entry:
            result.append(entry)
    if len(result) < 1:
        raise ValueError(
            "No file found for sub-type {}: {}".format(sub_type, file_pattern)
        )
    else:
        return result
7d39c05fa8a1f7a9370de459472ecf7070aa6569
1,064
def etopo_subset(llcrnrlon=None, urcrnrlon=None, llcrnrlat=None,
                 urcrnrlat=None, tfile='dap', smoo=False, subsample=False):
    """Get a etopo subset.
    Should work on any netCDF with x, y, data
    http://www.trondkristiansen.com/wp-content/uploads/downloads/
    2011/07/contourICEMaps.py

    Example
    -------
    >>> import matplotlib.pyplot as plt
    >>> offset = 5
    >>> #tfile = './ETOPO1_Bed_g_gmt4.grd'
    >>> tfile = 'dap'
    >>> llcrnrlon, urcrnrlon, llcrnrlat, urcrnrlat = -43, -30, -22, -17
    >>> lons, lats, bathy = etopo_subset(llcrnrlon - offset,
    ...                                  urcrnrlon + offset,
    ...                                  llcrnrlat - offset,
    ...                                  urcrnrlat + offset,
    ...                                  smoo=True, tfile=tfile)
    >>> fig, ax = plt.subplots()
    >>> cs = ax.pcolormesh(lons, lats, bathy)
    >>> _ = ax.axis([-42, -28, -23, -15])
    >>> _ = ax.set_title(tfile)
    """
    if tfile == 'dap':
        tfile = 'http://opendap.ccst.inpe.br/Misc/etopo2/ETOPO2v2c_f4.nc'

    etopo = Dataset(tfile, 'r')
    lons = etopo.variables["x"][:]
    lats = etopo.variables["y"][:]

    res = get_indices(llcrnrlat, urcrnrlat, llcrnrlon, urcrnrlon, lons, lats)

    lon, lat = np.meshgrid(lons[res[0]:res[1]], lats[res[2]:res[3]])

    bathy = etopo.variables["z"][int(res[2]):int(res[3]),
                                 int(res[0]):int(res[1])]

    if smoo:
        bathy = laplace_filter(bathy, M=None)

    if subsample:
        bathy = bathy[::subsample]
        lon, lat = lon[::subsample], lat[::subsample]
    return lon, lat, bathy
6af3b6773c7ef28cde75b7708370819fd5637697
1,065
def get_all_report_data(db):
    """Gets all report data for pre report page"""
    query = r'SELECT * FROM report WHERE relevent=1 ORDER BY id DESC'
    return db_get(db, query)
727c4c9ec2125747237d40d7f0dd019b3d116d00
1,066
def find_center_projection(mat1, mat2, flip=True, chunk_height=None,
                           start_row=None, denoise=True, norm=False,
                           use_overlap=False):
    """
    Find the center-of-rotation (COR) using projection images at 0-degree
    and 180-degree based on a method in Ref. [1].

    Parameters
    ----------
    mat1 : array_like
        2D array. Projection image at 0-degree.
    mat2 : array_like
        2D array. Projection image at 180-degree.
    flip : bool, optional
        Flip the 180-degree projection in the left-right direction if True.
    chunk_height : int or float, optional
        Height of the sub-area of projection images. If a float is given, it
        must be in the range of [0.0, 1.0].
    start_row : int, optional
        Starting row used to extract the sub-area.
    denoise : bool, optional
        Apply the Gaussian filter if True.
    norm : bool, optional
        Apply the normalization if True.
    use_overlap : bool, optional
        Use the combination of images in the overlap area for calculating
        correlation coefficients if True.

    Returns
    -------
    cor : float
        Center-of-rotation.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.418448
    """
    (nrow, ncol) = mat1.shape
    if flip is True:
        mat2 = np.fliplr(mat2)
    win_width = ncol // 2
    if chunk_height is None:
        chunk_height = int(0.1 * nrow)
    if isinstance(chunk_height, float):
        if 0.0 < chunk_height <= 1.0:
            chunk_height = int(chunk_height * nrow)
        else:
            chunk_height = int(0.1 * nrow)
    chunk_height = np.clip(chunk_height, 1, nrow - 1)
    if start_row is None:
        start = nrow // 2 - chunk_height // 2
    elif start_row < 0:
        start = nrow + start_row - chunk_height // 2
    else:
        start = start_row - chunk_height // 2
    stop = start + chunk_height
    start = np.clip(start, 0, nrow - chunk_height - 1)
    stop = np.clip(stop, chunk_height, nrow - 1)
    mat1_roi = mat1[start: stop]
    mat2_roi = mat2[start: stop]
    (overlap, side, _) = find_overlap(mat1_roi, mat2_roi, win_width, side=None,
                                      denoise=denoise, norm=norm,
                                      use_overlap=use_overlap)
    if side == 0:
        cor = overlap / 2.0 - 1.0
    else:
        cor = ncol - overlap / 2.0 - 1.0
    return cor
21661a6b9ed33a220ede918954ac18a420e638ae
1,067
def parse_date(str):
    """ parsing given str to date """
    ymd = str.split('-')
    return date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
29d0f79e2428e315c072c7801d927154c3bfee57
1,068
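A short sketch of how `parse_date` above is presumably called, assuming `date` was imported from `datetime` in the original module.

from datetime import date  # assumed import used by parse_date

print(parse_date("2021-07-04"))  # -> datetime.date(2021, 7, 4)
print(parse_date("1999-1-1"))    # single-digit fields parse fine via int()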
def mark_as_widget(view):
    """
    Marks @view as a widget so we can later inspect that attribute, for
    example, when hiding panels in _vi_enter_normal_mode.

    Used prominently by '/', '?' and ':'.

    XXX: This doesn't always work as we expect. For example, changing
         settings to a panel created instants before does not make those
         settings visible when the panel is activated. Investigate.
         We still need this so that contexts will ignore widgets, though.
         However, the fact that they are widgets should suffice to disable
         Vim keys for them...
    """
    view.settings().set('is_vintageous_widget', True)
    return view
965555660b82f834e09ba3ffc985755d4fd7fa66
1,069
def module_name(ctx, f):
    """Given Haskell source file path, turn it into a dot-separated module name.

    module_name(
        ctx,
        "some-workspace/some-package/src/Foo/Bar/Baz.hs",
    ) => "Foo.Bar.Baz"

    Args:
        ctx: Rule context.
        f:   Haskell source file.

    Returns:
        string: Haskell module name.
    """
    return _drop_extension(_rel_path_to_module(ctx, f).replace('/', '.'))
77a38f62211a827ac8fe9af0cc36636b11e561d5
1,070
def store(key):
    """Gets the configured default store.
    The default is PickleStore

    :return store: Store object
    """
    global __stores
    if __stores is None:
        __stores = {}
    if key not in __stores:
        __stores[key] = __configuration[STORE](key)
    return __stores[key]
76197d8cedc44e15a75c81f1bcb07d3a4e59e021
1,071
def get_label_for_line(line, leg):
    """ Can't remember what I was using this for but seems useful to keep """
    # leg = line.figure.legends[0]
    # leg = line.axes.get_legend()
    for h, t in zip(leg.legendHandles, leg.texts):
        if h.get_label() == line.get_label():
            return t.get_text()
4180ae7fd7fe5b98ebafa20fbdf2528205e4ec31
1,072
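A hedged Matplotlib sketch of how `get_label_for_line` above could be used; the figure and labels are invented, and it assumes a Matplotlib version that still exposes `Legend.legendHandles`.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line1, = ax.plot([0, 1], [0, 1], label="speed")
line2, = ax.plot([0, 1], [1, 0], label="load")
leg = ax.legend()

# Look up the legend Text entry that belongs to a particular Line2D.
print(get_label_for_line(line2, leg))  # -> "load" (matched via get_label())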
def _node_parent_listener(target, value, oldvalue, initiator):
    """Listen for Node.parent being modified and update path"""
    if value != oldvalue:
        if value is not None:
            if target._root != (value._root or value):
                target._update_root(value._root or value)
            target._update_path(newparent=value)
        else:
            # This node just got orphaned. It's a new root
            target._update_root(target)
            target._update_path(newparent=target)
    return value
06c06b144c777f33673e2051f1d4173204720f65
1,073
import os


def save_model_architecture(model, project_name, keras_model_type, cat_vocab_dict,
                            model_options, chart_name="model_before"):
    """
    This function saves the model architecture in a PNG file in the artifacts sub-folder of project_name folder
    """
    if isinstance(project_name, str):
        if project_name == '':
            project_name = "deep_autoviml"
    else:
        print('Project name must be a string and helps create a folder to store model.')
        project_name = "deep_autoviml"
    save_model_path = model_options['save_model_path']
    save_artifacts_path = os.path.join(save_model_path, "artifacts")
    try:
        plot_filename = os.path.join(save_artifacts_path, chart_name) + ".png"
        print('\nSaving model architecture...')
        tf.keras.utils.plot_model(model=model, to_file=plot_filename, dpi=72,
                                  show_layer_names=True, rankdir="LR", show_shapes=True)
        print(' model architecture saved in file: %s' % plot_filename)
    except:
        print('Model architecture not saved due to error. Continuing...')
        plot_filename = ""
    return plot_filename
6cbe18b35bb503d3042458f45929b93091d5a2c7
1,074
import torch
import typing


def sequential_to_momentum_net(module: torch.nn.Sequential,
                               split_dim=1,
                               coupling_forward: typing.Optional[typing.List[typing.Optional[typing.Callable]]] = None,
                               coupling_inverse: typing.Optional[typing.List[typing.Optional[typing.Callable]]] = None,
                               memory_mode: MemoryModes = MemoryModes.autograd_function,
                               target_device: str = "",
                               fused_optimizer: FUSED_OPTIMIZER = None,
                               residual: bool = False,
                               beta: float = 0.9) -> ReversibleSequential:
    """
    Creates a sequential MomentumNet by unrolling a nn.Sequential module and dispatching to `momentum_net()`

    :param module: An existing nn.Sequential module that should be converted into a ReversibleSequential module.
    :param split_dim: RevNets require two streams. This parameter specifies which dimension to split in half to
        create the two streams. `None` would mean the input gets replicated for both streams. It's usually best to
        split along the features, which is why the default (1) is compatible with convolutions.
    :param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a
        custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses
        y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the
        function output. For more information, look at the examples. default = revnet coupling
    :param coupling_inverse: The inverse of the coupling function. default = revnet inverse
    :param memory_mode: One of `MemoryModes`'s values. Some things are only supported in one mode while others
        might only be supported in another. default = autograd function (highest coverage but spotty XLA support)
    :param target_device: Specifies where the parameters should be moved to before computing the forward and
        backward pass. This allows efficient CPU-offloading. default = no offloading (keep parameters on the
        device they're on)
    :param fused_optimizer: Allows an optimizer step to run while the model is computing its backward pass. This
        means that the gradients don't have to be fully instantiated anymore and can improve speed when used with
        cpu-offload due to asynchronous compute. It expects a function that generates an optimizer from a list of
        parameters. (like Adam.__init__) default = no fused optimizer step
    :param residual: Whether to "undo" a residual stream or not. Using y = f(x0) + x0 + x1 is generally not a good
        idea, so this would subtract `x0` from y allowing you to patch existing residual modules without modifying
        their code.
    :param beta: MomentumNet beta value that controls how much of the velocity stream is kept.
    :return: Instantiated MomentumNet (instance of `ReversibleSequential`)
    """
    return momentum_net(*maybe_residual_to_plain(module, residual), split_dim=split_dim,
                        coupling_forward=coupling_forward, coupling_inverse=coupling_inverse,
                        memory_mode=memory_mode, target_device=target_device, beta=beta,
                        fused_optimizer=fused_optimizer)
269d45cf845555988c3284a88a7e3ca83fb697b5
1,075
def user_view(request, name):
    """Render the view page for users"""
    # argument is the login name, not the uuid in Cassandra
    user = User.find(name)
    if not user:
        return redirect("users:home")
    ctx = {
        "req_user": request.user,
        "user_obj": user,
        "groups": [Group.find(gname) for gname in user.groups],
    }
    return render(request, "users/view.html", ctx)
f7f5bc01d2b60bcca048e0b2183eefcc5f4eb907
1,076
def grelha_nr_colunas(g):
    """
    grelha_nr_colunas: grid --> positive integer
    grelha_nr_colunas(g) returns the number of columns of grid g.
    """
    return len(g[0])
740b06c186ad1455aecadfaf112f253fb434d5ff
1,077
def rmsd(array_a, array_b):
    """
    Calculate the RMSD between two 1d arrays

    Parameters
    ----------
    array_a, array_b : 1d numpy arrays
        The arrays to be compared

    Returns
    -------
    rmsd : float
        The Root Mean Square Deviation of the elements of the array
    """
    diff = array_a - array_b
    diff2 = np.square(diff)
    diff2_sum = np.sum(diff2)
    norm_diff2_sum = diff2_sum / len(array_a)
    rmsd = np.sqrt(norm_diff2_sum)
    return rmsd
7390cebff27d73bc9268cdc23e21c2d362bca2cc
1,078
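A quick NumPy sketch exercising the `rmsd` helper above; the arrays are invented for illustration.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 5.0])

# Only the last element differs (by 2), so RMSD = sqrt(4 / 3) ~= 1.1547.
print(rmsd(a, b))
print(np.sqrt(np.mean((a - b) ** 2)))  # same value, computed directly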
def readFile(sFile, sMode = 'rb'):
    """
    Reads the entire file.
    """
    oFile = open(sFile, sMode)
    sRet = oFile.read()
    oFile.close()
    return sRet
d44e8217ae7dcab1c826ccbbe80e066d76db31b5
1,079
def VI_cgivens_d(a, b):
    """
    returns cos, sin, r
    """
    c = vsip_cmplx_d(0.0, 0.0)
    s = vsip_cmplx_d(0.0, 0.0)
    r = vsip_cmplx_d(0.0, 0.0)
    am = vsip_cmag_d(a)
    bm = vsip_cmag_d(b)
    if am == 0.0:
        r.r = b.r; r.i = b.i
        s.r = 1.0
    else:
        scale = am + bm
        alpha = vsip_cmplx_d(a.r / am, a.i / am)
        scalesq = scale * scale
        norm = scale * sqrt((am * am) / scalesq + (bm * bm) / scalesq)
        c.r = am / norm
        s.r = (alpha.r * b.r + alpha.i * b.i) / norm
        s.i = (-alpha.r * b.i + alpha.i * b.r) / norm
        r.r = alpha.r * norm
        r.i = alpha.i * norm
    return (c, s, r)
7ed08b3c583a805cd9a7b0dfcfb80eb67a054e1e
1,080
import json


def documint_request_factory(request):
    """
    Create a function that issues a request to a Documint endpoint.

    Status codes outside the 2xx range are treated as errors. If error
    responses are JSON then `DocumintError` is raised, otherwise
    `MalformedDocumintError` is raised.

    If the status code indicates success, the `IResponse` is returned.
    """
    def _raise_error(data, response):
        if content_type(response.headers) == b'application/json':
            try:
                causes = json.loads(data).get(u'causes', [])
                raise DocumintError(
                    causes=[DocumintErrorCause(cause.get(u'type'),
                                               cause.get(u'reason'),
                                               cause.get(u'description'))
                            for cause in causes])
            except ValueError:
                pass
        raise MalformedDocumintError(data)

    def _check_status(response):
        if 200 <= response.code < 300:
            return response
        d = response.content()
        d.addCallback(_raise_error, response)
        return d

    def _request(*a, **kw):
        d = request(*a, **kw)
        d.addCallback(_check_status)
        return d
    return _request
9dc4dcba0df1094c394dbe8d9424f874e3ac3169
1,081
import os


def roipac_header(file_path, params):
    """
    Function to obtain a header for roipac interferogram file or converted
    geotiff.
    """
    rsc_file = os.path.join(params[cf.DEM_HEADER_FILE])

    if rsc_file is not None:
        projection = parse_header(rsc_file)[ifc.PYRATE_DATUM]
    else:
        raise RoipacException('No DEM resource/header file is '
                              'provided')

    if file_path.endswith('_dem.tif'):
        header_file = os.path.join(params[cf.DEM_HEADER_FILE])
    elif file_path.endswith('_unw.tif'):
        base_file = file_path[:-8]
        header_file = base_file + '.unw.' + ROI_PAC_HEADER_FILE_EXT
    else:
        header_file = "%s.%s" % (file_path, ROI_PAC_HEADER_FILE_EXT)

    header = manage_header(header_file, projection)

    return header
743640ff38af24dcdc046727f0bf65fe243fba4a
1,082
import os
import fnmatch


def find_exe_in_path(exe, bypass_permissions_check=None, add_exe_to_path=None):
    """
    Check that an executable exists in $PATH
    """
    paths = os.environ["PATH"].split(os.pathsep)
    for path in paths:
        fullexe = os.path.join(path, exe)
        if os.path.exists(fullexe):
            if not bypass_permissions_check:
                check_file_executable(fullexe)
            if add_exe_to_path:
                path = fullexe
            return path
        elif os.path.isdir(path):
            # allow for filename filter matching
            exematch = fnmatch.filter(os.listdir(path), exe)
            if exematch and os.path.exists(os.path.join(path, exematch[0])):
                if not bypass_permissions_check:
                    check_file_executable(os.path.join(path, exematch[0]))
                if add_exe_to_path:
                    path = os.path.join(path, exematch[0])
                return path
    return None
9e3919c0c479bf272e582937d62c45e826eb6d6e
1,083
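An illustrative call to `find_exe_in_path` above. Passing `bypass_permissions_check=True` sidesteps the `check_file_executable` helper, which is defined elsewhere in the original project; the executable names are just examples.

# Returns the directory containing the match (or the full path when
# add_exe_to_path is set), or None when nothing on $PATH matches.
print(find_exe_in_path("python3", bypass_permissions_check=True))
print(find_exe_in_path("python*", bypass_permissions_check=True))  # fnmatch-style patterns also work
print(find_exe_in_path("definitely-not-installed", bypass_permissions_check=True))  # -> None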
def skip_for_tf2(f):
    """Decorator that skips tests when using TensorFlow 2."""

    def test_wrapper(*args, **kwargs):
        """Wraps the decorated function to determine whether to skip."""
        # Extract test case instance from args.
        self = args[0]
        try:
            # If tf.contrib doesn't exist, we are in TF 2.0.
            _ = tf.contrib
            _ = tf.contrib.estimator.regression_head(
                loss_reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
        except (AttributeError, ImportError):
            self.skipTest("Skipping test in TF 2.0.")
        return f(*args, **kwargs)

    return test_wrapper
02059cc9c8e6b83ab49dcd3b69d447fa3ec26324
1,084
import yaml


def clean_logfile(logfile_lines, to_remove):
    """Remove yaml fields from a list of lines.

    Removes from a set of lines the yaml_fields contained in the to_remove list.

    Arguments:
        logfile_lines (list): list of the lines of the logfile. Generated from a file by e.g.
            :py:meth:`~io.IOBase.readlines`.
        to_remove (list): list of keys to remove from logfile_lines

    Returns:
        list of lines where the removed keys have as values the `"<folded>"` string
    """
    line_rev = logfile_lines  # list of the lines of the logfile
    # loop in the reversed from (such as to parse by blocks)
    extra_lines = 20  # internal variable to be customized
    line_rev.reverse()
    # clean the log
    cleaned_logfile = []
    removed = []
    # for line in line_rev: #line_iter:
    while len(line_rev) > 0:
        line = line_rev.pop()
        to_print = line
        # check if the line contains interesting information
        for remove_it in to_remove:
            stream_list = []
            # line without comments
            valid_line = line.split('#')[0]
            spaces = 'nospace'
            # control that the string between the key and the semicolon is only spaces
            if remove_it in valid_line and ":" in valid_line:
                # print "here",remove_it,remove_it in valid_line and ":" in valid_line,valid_line
                starting_point = valid_line.find(remove_it)
                tmp_buf = valid_line[:starting_point]
                # find the closest comma to the staring point, if exists
                tmp_buf = tmp_buf[::-1]
                starting_comma = tmp_buf.find(',')
                if starting_comma < 0: st = 0
                tmp_buf = tmp_buf[st:]
                tmp_buf = tmp_buf[::-1]
                tmp_buf = tmp_buf.strip(' ')
                # print "there",tmp_buf,'starting',starting_point,len(tmp_buf)
                valid_line = valid_line[starting_point + len(remove_it):]
                spaces = valid_line[1:valid_line.find(':')]
                # if remove_it+':' in line.split('#')[0]:
            if len(spaces.strip(' ')) == 0 and len(tmp_buf) == 0:
                # this means that the key has been found
                # creates a new Yaml document starting from the line
                # treat the rest of the line following the key to be removed
                header = ''.join(line.split(':')[1:])
                header = header.rstrip() + '\n'
                # eliminate the anchor
                header = header.lstrip(' ')
                header = header.lstrip('*')
                if len(header) > 0:
                    stream_list.append(header)
                # part to be printed, updated
                to_print = line.split(':')[0] + ": <folded> \n"

                # then check when the mapping will end:
                while True:
                    # create a stream with extra_lines block
                    for i in range(0, min(extra_lines, len(line_rev))):
                        stream_list.append(line_rev.pop())
                    # create a stream to be parsed
                    stream = ''.join(stream_list)
                    # then parse the stream until the last valid position has been found
                    try:
                        for i in yaml.parse(stream, Loader=yaml.CLoader):
                            endpos = i.end_mark.index
                    except Exception as e:
                        # print 'error',str(e),stream
                        # convert back the valid stream into a list
                        # if needed the stream can be loaded into a document
                        item_list = stream[:endpos].split('\n')
                        # if lengths are different there is no need to add lines
                        if len(item_list) != len(stream_list):
                            # last line might be shorter, therefore treat it separately
                            last_line = item_list.pop()
                            # purge the stream
                            for item in item_list:
                                stream_list.remove(item + '\n')
                            # extract the remaining line which should be compared with the last one
                            strip_size = len(last_line.rstrip())
                            if strip_size > 0:
                                first_line = stream_list.pop(0)[strip_size:]
                                if '*' in first_line or '&' in first_line:
                                    first_line = ''  # eliminate anchors
                            else:
                                first_line = ''
                            # then put the rest in the line to be treated
                            to_print.rstrip('\n')
                            to_print += first_line + '\n'
                            # the item has been found
                            break
                stream_list.reverse()
                # put back the unused part in the document
                line_rev.extend(stream_list)
                # mark that the key has been removed
                if (remove_it not in removed):
                    removed.append(remove_it)
                    write('removed: ', remove_it)
        # then print out the line
        cleaned_logfile.append(to_print)
    # check that everything has been removed, at least once
    if (set(removed) != set(to_remove)):
        write('WARNING, not all the requested items have been removed!')
        write('To_remove : ', to_remove)
        write('removed   : ', removed)
        write('Difference: ', list(set(to_remove) - set(removed)))
    return cleaned_logfile
5e066584488230e777684fcf4e8d25784343afaf
1,085
def no_red_sum(tokens):
    """Using import json is cheating, let's parse it ourselves in a single pass.

    Hope you like stacks."""
    sums = [0]
    stack = []
    is_red = False
    for token in tokens:
        if token == 'red' and not is_red and stack[-1] == '{':
            is_red = True
            sums[-1] = 0
            stack.append('red')
        elif token == '{':
            sums.append(0)
            stack.append('{')
        elif token == '}':
            last_sum = sums.pop()
            sums[-1] += last_sum
            if stack[-1] == 'red':
                stack.pop()
                is_red = False
            stack.pop()
        elif token == '[':
            stack.append('[')
            sums.append(0)
        elif token == ']':
            stack.pop()
            last_sum = sums.pop()
            sums[-1] += last_sum
        elif not is_red:
            sums[-1] += neg_safe_cast(token)
    assert len(sums) == 1
    return sums.pop()
7945618bcc76c03b457cacf4f995e767d5b6160c
1,086
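A hedged sketch of how `no_red_sum` above might be driven (it matches the Advent of Code 2015 day 12 part 2 style of problem). The tokenizer and `neg_safe_cast` are not shown in the snippet, so simple stand-ins are assumed here, defined alongside the function.

import re

def neg_safe_cast(token):
    # Stand-in for the helper used above: cast numeric tokens, ignore the rest.
    try:
        return int(token)
    except ValueError:
        return 0

def tokenize(document):
    # Split a JSON-ish document into braces, brackets, numbers and bare words.
    return re.findall(r"[{}\[\]]|-?\d+|[a-z]+", document)

doc = '{"a":[1,2,{"c":"red","b":3}],"d":4}'
print(no_red_sum(tokenize(doc)))  # -> 7: the inner object is ignored because of "red"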
def get_all_projects():
    """
    Return a list with all the projects (open and closed).
    """
    return gazu.project.all_projects()
7279d46e9049f3ff9802dcc93b8e41b2e118c9a2
1,087
def install(opts):
    """
    Install one or more resources.
    """
    resources = _load(opts.resources, opts.output_dir)

    if opts.all:
        opts.resource_names = ALL

    success = _install(resources, opts.resource_names, opts.mirror_url,
                       opts.destination, opts.skip_top_level)

    if success:
        if not opts.quiet:
            print("All resources successfully installed")
        return 0
    else:
        if not opts.quiet:
            invalid = _invalid(resources, opts.resource_names)
            print("Unable to install some resources: {}".format(', '.join(invalid)))
        return 1
9487490eb9ccb13ce7f9797defacf823161a60a9
1,088
import torch


def seq2seq_att(mems, lengths, state, att_net=None):
    """
    :param mems: [B, T, D_mem] This are the memories.
                 I call memory for this variable because I think attention is just like read
                 something and then make alignments with your memories.
                 This memory here is usually the input hidden state of the encoder.

    :param lengths: [B]
    :param state: [B, D_state]
                  I call state for this variable because it's the state I percepts at this time step.

    :param att_net: This is the attention network that will be used to calculate the alignment score
                    between state and memories.
                    input of the att_net is mems and state with shape:
                        mems: [exB, D_mem]
                        state: [exB, D_state]
                    return of the att_net is [exB, 1]

                    So any function that map a vector to a scalar could work.

    :return: [B, D_result]
    """
    d_state = state.size(1)
    if not att_net:
        return state
    else:
        batch_list_mems = []
        batch_list_state = []
        for i, l in enumerate(lengths):
            b_mems = mems[i, :l]  # [T, D_mem]
            batch_list_mems.append(b_mems)

            b_state = state[i].expand(b_mems.size(0), d_state)  # [T, D_state]
            batch_list_state.append(b_state)

        packed_sequence_mems = torch.cat(batch_list_mems, 0)  # [sum(l), D_mem]
        packed_sequence_state = torch.cat(batch_list_state, 0)  # [sum(l), D_state]

        align_score = att_net(packed_sequence_mems, packed_sequence_state)  # [sum(l), 1]

        # The score grouped as [(a1, a2, a3), (a1, a2), (a1, a2, a3, a4)].
        # aligned_seq = packed_sequence_mems * align_score

        start = 0
        result_list = []
        for i, l in enumerate(lengths):
            end = start + l

            b_mems = packed_sequence_mems[start:end, :]  # [l, D_mems]
            b_score = align_score[start:end, :]  # [l, 1]

            softed_b_score = F.softmax(b_score.transpose(0, 1)).transpose(0, 1)  # [l, 1]

            weighted_sum = torch.sum(b_mems * softed_b_score, dim=0, keepdim=False)  # [D_mems]
            result_list.append(weighted_sum)

            start = end

        result = torch.stack(result_list, dim=0)
        return result
992fa8329443a2505c6ff0d83e9c34e69be620d4
1,089
def convert_for_webkit(new_path, filename, reference_support_info, host=Host()):
    """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.

    Returns the list of modified properties and the modified text if the file was modified, None otherwise."""
    contents = host.filesystem.read_binary_file(filename)
    converter = _W3CTestConverter(new_path, filename, reference_support_info, host)

    if filename.endswith('.css'):
        return converter.add_webkit_prefix_to_unprefixed_properties(contents.decode('utf-8'))
    else:
        converter.feed(contents.decode('utf-8'))
        converter.close()
        return converter.output()
098774b42f9086b1b61dc231318731ab7eb1a998
1,090
from typing import Callable
from typing import Optional


def bond(fn: Callable[..., Array],
         displacement_or_metric: DisplacementOrMetricFn,
         static_bonds: Optional[Array]=None,
         static_bond_types: Optional[Array]=None,
         ignore_unused_parameters: bool=False,
         **kwargs) -> Callable[..., Array]:
    """Promotes a function that acts on a single pair to one on a set of bonds.

    TODO(schsam): It seems like bonds might potentially have poor memory
    access. Should think about this a bit and potentially optimize.

    Args:
      fn: A function that takes an ndarray of pairwise distances or
        displacements of shape [n, m] or [n, m, d_in] respectively as well as
        kwargs specifying parameters for the function. fn returns an ndarray of
        evaluations of shape [n, m, d_out].
      metric: A function that takes two ndarray of positions of shape
        [spatial_dimension] and [spatial_dimension] respectively and returns
        an ndarray of distances or displacements of shape [] or [d_in]
        respectively. The metric can optionally take a floating point time as
        a third argument.
      static_bonds: An ndarray of integer pairs with shape [b, 2] where each
        pair specifies a bond. static_bonds are baked into the returned compute
        function statically and cannot be changed after the fact.
      static_bond_types: An ndarray of integers of shape [b] specifying the
        type of each bond. Only specify bond types if you want to specify bond
        parameters by type. One can also specify constant or per-bond
        parameters (see below).
      ignore_unused_parameters: A boolean that denotes whether dynamically
        specified keyword arguments passed to the mapped function get ignored
        if they were not first specified as keyword arguments when calling
        `smap.bond(...)`.
      kwargs: Arguments providing parameters to the mapped function. In cases
        where no bond type information is provided these should be either 1) a
        scalar or 2) an ndarray of shape [b]. If bond type information is
        provided then the parameters should be specified as either 1) a scalar
        or 2) an ndarray of shape [max_bond_type].

    Returns:
      A function fn_mapped. Note that fn_mapped can take arguments bonds and
      bond_types which will be bonds that are specified dynamically. This will
      incur a recompilation when the number of bonds changes. Improving this
      state of affairs I will leave as a TODO until someone actually uses this
      feature and runs into speed issues.
    """

    # Each call to vmap adds a single batch dimension. Here, we would like to
    # promote the metric function from one that computes the distance /
    # displacement between two vectors to one that acts on two lists of
    # vectors. Thus, we apply a single application of vmap.

    merge_dicts = partial(util.merge_dicts,
                          ignore_unused_parameters=ignore_unused_parameters)

    def compute_fn(R, bonds, bond_types, static_kwargs, dynamic_kwargs):
        Ra = R[bonds[:, 0]]
        Rb = R[bonds[:, 1]]
        _kwargs = merge_dicts(static_kwargs, dynamic_kwargs)
        _kwargs = _kwargs_to_bond_parameters(bond_types, _kwargs)
        # NOTE(schsam): This pattern is needed due to JAX issue #912.
        d = vmap(partial(displacement_or_metric, **dynamic_kwargs), 0, 0)
        dr = d(Ra, Rb)
        return high_precision_sum(fn(dr, **_kwargs))

    def mapped_fn(R: Array,
                  bonds: Optional[Array]=None,
                  bond_types: Optional[Array]=None,
                  **dynamic_kwargs) -> Array:
        accum = f32(0)

        if bonds is not None:
            accum = accum + compute_fn(R, bonds, bond_types, kwargs, dynamic_kwargs)

        if static_bonds is not None:
            accum = accum + compute_fn(
                R, static_bonds, static_bond_types, kwargs, dynamic_kwargs)

        return accum
    return mapped_fn
4a4fefaf8fce84e632634fef778a7508cd5412b8
1,091
import re


def clean_repeated_symbols(text):
    """
    Filters text, replacing symbols repeated more than twice (not allowed in
    most languages) with a single repetition of the symbol.

    :param text: the text to be filtered
    :type: str
    :return: the filtered text
    :type: str
    """
    pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
    return pattern.sub(r"\1\1", text)
bfa758994cfae716caaa715d5a990416a300f9d9
1,092
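A small demonstration of `clean_repeated_symbols` above; note that the substitution keeps two repetitions, so runs of three or more identical characters are collapsed to exactly two.

print(clean_repeated_symbols("soooo goooood!!!!"))   # -> "soo good!!"
print(clean_repeated_symbols("aa bb cc"))            # unchanged: pairs are allowed
print(clean_repeated_symbols("line\n\n\n\nbreaks"))  # re.DOTALL lets '.' match newlines too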
def sample(x, y, numSamples):
    """
    gives numSamples samples from the distribution function
    fail parameters
    """
    y /= y.sum()
    return np.random.choice(x, size=numSamples, replace=True, p=y)
4cfbb6977bcd5fa43f27de40b15beff487f1c071
1,093
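A NumPy sketch of `sample` above with made-up weights. Note that `y /= y.sum()` normalizes the caller's array in place, which is worth knowing before reusing `y`.

import numpy as np

x = np.array([0, 1, 2, 3])
y = np.array([1.0, 1.0, 2.0, 4.0])   # unnormalized weights

draws = sample(x, y, numSamples=10_000)
print(np.bincount(draws) / draws.size)  # roughly [0.125, 0.125, 0.25, 0.5]
print(y)                                # y has been normalized in place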
def make_path_strictly_increase(path):
    """
    Given a warping path, remove all rows that do not
    strictly increase from the row before
    """
    toKeep = np.ones(path.shape[0])
    i0 = 0
    for i in range(1, path.shape[0]):
        if np.abs(path[i0, 0] - path[i, 0]) >= 1 and np.abs(path[i0, 1] - path[i, 1]) >= 1:
            i0 = i
        else:
            toKeep[i] = 0
    return path[toKeep == 1, :]
1a5043bdb469c9dd3f9bf57e1b9752ebd8567182
1,094
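A short example of `make_path_strictly_increase` above on a hand-made warping path; rows that repeat either coordinate of the previously kept row are dropped.

import numpy as np

path = np.array([[0, 0],
                 [1, 0],
                 [1, 1],
                 [2, 2],
                 [2, 3],
                 [3, 4]])

# Rows [1, 0] and [2, 3] repeat one coordinate of the previously kept row,
# so they are removed; the result increases strictly in both columns.
print(make_path_strictly_increase(path))
# [[0 0]
#  [1 1]
#  [2 2]
#  [3 4]]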
from typing import OrderedDict
import itertools
import logging


def generate_frequency_spectrum(samples, wild_threshold):
    """
    Generates the site frequency spectrum for a given set of samples

    :param samples: List of sample accession codes
    :param wild_threshold: The index position of the last wild sample (used for resolving group membership)
    :return:
    """

    # open all files for reading
    filehandles = [open("vcf/{}.vcf".format(sample), 'r') for sample in samples]

    # skip over the block comments (which are variable length)
    for fin in filehandles:
        while fin.readline().startswith("##"):
            pass

    # keep count of SNP sites
    snpcount = 0

    # store the SNPs in a dictionary
    variants = defaultdict(OrderedDict)

    try:
        # get the next line from all the files
        for lines in itertools.izip(*filehandles):

            try:
                # convert each line from a string to a list
                lines = [line.split() for line in lines]

                # rephase the files, if not all the sequence positions match
                if len(set(line[POS] for line in lines)) != 1:
                    rephase_files(lines, filehandles)

                # TODO drop sites with coverage lower than 1st quartile or higher than 3rd quartile

                # get the outgroup
                outgroup = lines[0]

                # get the chromosome number and position
                chrm = int(outgroup[CHROM])
                pos = int(outgroup[POS])

                # skip all sites with indels
                if 'INDEL' in outgroup[INFO]:
                    raise InDelException(chrm, pos, outgroup[INFO])

                # get the reference and outgroup alleles
                ref_allele = outgroup[REF]
                out_allele = outgroup[ALT].replace('.', ref_allele)

                # get the genotype of the outgroup
                out_genotype = outgroup[GENOTYPE].split(':')[0]

                # skip het sites in the outgroup
                if out_genotype == '0/1':
                    raise HeterozygousException(chrm, pos, outgroup[GENOTYPE])

                # keep track of all the observed alleles at this site
                all_alleles = {ref_allele, out_allele}

                # dictionary for counting observations
                frequencies = {}

                # process all the samples (omitting the outgroup)
                for idx, line in enumerate(lines[1:]):

                    # skip all sites with indels
                    if 'INDEL' in line[INFO]:
                        raise InDelException(chrm, pos, line[REF])

                    # get the alt allele for this sample
                    alt_allele = line[ALT].replace('.', ref_allele)

                    # get the genotype of the sample
                    genotype = line[GENOTYPE].split(':')[0]

                    # resolve the genotype
                    if genotype == '0/0':
                        sample_alleles = [ref_allele, ref_allele]  # 0/0 - the sample is homozygous reference
                    elif genotype == '0/1':
                        sample_alleles = [ref_allele, alt_allele]  # 0/1 - the sample is heterozygous
                    elif genotype == '1/1':
                        sample_alleles = [alt_allele, alt_allele]  # 1/1 - the sample is homozygous alternate

                    # add them to the all alleles set
                    all_alleles |= set(sample_alleles)

                    # skip sites with more than two alleles observed across all samples
                    if len(all_alleles) > 2:
                        raise PolyallelicException(chrm, pos, all_alleles)

                    # use the index threshold to determine which group this sample belongs to
                    group = 'wild' if idx < wild_threshold else 'doms'

                    # count the observations of each allele for each group
                    for allele in sample_alleles:

                        # initialise the counter, if necessary
                        if allele not in frequencies:
                            frequencies[allele] = {'wild': 0, 'doms': 0}

                        # increment the counter
                        frequencies[allele][group] += 1

                if len(all_alleles) == 1:
                    # skip homozygous sites, because there is nothing to coalesce
                    raise HomozygousException(chrm, pos, all_alleles)

                if len(frequencies) == 1:
                    # deal with fixed allele sites by initilising the missing allele to 0
                    for allele in all_alleles:
                        if allele not in frequencies:
                            frequencies[allele] = {'wild': 0, 'doms': 0}

                # add the site to the SNP dictionary (so we can look up the flanking bases when we're done here)
                variants[chrm][pos] = dict(ref=ref_allele, out=out_allele, frq=frequencies)

                # increment the SNP count
                snpcount += 1

            except (InDelException, PolyallelicException, HeterozygousException, HomozygousException) as e:
                # skip all sites containing indels, polyallelic sites in ingroup samples, heterozygous sites in the
                # outgroup, or homozygous sites across all the populations
                logging.debug('Skipping site chr{} {} because of a {} - {}'.format(outgroup[CHROM], outgroup[POS],
                                                                                   type(e).__name__, e))

    except StopIteration as e:
        logging.debug('Reached the end of one of the files {}'.format(e))
        pass

    # close all the open files
    for fin in filehandles:
        fin.close()

    # reopen the outgroup file
    fin = open("vcf/{}.vcf".format(samples.iterkeys().next()), 'r')

    # skip over the block comments (which are variable length)
    while fin.readline().startswith("##"):
        pass

    # start composing the output file
    output = 'Rabbit\tHare\tAllele1\tWLD\tDOM\tAllele2\tWLD\tDOM\tGene\tPosition\n'

    for chrm in variants:
        for pos in variants[chrm]:
            # Ref | Out | Allele1 | WILD | DOMS | Allele2 | WILD | DOMS | Gene | Position

            # fetch the flanking bases for the reference and outgroup sequeneces
            (ref_left, ref_right, out_left, out_right) = fetch_flanking_bases(chrm, pos, fin)

            # add the output row
            output += '{ref_left}{ref}{ref_right}\t{out_left}{out}{out_right}\t'.format(ref_left=ref_left,
                                                                                        ref=variants[chrm][pos]['ref'],
                                                                                        ref_right=ref_right,
                                                                                        out_left=out_left,
                                                                                        out=variants[chrm][pos]['out'],
                                                                                        out_right=out_right)

            for allele, count in variants[chrm][pos]['frq'].iteritems():
                # output the allele counts
                output += '{alle}\t{wild}\t{doms}\t'.format(alle=allele,
                                                            wild=count['wild'],
                                                            doms=count['doms'])

            # add the chromosome name and position
            output += 'chr{chrm}\t{pos}\n'.format(chrm=chrm, pos=pos)

    fin.close()

    logging.debug('Finished! Found {} suitable SNP sites'.format(snpcount))

    return output
f953c09cc22d5f4744a9f505daefbe3b4e72cc92
1,095
def set_group_selector(*args):
    """set_group_selector(sel_t grp, sel_t sel) -> int"""
    return _idaapi.set_group_selector(*args)
1fbf3807791bf94511f4c7da52278db2815c757e
1,096
def data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_latency_characteristictraffic_property_name_get(uuid, node_uuid, node_rule_group_uuid, traffic_property_name):  # noqa: E501
    """data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_latency_characteristictraffic_property_name_get

    returns tapi.topology.LatencyCharacteristic # noqa: E501

    :param uuid: Id of topology
    :type uuid: str
    :param node_uuid: Id of node
    :type node_uuid: str
    :param node_rule_group_uuid: Id of node-rule-group
    :type node_rule_group_uuid: str
    :param traffic_property_name: Id of latency-characteristic
    :type traffic_property_name: str

    :rtype: TapiTopologyLatencyCharacteristic
    """
    return 'do some magic!'
5f0aff58f5f5e7f72f6622fdb8a400b03f6aae15
1,097
def getPendingReviewers(db, review):
    """getPendingReviewers(db, review) -> dictionary

    Returns a dictionary, like the ones returned by getReviewersAndWatchers(),
    but with details about remaining unreviewed changes in the review.
    Changes not assigned to a reviewer are handled the same way."""

    cursor = db.cursor()
    cursor.execute("""SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file
                        FROM reviewfiles
             LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                       WHERE reviewfiles.review=%s
                         AND reviewfiles.state='pending'""",
                   (review.id,))

    reviewers = {}

    for user_id, changeset_id, file_id in cursor.fetchall():
        reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)

    return reviewers
869a6bb752c4e7c1e40a0000b3aceb62adc28ce1
1,098
import base64


def base64_encode_string(string):
    # type: (str or bytes) -> str
    """Base64 encode a string
    :param str or bytes string: string to encode
    :rtype: str
    :return: base64-encoded string
    """
    if on_python2():
        return base64.b64encode(string)
    else:
        return str(base64.b64encode(string), 'ascii')
0c13ca527171fecdbc5eb93376c6019c0b95e2b7
1,099
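A short sketch of `base64_encode_string` above; `on_python2` comes from the surrounding module, so a trivial stand-in is assumed here, defined alongside the function, purely to make the example self-contained.

import sys

def on_python2():
    # Stand-in for the helper used above; the real project defines its own check.
    return sys.version_info[0] == 2

print(base64_encode_string(b"blobxfer"))      # -> 'YmxvYnhmZXI='
print(base64_encode_string("text".encode()))  # str input must be encoded to bytes on Python 3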