Dataset columns: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
import PIL.Image

def resize_img(img, size, keep_aspect_ratio=True):
    """resize image using pillow

    Args:
        img (PIL.Image): pillow image object
        size (int or tuple(int, int)): width of image or tuple of (width, height)
        keep_aspect_ratio (bool): maintain aspect ratio relative to width

    Returns:
        (PIL.Image): pillow image
    """
    if isinstance(size, int):
        size = (size, size)
    # get ratio
    width, height = img.size
    requested_width = size[0]
    if keep_aspect_ratio:
        ratio = width / requested_width
        requested_height = height / ratio
    else:
        requested_height = size[1]
    size = (int(requested_width), int(requested_height))
    img = img.resize(size, resample=PIL.Image.LANCZOS)
    return img
b3ddc2c929fa530f2af612c3021802f7bd1ed285
3,658,260
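A quick usage sketch for the resize_img record above (illustrative only; the file names are hypothetical):

from PIL import Image

img = Image.open("photo.jpg")  # hypothetical input file
thumb = resize_img(img, 128)   # 128 px wide; height scaled to keep the aspect ratio
thumb.save("photo_128.jpg")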
from typing import Tuple

import numpy as np

def calculate_clip_from_vd(circuit: DiodeCircuit, v_d: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    :returns: v_in, v_out
    """
    Rs = circuit.Rs
    Is = circuit.diode.Is
    n = circuit.diode.n
    Vt = circuit.diode.Vt
    Rp = circuit.Rp
    Rd = circuit.Rd

    Id = Is * (np.exp(v_d / (n * Vt)) - 1.0)
    Vd = v_d

    if Rp is None:
        Vin = Vd + (Rd * Id) + (Rs * Id)
    else:
        Vin = Vd + (Rd * Id) + (Rs * Id) + (Vd * Rs / Rp) + (Id * Rd * Rs / Rp)
    Vout = Vd + Id * Rd
    return Vin, Vout
ec60231622c06f1a972af050c0403e8247aa75ed
3,658,261
def tex_coord(x, y, n=8):
    """Return the bounding vertices of the texture square."""
    m = 1.0 / n
    dx = x * m
    dy = y * m
    return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
0776dd61aa83b9c9d8afe7574607022c9f7c2b77
3,658,262
import xarray as xr

def uniindtemp_compute(da: xr.DataArray, thresh: str = "0.0 degC", freq: str = "YS"):
    """Docstring"""
    out = da - convert_units_to(thresh, da)
    out = out.resample(time=freq).mean()
    out.attrs["units"] = da.units
    return out
640a50e2a4ff61192f97e31c782be58437d301d0
3,658,263
def verify_parentheses(parentheses_string: str) -> bool:
    """Takes input string of only '{},[],()' and evaluates to True if valid."""
    open_parentheses = []
    valid_parentheses_set = {'(', ')', '[', ']', '{', '}'}
    parentheses_pairs = {
        ')': '(',
        ']': '[',
        '}': '{'
    }
    if len(parentheses_string) % 2 != 0:
        return False
    for character in parentheses_string:
        if character not in valid_parentheses_set:
            raise ValueError("Only parentheses may be part of input string.")
        if character in {'(', '[', '{'}:
            open_parentheses.append(character)
        if character in {')', ']', '}'}:
            if len(open_parentheses) == 0:
                return False
            elif open_parentheses[-1] != parentheses_pairs[character]:
                return False
            del open_parentheses[-1]
    if len(open_parentheses) > 0:
        return False
    return True
2e2c07314d474b582f12af8cf53a311c0fa323c1
3,658,264
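A few illustrative calls for the verify_parentheses record above (not part of the dataset):

assert verify_parentheses("{[()]}") is True
assert verify_parentheses("{[(])}") is False  # wrong nesting order
assert verify_parentheses("(((") is False     # odd length / unclosed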
from time import time

def _stableBaselineTrainingAndExecution(env, typeAgent, numberOptions, mode):
    """Function to execute Baseline algorithms"""
    if typeAgent == 2:
        model = A2C(MlpPolicy, env, verbose=1)
    else:
        model = PPO2(MlpPolicy, env, verbose=1)
    print("Training model....")
    startTime = time()
    model.learn(total_timesteps=DEFAULT_TRAINING_RANGE)
    trainingTime = time() - startTime
    print("Model trained in " + str(trainingTime) + ".")
    print("Starting episodes....")
    totalSteps, numberEpisodes, studentTotalScore, projectTotalScore, skillsTotalScore = 0, 0, 0., 0., 0.
    bestResult = []
    bestStudentScore = 0.0
    bestStudentAssigned = 0
    sumStudentAssigned = 0.0
    allStudentsAssigned = []
    allProjectAssignations = []
    allSteps = []
    allResults = []
    allAverageStudentScore = []
    allAverageProjectScore = []
    allAverageSkillsScore = []
    allStudentScores = []
    allProjectScores = []
    progressBar = Bar("-> Execution progress:", max=DEFAULT_EXECUTION_RANGE)
    for i in range(DEFAULT_EXECUTION_RANGE):
        state = env.reset(1)
        steps, reward = 0, 0
        done = False
        print("Execution " + str(i))
        while not done:
            action, _state = model.predict(state)
            state, reward, done, info = env.step(action)
            # env.render()
            steps += 1
        numberEpisodes += 1
        allSteps.append(steps)
        averageStudentScore, averageProjectScore, averageSkillsScore, studentScores, projectScores, studentsAssigned, projectAssignations = env.stepScores()
        allResults.append(env.finalState())
        allAverageStudentScore.append(averageStudentScore)
        allAverageProjectScore.append(averageProjectScore)
        allAverageSkillsScore.append(averageSkillsScore)
        allStudentScores.append(studentScores)
        allProjectScores.append(projectScores)
        allStudentsAssigned.append(studentsAssigned)
        allProjectAssignations.append(projectAssignations)
        averageStudentAssigned = sum(studentsAssigned) / numberOptions
        sumStudentAssigned += sum(studentsAssigned) / numberOptions
        if averageStudentAssigned >= bestStudentAssigned and averageStudentScore > bestStudentScore:
            bestStudentAssigned = averageStudentAssigned
            bestStudentScore = averageStudentScore
            bestResult = env.finalState()
        progressBar.next()
    progressBar.finish()
    print("Execution done.")
    print(trainingTime)
    if mode == 0:
        _executionAnalysis(numberEpisodes, allStudentScores, allProjectScores, allSteps, bestStudentAssigned,
                           numberOptions, allStudentsAssigned, allProjectAssignations, sumStudentAssigned)
    return bestResult
7fd91b28ab475fb43ea7e6af4ca17302863e269a
3,658,265
from binascii import hexlify

def string_to_hexadecimale_device_name(name: str) -> str:
    """Encode string device name to an appropriate hexadecimal value.

    Args:
        name: the desired name for encoding.

    Return:
        Hexadecimal representation of the name argument.
    """
    length = len(name)
    if 1 < length < 33:
        hex_name = hexlify(name.encode())
        zeros_pad = ("00" * (32 - length)).encode()
        return (hex_name + zeros_pad).decode()
    raise ValueError("name length can vary from 2 to 32")
53d5c5a221a2c3dac46c5fb15d051d78592b109b
3,658,266
def createTeam(firstIndex, secondIndex, isRed,
               first='DefensiveAgent', second='OffensiveAgent'):
    """
    This function should return a list of two agents that will form the team,
    initialized using firstIndex and secondIndex as their agent index numbers.
    isRed is True if the red team is being created, and will be False if the
    blue team is being created.

    As a potentially helpful development aid, this function can take additional
    string-valued keyword arguments ("first" and "second" are such arguments in
    the case of this function), which will come from the --redOpts and --blueOpts
    command-line arguments to capture.py. For the nightly contest, however, your
    team will be created without any extra arguments, so you should make sure
    that the default behavior is what you want for the nightly contest.
    """
    return [eval(first)(firstIndex), eval(second)(secondIndex)]
b99e8f548b6e6166517fb35a89f5381ef6d7692b
3,658,267
def str_dice(die):
    """Return a string representation of die.

    >>> str_dice(dice(1, 6))
    'die takes on values from 1 to 6'
    """
    return 'die takes on values from {0} to {1}'.format(smallest(die), largest(die))
29eb7f6aa43e068e103016bbc8c35699fbf4a3ea
3,658,268
def transformer_encoder(encoder_input,
                        encoder_self_attention_bias,
                        hparams,
                        name="encoder",
                        nonpadding=None,
                        save_weights_to=None,
                        make_image_summary=True):
  """A stack of transformer layers.

  Args:
    encoder_input: a Tensor
    encoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string
    nonpadding: optional Tensor with shape [batch_size, encoder_length]
      indicating what positions are not padding. This must either be
      passed in, which we do for "packed" datasets, or inferred from
      encoder_self_attention_bias. The knowledge about padding is used for
      pad_remover (efficiency) and to mask out padding in convolutional layers.
    save_weights_to: an optional dictionary to capture attention weights for
      visualization; the weights tensor will be appended there under a string
      key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.

  Returns:
    y: a Tensor
  """
  x = encoder_input
  attention_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "attention_dropout_broadcast_dims", "")))
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
      value=hparams.num_encoder_layers or hparams.num_hidden_layers)
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
      value=hparams.attention_dropout)
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
      value={
          "use_bias": "false",
          "num_heads": hparams.num_heads,
          "hidden_size": hparams.hidden_size
      })
  with tf.variable_scope(name):
    if nonpadding is not None:
      padding = 1.0 - nonpadding
    else:
      padding = common_attention.attention_bias_to_padding(
          encoder_self_attention_bias)
      nonpadding = 1.0 - padding
    pad_remover = None
    if hparams.use_pad_remover and not common_layers.is_xla_compiled():
      pad_remover = expert_utils.PadRemover(padding)
    for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
      initial_sparsity = None
      if hparams.get("load_masks_from"):
        initial_sparsity = hparams.get("initial_sparsity")
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = sparse_attention.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              encoder_self_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.self_attention_type,
              max_relative_position=hparams.max_relative_position,
              heads_share_relative_embedding=(
                  hparams.heads_share_relative_embedding),
              add_relative_to_values=hparams.add_relative_to_values,
              save_weights_to=save_weights_to,
              make_image_summary=make_image_summary,
              dropout_broadcast_dims=attention_dropout_broadcast_dims,
              max_length=hparams.get("max_length"),
              vars_3d=hparams.get("attention_variables_3d"),
              sparsity_technique=hparams.get("sparsity_technique"),
              threshold=hparams.get("log_alpha_threshold"),
              training=hparams.get("mode") == tf.estimator.ModeKeys.TRAIN,
              clip_alpha=hparams.get("clip_log_alpha"),
              initial_sparsity=initial_sparsity,
              split_heads=hparams.get("split_heads"))
          x = common_layers.layer_postprocess(x, y, hparams)
        with tf.variable_scope("ffn"):
          y = transformer_ffn_layer(
              common_layers.layer_preprocess(x, hparams), hparams, pad_remover)
          x = common_layers.layer_postprocess(x, y, hparams)
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_NORM,
        value={"hidden_size": hparams.hidden_size})
    return common_layers.layer_preprocess(x, hparams)
0f56315a972acc235dba7ec48c8b83b84bd6b3f1
3,658,270
import re

def compute_dict(file_path):
    """Computes the dict for a file whose path is file_path"""
    file_dict = {}
    with open(file_path, encoding='utf8') as fin:
        for line in fin:
            line = line.strip()
            txt = re.sub(r'([^a-zA-Z0-9\s]+)', r' \1 ', line)
            txt = re.sub(r'([\s]+)', ' ', txt)
            words = txt.split(" ")
            for word in words:
                w = str(word)
                if w not in file_dict:
                    file_dict[w] = 1
                else:
                    file_dict[w] = file_dict[w] + 1
    return file_dict
821e29181aad781279b27174be0fd7458b60481f
3,658,272
def get_scale(lat1, lon1, var, desired_distance, unit='miles'):
    """
    Calculate the difference in either latitude or longitude that is equivalent to
    some desired distance at a given point on Earth. For example, at a specific point,
    how much does latitude need to change (assuming longitude is constant) to be equal
    to 60 miles? This is especially important since lines of longitude are closer
    together near Earth's poles. This function is helpful when converting latitude and
    longitude coordinates to pixel coordinates in order to plot a point on the screen.

    Parameters:
    1 - latitude of position in decimal degrees
    2 - longitude of position in decimal degrees
    3 - "lat" or "lon" to specify if calculating change for latitude or longitude
    4 - the desired distance from the given point
    5 - unit of measure (optional), "miles" or "km", default is miles

    Returns:
    The difference in latitude or longitude
    """
    # Create a second point that is initially set to the starting point
    # The idea is to push this point farther and farther away (either by lat or lon)
    # until it is the desired distance away
    lat2 = lat1
    lon2 = lon1

    # Create a variable for tracking the actual distance between the two points, which
    # can be compared against the desired distance
    actual_distance = get_distance(lat1, lon1, lat2, lon2, unit)

    n = 1  # Place value to increase or decrease lat/lon by (1, .1, .01, .001, etc.)
    decrease_n = False  # Flag to indicate if n should be decreased

    if var == 'lat':
        var_value = lat2  # Variable for holding either latitude or longitude (whichever is being modified)
    elif var == 'lon':
        var_value = lon2
    else:
        # fail fast instead of falling through with var_value undefined
        raise ValueError('value not recognized: ' + str(var))

    # Keep looping until the difference between the desired distance and the actual distance
    # is less than 0.0001 (in whatever units)... basically until it's really close
    while abs(round(desired_distance - actual_distance, 4)) > 0.0001:
        # Keep increasing the var_value until the actual distance is too great,
        # then start decreasing until it's too small

        # If desired distance is greater than actual, add n to the var_value
        if desired_distance > actual_distance:
            var_value += n
            var_value = round(var_value, 6)  # Round to 6 decimal places to clean up floating point messiness
            decrease_n = True  # Indicate it's ok to decrease n if the following else statement is evaluated

        # If actual distance is greater than desired, subtract n from var_value
        else:
            if decrease_n:
                n *= 0.1  # Decrease n by a factor of ten
            var_value -= n
            var_value = round(var_value, 6)
            decrease_n = False  # Don't decrease n until after the next time the if statement is evaluated

        # Recalculate the actual distance
        if var == 'lat':
            actual_distance = get_distance(lat1, lon1, var_value, lon2, unit)
        else:
            actual_distance = get_distance(lat1, lon1, lat2, var_value, unit)
        # print(round(actual_distance, 4)) for testing purposes

    # Return the difference between lat2 and lat1 (or lon2/lon1) that is equal to the desired distance
    if var == 'lat':
        return abs(round(var_value - lat1, 6))
    else:
        return abs(round(var_value - lon1, 6))
258d95b1372d1b863f121552abb2ba6047f5aaad
3,658,273
from os import PathLike
from pathlib import Path
from typing import Union

async def connect_unix(path: Union[str, PathLike]) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object
    """
    path = str(Path(path))
    return await get_asynclib().connect_unix(path)
825d69aa19afd593b355063639cbcd91cb23e9fa
3,658,274
def isMatch(s, p):
    """
    Perform simple regular expression matching

    Given an input string s and a pattern p, run regular expression
    matching with support for '.' and '*'.

    Parameters
    ----------
    s : str
        The string to match.
    p : str
        The pattern to match.

    Returns
    -------
    bool
        Was it a match or not.
    """
    dp = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]

    dp[0][0] = True

    # The only way to match a length zero string
    # is to have a pattern of all *'s.
    for ii in range(1, len(p)):
        if p[ii] == "*" and dp[0][ii - 1]:
            dp[0][ii + 1] = True

    for ii in range(len(s)):
        for jj in range(len(p)):
            # Matching a single character c or '.'.
            if p[jj] in {s[ii], '.'}:
                dp[ii + 1][jj + 1] = dp[ii][jj]
            elif p[jj] == '*':
                # Double **, which is equivalent to *
                if p[jj - 1] not in {s[ii], '.'}:
                    dp[ii + 1][jj + 1] = dp[ii + 1][jj - 1]
                # We can match .* or c* multiple times, once, or zero
                # times (respective clauses in the or's)
                else:
                    dp[ii + 1][jj + 1] = dp[ii][jj + 1] or dp[ii + 1][jj] or dp[ii + 1][jj - 1]
    return dp[-1][-1]
92cd3171afeb73c6a58bbcd3d3ea6d707401cb09
3,658,275
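Two illustrative checks for the isMatch record above (not part of the dataset):

assert isMatch("aab", "c*a*b")  # 'c*' matches zero chars, 'a*' matches "aa"
assert not isMatch("mississippi", "mis*is*p*.")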
import numpy as np

def cepheid_lightcurve_advanced(band, tarr, m0, period, phaseshift, shape1, shape2, shape3, shape4, datatable=None):
    """
    Generate a Cepheid light curve. More flexibility allowed.

    band: one of "B", "V", "I"
    tarr: times at which you want the light curve evaluated
    m0: mean magnitude for the light curve
    period: same units as tarr
    phaseshift: same units as tarr
    shape1-4: parameters determining the shape of the light curve.
        These are the first four principal components from Yoachim et al. 2009
        They should generally be > 0.
    datatable: which set of templates to use. By default, it loads the long period templates.
        Long period: >10 days; Short period: <10 days
        Can also pass an integer. Even int -> long period, odd int -> short period.
        Note: for speed in fitting, read the table you want and pass it in.
    """
    allowed_bands = ["I", "V", "B"]
    assert band.upper() in allowed_bands

    if datatable is None:
        datatable = load_longperiod_datatable()
    elif isinstance(datatable, (int, float)):
        datatable = int(datatable)
        if (datatable % 2) == 1:
            datatable = load_shortperiod_datatable()
        else:
            datatable = load_longperiod_datatable()

    Nt = len(tarr)
    tstack = np.ravel([tarr for x in range(3)])
    # p0 = m0i, m0v, m0b, period, phase shift, tbreak, tbreak2
    p0 = [m0, m0, m0, period, phaseshift, Nt, 2 * Nt, shape1, shape2, shape3, shape4]
    lcs = gen_lc(tstack, p0, datatable)
    lc = lcs[allowed_bands.index(band)]
    return lc
7e9a94f4e59f3da31c8c21a6e35822a0ac0d8051
3,658,276
def app_list(context):
    """
    Renders the app list for the admin dashboard widget.
    """
    context["dashboard_app_list"] = admin_app_list(context["request"])
    return context
699aa55403169f87c26fc9655d8c6dcb29aa14d2
3,658,277
def path_depth(path: str, depth: int = 1) -> str:
    """Returns the `path` up to a certain depth.

    Note that `depth` can be negative (such as `-x`) and will return all
    elements except for the last `x` components
    """
    return path_join(path.split(CONFIG_SEPARATOR)[:depth])
c04899974560b2877db313fa0444203cc483a2b0
3,658,278
def read_config_file(filename, preserve_order=False):
    """
    Read and parse a configuration file.

    Parameters
    ----------
    filename : str
        Path of configuration file
    preserve_order : bool, optional
        Preserve the order of keys as they appear in the file

    Returns
    -------
    dict
        Configuration dictionary
    """
    with open(filename) as f:
        return parse_config(f, preserve_order)
6a0aab4ae0da3abdddf080c98ee69eb92d2d8d04
3,658,279
def languages_list_handler():
    """Get list of supported review languages (language codes from ISO 639-1).

    **Example Request:**

    .. code-block:: bash

        $ curl https://critiquebrainz.org/ws/1/review/languages \\
            -X GET

    **Example Response:**

    .. code-block:: json

        {
            "languages": [
                "aa",
                "ab",
                "af",
                "ak",
                "yo",
                "za",
                "zh",
                "zu"
            ]
        }

    :resheader Content-Type: *application/json*
    """
    return jsonify(languages=supported_languages)
5b8791ad5d71a94486d96379f62ed9ebf850ec59
3,658,280
def corpus_subdirs(path):
    """Return a list of the directories inside path (everything except .txt files)."""
    subdirs = []
    for x in listdir(path):
        if not x.endswith('.txt'):
            subdirs.append(x)
    return subdirs
645f198f78795dbc5c14b7cfd400fa1e94dc9244
3,658,281
def edit_string_for_tags(tags):
    """
    Given list of ``Tag`` instances or tag strings, creates a string
    representation of the list suitable for editing by the user, such
    that submitting the given string representation back without changing
    it will give the same list of tags.

    Tag names which contain commas will be double quoted.

    If any tag name which isn't being quoted contains whitespace, the
    resulting string of tag names will be comma-delimited, otherwise
    it will be space-delimited.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    names = []
    for tag in tags:
        if hasattr(tag, 'name'):
            name = tag.name
        elif isinstance(tag, str):  # Python 3: plain str replaces the old (str, unicode) check
            name = tag
        else:
            continue
        if u',' in name or u' ' in name:
            names.append('"%s"' % name)
        else:
            names.append(name)
    return u', '.join(sorted(names))
a05b6cb12e36304096d076e015077f1ec1cc3432
3,658,282
from typing import Optional

def convert_postgres_array_as_string_to_list(array_as_string: str) -> Optional[list]:
    """
    Postgres arrays are stored in CSVs as strings. Elasticsearch is able to handle lists of items, but needs
    to be passed a list instead of a string. In the case of an empty array, return null.
    For example, "{this,is,a,postgres,array}" -> ["this", "is", "a", "postgres", "array"].
    """
    return array_as_string[1:-1].split(",") if len(array_as_string) > 2 else None
cc64fe8e0cc765624f80abc3900985a443f76792
3,658,283
from random import choice

def generate_prime_number(min_value=0, max_value=300):
    """Generates a random prime number within the range min_value to max_value

    Parameters
    ----------
    min_value : int, optional
        The smallest possible prime number you want, by default 0
    max_value : int, optional
        The largest possible prime number you want, by default 300

    Returns
    -------
    int
        A randomly selected prime number in the range min_value to max_value
    """
    # Create a list of prime values within the range (inclusive of max_value, per the docstring)
    primes = [number for number in range(min_value, max_value + 1) if is_prime(number)]
    return choice(primes)
539f74fcdba2c366b0fe13b0bc0fab6727300ec1
3,658,284
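The record above relies on an is_prime helper and random.choice; a minimal is_prime sketch under that assumption (not part of the dataset):

def is_prime(n):
    """Trial-division primality test; adequate for the small ranges used here."""
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True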
def sort_extended_practitioner(practitioner):
    """
    sort on date latestDate
    Then alpha on other practitioners
    :param practitioner:
    :return: practitioner
    """
    uniques = []
    for p in practitioner:
        if find_uniques(p, uniques):
            uniques.append(p)
    return uniques
476d9adf9d93b88f20166b1e95715aaa54bd67f9
3,658,285
def lti13_login_params_dict(lti13_login_params):
    """
    Return the initial LTI 1.3 authorization request as a dict
    """
    utils = LTIUtils()
    args = utils.convert_request_to_dict(lti13_login_params)
    return args
026e65c132666816f774a05f6977dac9ab194b77
3,658,286
from math import log

def calcShannonEnt(dataset):
    """
    Compute the entropy of a dataset.
    Input: dataset
    Output: entropy
    """
    numEntris = len(dataset)
    labelCounts = {}
    for featVec in dataset:
        currentLabel = featVec[-1]  # the last value in each row, i.e. the decision label of the record
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # labelCounts now holds the leaf classes (keys) and the count of each class (values)
    shannonEnt = 0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntris
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
9b64d0ad0bb517deb77c24f3c08e004d255daa68
3,658,287
import array

def get_boundaries_medians(things, lowers=[], uppers=[]):
    """Return the boundaries and medians of given percentage ranges.

    Parameters:
    1. things: a list of numbers
    2. lowers: lower percentage limits
    3. uppers: upper percentage limits

    Returns: lower, median, upper
    """
    # if neither list nor array nor tuple, just return empty results
    if not isinstance(things, (list, tuple, array.array)):
        return [], [], []
    n_things = len(things)
    if n_things == 0:
        return [], [], []
    sthings = sorted(list(things))
    # Python 3: build lists explicitly instead of relying on Python 2's list-returning map()
    l = [int(round(1.0 * x * n_things / 100)) - 1 for x in lowers]
    r = [int(round(1.0 * x * n_things / 100)) for x in uppers]
    return ([sthings[x] for x in l],
            [median(sthings[max(0, x):min(n_things, y + 1)]) for x, y in zip(l, r)],
            [sthings[y] for y in r])
d83365ad2d9598dc19c279a6b20424746f53d6ce
3,658,288
import numpy as np
import pyabf

def getMeanBySweep(abf, markerTime1, markerTime2):
    """
    Return the mean value between the markers for every sweep.
    """
    assert isinstance(abf, pyabf.ABF)

    pointsPerSecond = abf.dataRate
    sweepIndex1 = int(pointsPerSecond * markerTime1)  # slice indices must be integers
    sweepIndex2 = int(pointsPerSecond * markerTime2)

    means = []
    for i in range(abf.sweepCount):
        abf.setSweep(i)
        segment = abf.sweepY[sweepIndex1:sweepIndex2]
        segmentMean = np.mean(segment)
        means.append(segmentMean)
    return means
8051d67c832c9b331798f896b8f98c2673770a94
3,658,289
def add_prefix(key):
    """Dummy key_function for testing index code."""
    return "id_" + key
96dda0bd57b4eb89f17c8bb69ad48e3e1675a470
3,658,291
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
    """TOPI schedule callback"""
    s = te.create_schedule([x.op for x in outs])

    def _callback(op):
        if "conv2d_nhwc_tensorcore" in op.tag:
            schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))

    traverse_inline(s, outs[0].op, _callback)
    return s
318be211f02469c1e971a9303f48f92f88af5755
3,658,292
from tqdm import tqdm

def simulate(config, show_progress=False):
    """Simulate incarceration contagion dynamics.

    Parameters
    ----------
    config : Config
        Config object specifying simulation parameters.

    Returns
    -------
    dict
        Dictionary specifying simulated population of agents.
    """
    popu = initialize(config)
    agents = popu.values()

    def display(range_obj):
        if show_progress:
            range_obj = tqdm(range_obj)
        return range_obj

    # these are in years. need to work in terms of months
    for itr in display(range(config.start_iter, config.end_iter)):
        for month in range(12):
            # infection step
            for person in agents:
                # random infection, not due to contagion
                if valid_age(person, itr, config.min_age):
                    person["infected"] += infect("*", "*", "*")
                # infect connected people
                if person["incarcerated"] > 0:
                    person["incarcerated"] -= 1
                    person["months_in_prison"] += 1
                spread_infection(popu, person, itr, month, config)
            # sentencing step
            for person in agents:
                if person["infected"] and not person["incarcerated"]:
                    assign_sentence(person, itr, month, config)
                person["infected"] = 0
    return popu
b5b813c1038d6b473ff3f4fa6beccdad2615f2af
3,658,293
import cel_import

def get_event_log(file_path: str = None, use_celonis=False):
    """
    Gets the event log data structure from the event log file.
    Dispatches the methods to be used by file type

    :param use_celonis: If the attribute is set to true the event log will be retrieved from celonis
    :param file_path: Path to the event-log file
    :return: EventLog data structure
    """
    if file_path is None and not use_celonis:
        raise ValueError("Parameters file_path was None and use_celonis was false at the same time. "
                         "This behavior is not supported")
    if use_celonis:
        return cel_import.get_event_log_from_celonis()
    else:
        file_path_lowercase = file_path.lower()
        if file_path_lowercase.endswith(".xes"):
            return __handle_xes_file(file_path)
        else:
            raise ValueError('The input file was not a XES file')
9b77a2bed9d6551cc2d0e4eb607de0d81b95c6f3
3,658,294
def find_peak(corr, method='gaussian'):
    """Peak detection algorithm switch

    After loading the correlation window a maximum finder is invoked.
    The correlation window is cut down to the necessary 9 points around the
    maximum. Afterwards the maximum is checked not to be close to the border
    of the correlation frame.

    This cropped window is used along with the chosen method to interpolate
    the sub pixel shift. Each interpolation method returns a tuple with the
    sub pixel shift in x and y direction. The maximum's position and the sub
    pixel shift are added and returned. If an error occurred during the sub
    pixel interpolation the shift is set to nan. Also if the interpolation
    method is unknown an exception is thrown.

    :param corr: correlation window
    :param method: peak finder algorithm (gaussian, centroid, parabolic, 9point)
    :raises: Sub pixel interpolation method not found
    :returns: shift in interrogation window
    """
    i, j = np.unravel_index(corr.argmax(), corr.shape)
    if check_peak_position(corr, i, j) is False:
        return np.nan, np.nan
    window = corr[i - 1:i + 2, j - 1:j + 2]

    if method == 'gaussian':
        subpixel_interpolation = gaussian
    elif method == 'centroid':
        subpixel_interpolation = centroid
    elif method == 'parabolic':
        subpixel_interpolation = parabolic
    elif method == '9point':
        subpixel_interpolation = gaussian2D
    else:
        raise Exception('Sub pixel interpolation method not found!')

    try:
        dx, dy = subpixel_interpolation(window)
    except Exception:
        return np.nan, np.nan
    else:
        return (i + dx, j + dy)
685eb87cefc6cd2566a9e9d40f827a1fea010b73
3,658,295
def binaryToString(binary):
    """
    Convert a binary string to a UTF-8 string.
    """
    index = 0
    string = []
    rec = lambda x, i: x[2:8] + (rec(x[8:], i - 1) if i > 1 else '') if x else ''
    fun = lambda x, i: x[i + 1:8] + rec(x[8:], i - 1)
    while index + 1 < len(binary):
        chartype = binary[index:].index('0')  # number of bytes the character occupies; single-byte chars are stored as 0
        length = chartype * 8 if chartype else 8
        string.append(chr(int(fun(binary[index:index + length], chartype), 2)))
        index += length
    return ''.join(string)
2044109d573abe7c9428b64b289b5aa82ec4d624
3,658,297
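A round-trip sketch for the binaryToString record above (not part of the dataset):

bits = ''.join(format(b, '08b') for b in 'héllo'.encode('utf-8'))
assert binaryToString(bits) == 'héllo'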
import functools
import logging

def disable_log_warning(fun):
    """Temporarily set FTP server's logging level to ERROR."""
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        logger = logging.getLogger('pyftpdlib')
        level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        try:
            return fun(self, *args, **kwargs)
        finally:
            logger.setLevel(level)
    return wrapper
6990a2a1a60ea5a24e4d3ac5c5e7fbf443825e48
3,658,298
def create_csv_step_logger(save_dir: pyrado.PathLike, file_name: str = "progress.csv") -> StepLogger:
    """
    Create a step-based logger which only saves to a csv-file.

    :param save_dir: parent directory to save the results in (usually the algorithm's `save_dir`)
    :param file_name: name of the csv-file (with ending)
    :return: step-based logger
    """
    logger = StepLogger()
    logfile = osp.join(save_dir, file_name)
    logger.printers.append(CSVPrinter(logfile))
    return logger
355c205cf5f42b797b84005c1485c2b6cae74c2e
3,658,299
import numpy as np

def _key_chord_transition_distribution(
        key_chord_distribution, key_change_prob, chord_change_prob):
    """Transition distribution between key-chord pairs."""
    mat = np.zeros([len(_KEY_CHORDS), len(_KEY_CHORDS)])
    for i, key_chord_1 in enumerate(_KEY_CHORDS):
        key_1, chord_1 = key_chord_1
        chord_index_1 = i % len(_CHORDS)
        for j, key_chord_2 in enumerate(_KEY_CHORDS):
            key_2, chord_2 = key_chord_2
            chord_index_2 = j % len(_CHORDS)
            if key_1 != key_2:
                # Key change. Chord probability depends only on key and not previous
                # chord.
                mat[i, j] = (key_change_prob / 11)
                mat[i, j] *= key_chord_distribution[key_2, chord_index_2]
            else:
                # No key change.
                mat[i, j] = 1 - key_change_prob
                if chord_1 != chord_2:
                    # Chord probability depends on key, but we have to redistribute the
                    # probability mass on the previous chord since we know the chord
                    # changed.
                    mat[i, j] *= (
                        chord_change_prob * (
                            key_chord_distribution[key_2, chord_index_2] +
                            key_chord_distribution[key_2, chord_index_1] / (len(_CHORDS) - 1)))
                else:
                    # No chord change.
                    mat[i, j] *= 1 - chord_change_prob
    return mat
0e89b1e11494237c526170f25286c3ad098a1023
3,658,300
def level_set(
        current_price,
        standard_deviation,
        cloud,
        stop_mod,
        take_profit_mod,
):
    """
    Calculates risk and reward levels.
    Should return a stop loss and take profit levels.
    For opening a new position.
    Returns a stop (in the format (StopType, offset)) and a take profit level.
    """
    stop = None
    take_profit = None
    cloud_color = cloud.status[0]
    cloud_location = cloud.status[1]

    direction_mod = 1
    if cloud_color == CloudColor.RED:
        direction_mod = -1
    take_profit_mod = take_profit_mod * direction_mod
    stop_mod = stop_mod * direction_mod

    if cloud_location == CloudPriceLocation.INSIDE:  # ie passing through long ema
        stop = (StopType.EMA_LONG, (standard_deviation * stop_mod * -1))

    # If price passes through short EMA from either color cloud.
    if cloud_location in (CloudPriceLocation.ABOVE, CloudPriceLocation.BELOW):
        stop = (StopType.EMA_LONG, 0)
        # Or in case the long EMA is very far away:
        if abs(cloud.long_ema - current_price) > abs(
                current_price - (cloud.short_ema - (direction_mod * 2 * standard_deviation * -1))):
            stop = (StopType.EMA_SHORT, (direction_mod * 2 * standard_deviation * -1))
        # Or if the long EMA is too close:
        elif abs(cloud.long_ema - current_price) < abs(
                current_price - (cloud.short_ema - (direction_mod * 0.5 * standard_deviation * -1))):
            stop = (StopType.EMA_SHORT, (direction_mod * 0.5 * standard_deviation * -1))

    take_profit = cloud.short_ema + (standard_deviation * take_profit_mod)

    risk_loss = abs(current_price - StopType.stop_tuple_to_level(stop, cloud))

    # Enforce max_ratio:1 reward:risk if take_profit is very far away.
    max_ratio = 1.5
    min_ratio = 1.0
    potential_profit = abs(current_price - take_profit)
    if potential_profit > max_ratio * risk_loss:
        take_profit = current_price + (direction_mod * max_ratio * risk_loss)
    if potential_profit < max_ratio * risk_loss:
        stop = (current_price, potential_profit * direction_mod * -.95)

    return stop, take_profit
541a15b22bc830db530658c10515a15def196516
3,658,301
def back(deque):
    """Returns the last element in the queue."""
    if length(deque) > 0:
        return deque[-1]
    else:
        return None
810d2135cf39af7959f6142be4b2b3abee8d6185
3,658,302
import json
import operator

def my_subs_helper(s):
    """Helper function to handle badly formed JSON stored in the database"""
    try:
        return {'time_created': s.time_created,
                'json_obj': sorted(json.loads(s.json_data).items(), key=operator.itemgetter(0)),
                'plain_json_obj': json.dumps(json.loads(s.json_data)),
                'id': s.id,
                'json_score_data': json.dumps(s.json_score_data)}
    except ValueError:
        return {'time_created': s.time_created, 'json_obj': "__ERROR__", 'plain_json_obj': "__ERROR__", 'id': s.id}
4b649d865c3a99f89111baa694df4902e65243e6
3,658,303
def dynamic_features(data_dir, year, data_source, voronoi, radar_buffers, **kwargs):
    """
    Load all dynamic features, including bird densities and velocities, environmental data, and derived features
    such as estimated accumulation of bird on the ground due to adverse weather.

    Missing data is interpolated, but marked as missing.

    :param data_dir: directory containing all relevant data
    :param year: year of interest
    :param data_source: 'radar' or 'abm' (simulated data)
    :param voronoi: Voronoi tessellation (geopandas dataframe)
    :param radar_buffers: radar buffers with static features (geopandas dataframe)
    :return: dynamic features (pandas dataframe)
    """
    env_points = kwargs.get('env_points', 100)
    season = kwargs.get('season', 'fall')
    random_seed = kwargs.get('seed', 1234)
    pref_dirs = kwargs.get('pref_dirs', {'spring': 58, 'fall': 223})
    pref_dir = pref_dirs[season]
    wp_threshold = kwargs.get('wp_threshold', -0.5)
    edge_type = kwargs.get('edge_type', 'voronoi')
    t_unit = kwargs.get('t_unit', '1H')

    print(f'##### load data for {season} {year} #####')

    if data_source in ['radar', 'nexrad']:
        print(f'load radar data')
        radar_dir = osp.join(data_dir, data_source)
        voronoi_radars = voronoi.query('observed == True')
        birds_km2, _, t_range = datahandling.load_season(radar_dir, season, year, ['vid'],
                                                         t_unit=t_unit, mask_days=False,
                                                         radar_names=voronoi_radars.radar,
                                                         interpolate_nans=False)
        radar_data, _, t_range = datahandling.load_season(radar_dir, season, year, ['ff', 'dd', 'u', 'v'],
                                                          t_unit=t_unit, mask_days=False,
                                                          radar_names=voronoi_radars.radar,
                                                          interpolate_nans=True)
        bird_speed = radar_data[:, 0, :]
        bird_direction = radar_data[:, 1, :]
        bird_u = radar_data[:, 2, :]
        bird_v = radar_data[:, 3, :]

        # rescale according to voronoi cell size
        data = birds_km2 * voronoi_radars.area_km2.to_numpy()[:, None]
        t_range = t_range.tz_localize('UTC')

    elif data_source == 'abm':
        print(f'load abm data')
        abm_dir = osp.join(data_dir, 'abm')
        voronoi_radars = voronoi.query('observed == True')
        radar_buffers_radars = radar_buffers.query('observed == True')
        data, t_range, bird_u, bird_v = abm.load_season(abm_dir, season, year, voronoi_radars)
        buffer_data = abm.load_season(abm_dir, season, year, radar_buffers_radars, uv=False)[0]

        # rescale to birds per km^2
        birds_km2 = data / voronoi_radars.area_km2.to_numpy()[:, None]
        birds_km2_from_buffer = buffer_data / radar_buffers_radars.area_km2.to_numpy()[:, None]
        # rescale to birds per voronoi cell
        birds_from_buffer = birds_km2_from_buffer * voronoi_radars.area_km2.to_numpy()[:, None]

    # time range for solar positions to be able to infer dusk and dawn
    solar_t_range = t_range.insert(-1, t_range[-1] + pd.Timedelta(t_range.freq))

    print('load env data')
    env_vars = kwargs.get('env_vars', ['u', 'v', 'u10', 'v10', 'cc', 'tp', 'sp', 't2m', 'sshf'])
    env_vars = [v for v in env_vars if not v in ['night', 'dusk', 'dawn', 'dayofyear', 'solarpos', 'solarpos_dt']]

    if len(env_vars) > 0:
        if edge_type == 'voronoi':
            env_areas = voronoi.geometry
        else:
            env_areas = radar_buffers.geometry
        env_850 = era5interface.compute_cell_avg(osp.join(data_dir, 'env', season, year, 'pressure_level_850.nc'),
                                                 env_areas, env_points,
                                                 t_range.tz_localize(None), vars=env_vars, seed=random_seed)
        env_surface = era5interface.compute_cell_avg(osp.join(data_dir, 'env', season, year, 'surface.nc'),
                                                     env_areas, env_points,
                                                     t_range.tz_localize(None), vars=env_vars, seed=random_seed)

    dfs = []
    for ridx, row in voronoi.iterrows():
        df = {}
        df['radar'] = [row.radar] * len(t_range)

        print(f'preprocess radar {row.radar}')

        # time related variables for radar ridx
        solarpos = np.array(solarposition.get_solarposition(solar_t_range, row.lat, row.lon).elevation)
        night = np.logical_or(solarpos[:-1] < -6, solarpos[1:] < -6)
        df['solarpos_dt'] = solarpos[:-1] - solarpos[1:]
        df['solarpos'] = solarpos[:-1]
        df['night'] = night
        df['dusk'] = np.logical_and(solarpos[:-1] >= 6, solarpos[1:] < 6)  # switching from day to night
        df['dawn'] = np.logical_and(solarpos[:-1] < 6, solarpos[1:] >= 6)  # switching from night to day
        df['datetime'] = t_range
        df['dayofyear'] = pd.DatetimeIndex(t_range).dayofyear
        df['tidx'] = np.arange(t_range.size)

        # bird measurements for radar ridx
        df['birds'] = data[ridx] if row.observed else [np.nan] * len(t_range)
        df['birds_km2'] = birds_km2[ridx] if row.observed else [np.nan] * len(t_range)

        cols = ['birds', 'birds_km2', 'birds_from_buffer', 'birds_km2_from_buffer', 'bird_u', 'bird_v']

        df['bird_u'] = bird_u[ridx] if row.observed else [np.nan] * len(t_range)
        df['bird_v'] = bird_v[ridx] if row.observed else [np.nan] * len(t_range)

        if data_source == 'abm':
            df['birds_from_buffer'] = birds_from_buffer[ridx] if row.observed else [np.nan] * len(t_range)
            df['birds_km2_from_buffer'] = birds_km2_from_buffer[ridx] if row.observed else [np.nan] * len(t_range)
        else:
            df['birds_from_buffer'] = data[ridx] if row.observed else [np.nan] * len(t_range)
            df['birds_km2_from_buffer'] = birds_km2[ridx] if row.observed else [np.nan] * len(t_range)
            df['bird_speed'] = bird_speed[ridx] if row.observed else [np.nan] * len(t_range)
            df['bird_direction'] = bird_direction[ridx] if row.observed else [np.nan] * len(t_range)
            cols.extend(['bird_speed', 'bird_direction'])

        if len(env_vars) > 0:
            # environmental variables for radar ridx
            for var in env_vars:
                if var in env_850:
                    print(f'found {var} in env_850 dataset')
                    df[var] = env_850[var][ridx]
                elif var in env_surface:
                    print(f'found {var} in surface dataset')
                    df[var] = env_surface[var][ridx]
            df['wind_speed'] = np.sqrt(np.square(df['u']) + np.square(df['v']))
            # Note that here wind direction is the direction into which the wind is blowing,
            # which is the opposite of the standard meteorological wind direction
            df['wind_dir'] = (abm.uv2deg(df['u'], df['v']) + 360) % 360

            # compute accumulation variables (for baseline models)
            groups = [list(g) for k, g in it.groupby(enumerate(df['night']), key=lambda x: x[-1])]
            nights = [[item[0] for item in g] for g in groups if g[0][1]]
            df['nightID'] = np.zeros(t_range.size)
            df['acc_rain'] = np.zeros(t_range.size)
            df['acc_wind'] = np.zeros(t_range.size)
            df['wind_profit'] = np.zeros(t_range.size)
            acc_rain = 0
            u_rain = 0
            acc_wind = 0
            u_wind = 0
            for nidx, night in enumerate(nights):
                df['nightID'][night] = np.ones(len(night)) * (nidx + 1)

                # accumulation due to rain in the past nights
                acc_rain = acc_rain / 3 + u_rain * 2 / 3
                df['acc_rain'][night] = np.ones(len(night)) * acc_rain
                # compute proportion of hours with rain during the night
                u_rain = np.mean(df['tp'][night] > 0.01)

                # accumulation due to unfavourable wind in the past nights
                acc_wind = acc_wind / 3 + u_wind * 2 / 3
                df['acc_wind'][night] = np.ones(len(night)) * acc_wind
                # compute wind profit for bird with speed 12 m/s and flight direction 223 degree north
                v_air = np.ones(len(night)) * 12
                alpha = np.ones(len(night)) * pref_dir
                df['wind_profit'][night] = v_air - np.sqrt(v_air ** 2 + df['wind_speed'][night] ** 2 -
                                                           2 * v_air * df['wind_speed'][night] *
                                                           np.cos(np.deg2rad(alpha - df['wind_dir'][night])))
                u_wind = np.mean(df['wind_profit'][night]) < wp_threshold

        radar_df = pd.DataFrame(df)
        radar_df['missing'] = 0

        for col in cols:
            if data_source == 'radar':
                # radar quantities being exactly 0 during the night are missing,
                # radar quantities during the day are set to 0
                print(f'check missing data for column {col}')
                radar_df[col] = radar_df.apply(lambda row: np.nan if (row.night and not row[col])
                                               else (0 if not row.night else row[col]), axis=1)

                # remember missing radar observations
                radar_df['missing'] = radar_df['missing'] | radar_df[col].isna()

                # fill missing bird measurements by interpolation
                if col == 'bird_direction':
                    # use "nearest", to avoid artifacts of interpolating between e.g. 350 and 2 degree
                    radar_df[col].interpolate(method='nearest', inplace=True)
                else:
                    # for all other quantities simply interpolate linearly
                    radar_df[col].interpolate(method='linear', inplace=True)
            else:
                radar_df[col] = radar_df.apply(lambda row: np.nan if (row.night and np.isnan(row[col]))
                                               else (0 if not row.night else row[col]), axis=1)
                radar_df['missing'] = radar_df['missing'] | radar_df[col].isna()
                # fill missing bird measurements with 0
                radar_df[col].fillna(0, inplace=True)

        dfs.append(radar_df)
        print(f'found {radar_df.missing.sum()} missing time points')

    dynamic_feature_df = pd.concat(dfs, ignore_index=True)
    print(f'columns: {dynamic_feature_df.columns}')
    return dynamic_feature_df
fd5675b127d6a20f930d8ee88366e7426c5a09b9
3,658,304
def __normalize_allele_strand(snp_dfm):
    """
    Keep all the alleles on FWD strand.
    If `strand` is "-", flip every base in `alleles`; otherwise do not change `alleles`.
    """
    on_rev = (snp_dfm.loc[:, "strand"] == "-")
    has_alleles = (snp_dfm.loc[:, "alleles"].str.len() > 0)
    condition = (on_rev & has_alleles)
    if not snp_dfm.loc[condition, :].empty:
        snp_dfm.loc[condition, "alleles"] = snp_dfm.loc[condition, "alleles"].apply(flip_allele)
    return snp_dfm
1ebe00294eb55de96d68fc214bd5051d40a2dfa5
3,658,305
def add_to_codetree(tword, codetree, freq=1):
    """
    Adds one tuple-word to the tree structure; one node per symbol.
    A word end in the tree is characterized by node[0] > 0.
    """
    unique = 0
    for pos in range(len(tword)):
        s = tword[pos]
        if s not in codetree:
            codetree[s] = [0, {}]
            unique += 1
        codetree[s][0] += freq
        codetree = codetree[s][1]
    return unique
e92a48f112e7a774bed3b125509f7f64dce0a7ec
3,658,306
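A small usage sketch for the add_to_codetree record above (not part of the dataset):

tree = {}
assert add_to_codetree(('c', 'a', 't'), tree) == 3  # three new nodes created
assert tree['c'][0] == 1                            # one word passes through 'c'
assert 't' in tree['c'][1]['a'][1]                  # nested path c -> a -> t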
import numpy as np
import talib

def TA_ADXm(data, period=10, smooth=10, limit=18):
    """
    Moving Average ADX
    ADX Smoothing Trend Color Change on Moving Average and ADX Cross.
    Use on Hourly Charts - Green UpTrend - Red DownTrend - Black Choppy No Trend

    Source: https://www.tradingview.com/script/owwws7dM-Moving-Average-ADX/

    Parameters
    ----------
    data : (N,) array_like
        The OHLC Kline.
    period : int or None, optional
        DI Length period. Default value is 10.
    smooth : int or None, optional
        ADX smoothing length period. Default value is 10.
    limit : int or None, optional
        ADX MA Active limit threshold. Default value is 18.

    Returns
    -------
    adx, ADXm : ndarray
        ADXm indicator and trend direction sequence. (-1, 0, 1) stand for
        (negative, no trend, positive).
    """
    up = data.high.pct_change()
    down = data.low.pct_change() * -1
    trur = TA_HMA(talib.TRANGE(data.high.values, data.low.values, data.close.values), period)
    plus = 100 * TA_HMA(np.where(((up > down) & (up > 0)), up, 0), period) / trur
    minus = 100 * TA_HMA(np.where(((down > up) & (down > 0)), down, 0), period) / trur

    # Workaround for dropna: dropping values from the ndarray could desynchronize
    # it from data.index, so the leading values are zero-padded instead.
    plus = np.r_[np.zeros(period + 2), plus[(period + 2):]]
    minus = np.r_[np.zeros(period + 2), minus[(period + 2):]]
    dmi_sum = plus + minus  # renamed from `sum` to avoid shadowing the builtin
    adx = 100 * TA_HMA(abs(plus - minus) / (np.where((dmi_sum == 0), 1, dmi_sum)), smooth)
    adx = np.r_[np.zeros(smooth + 2), adx[(smooth + 2):]]
    ADXm = np.where(((adx > limit) & (plus > minus)), 1,
                    np.where(((adx > limit) & (plus < minus)), -1, 0))
    return adx, ADXm
40f41b013127b122bddf66e3dfe53f746c89b3c7
3,658,307
def remove_from_dict(obj, keys=(), keep_keys=True):
    """
    Prune a class or dictionary of all but keys (keep_keys=True).
    Prune a class or dictionary of specified keys (keep_keys=False).
    """
    # a single isinstance check covers both of the original duplicated dict branches
    if isinstance(obj, dict):
        items = list(obj.items())
    else:
        items = list(obj.__dict__.items())

    if keep_keys:
        return {k: v for k, v in items if k in keys}
    else:
        return {k: v for k, v in items if k not in keys}
b1d9a2bd17269e079ce136cc464060fc47fe5906
3,658,308
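Illustrative calls for the remove_from_dict record above (not part of the dataset):

record = {"id": 7, "name": "ada", "password": "hunter2"}
assert remove_from_dict(record, keys=["id", "name"]) == {"id": 7, "name": "ada"}
assert remove_from_dict(record, keys=["password"], keep_keys=False) == {"id": 7, "name": "ada"}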
import numpy as np
from astropy.table import Column

def unify_qso_catalog_uvqs_old(qsos):
    """Unifies the name of columns that are relevant for the analysis"""
    qsos.rename_column('RA', 'ra')
    qsos.rename_column('DEC', 'dec')
    qsos.rename_column('FUV', 'mag_fuv')
    qsos.rename_column('Z', 'redshift')
    qsos.add_column(Column(name='id', data=np.arange(len(qsos)) + 1))
    return qsos
8fe561e7d6e99c93d08efe5ff16d6e37ed66ab4e
3,658,309
def get_hash_key_name(value):
    """Returns a valid entity key_name that's a hash of the supplied value."""
    return 'hash_' + sha1_hash(value)
b2bba3031efccb5dab1781695fc39c993f735e71
3,658,310
import yaml

def yaml_dumps(value, indent=2):
    """
    YAML dumps that supports Unicode and the ``as_raw`` property of objects if available.
    """
    return yaml.dump(value, indent=indent, allow_unicode=True, Dumper=YamlAsRawDumper)
ed368fb84967190e460c1bcf51bd573323ff4f46
3,658,311
def poi_remove(poi_id: int):
    """Removes POI record

    Args:
        poi_id: ID of the POI to be removed
    """
    poi = POI.get_by_id(poi_id)
    if not poi:
        abort(404)

    poi.delete()
    db.session.commit()

    return redirect_return()
26c1cb2524c6a19d9382e9e0d27947a0d2b2a98c
3,658,312
def stringToTupleOfFloats(s):
    """
    Converts s to a tuple
    @param s: string
    @return: tuple represented by s
    """
    ans = []
    for i in s.strip("()").split(","):
        if i.strip() != "":
            if i == "null":
                ans.append(None)
            else:
                ans.append(float(i))
    return tuple(ans)
7eec23232f884035b12c6498f1e68616e4580878
3,658,313
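Illustrative calls for the stringToTupleOfFloats record above (not part of the dataset):

assert stringToTupleOfFloats("(1.5,null,3)") == (1.5, None, 3.0)
assert stringToTupleOfFloats("()") == ()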
import json
import requests

def create_training(training: TrainingSchema):
    """
    Create a training with a TrainingSchema

    :param training: training data as TrainingSchema
    :return: http response
    """
    endpoint_url = Config.get_api_url() + "training"
    job_token = Config.get_job_token()
    headers = {
        'content-type': 'application/json',
        'jobtoken': job_token
    }
    data = json.dumps(training.get_dict())
    response = requests.post(endpoint_url, data=data, headers=headers)
    return response
c0ce20fc68cbb3d46b00e451b85bf01991579bcc
3,658,314
from functools import wraps

def respects_language(fun):
    """Decorator for tasks with respect to site's current language.

    You can use this decorator on your tasks together with default @task
    decorator (remember that the task decorator must be applied last).

    See also the with-statement alternative :func:`respect_language`.

    **Example**:

    .. code-block:: python

        @task
        @respects_language
        def my_task()
            # localize something.

    The task will then accept a ``language`` argument that will be
    used to set the language in the task, and the task can thus be
    called like:

    .. code-block:: python

        from django.utils import translation
        from myapp.tasks import my_task

        # Pass the current language on to the task
        my_task.delay(language=translation.get_language())

        # or set the language explicitly
        my_task.delay(language='no.no')
    """
    @wraps(fun)
    def _inner(*args, **kwargs):
        with respect_language(kwargs.pop('language', None)):
            return fun(*args, **kwargs)
    return _inner
547629321d649a102a0c082b1eddcac32334432c
3,658,315
import numpy as np

def zero_one_window(data, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1, floor=0, ceiling=1,
                    channels_axis=None):
    """
    :param data: Numpy ndarray.
    :param axis: axes over which to compute the thresholds.
    :param ceiling_percentile: Percentile value of the foreground to set to the ceiling.
    :param floor_percentile: Percentile value of the image to set to the floor.
    :param floor: New minimum value.
    :param ceiling: New maximum value.
    :param channels_axis: axis holding the channels; inferred if not given.
    :return: windowed data
    """
    data = np.copy(data)
    if len(axis) != data.ndim:
        floor_threshold = np.percentile(data, floor_percentile, axis=axis)
        if channels_axis is None:
            channels_axis = find_channel_axis(data.ndim, axis=axis)
        data = np.moveaxis(data, channels_axis, 0)
        for channel in range(data.shape[0]):
            channel_data = data[channel]
            # find the background
            bg_mask = channel_data <= floor_threshold[channel]
            # use background to find foreground
            fg = channel_data[bg_mask == False]
            # find threshold based on foreground percentile
            ceiling_threshold = np.percentile(fg, ceiling_percentile)
            # normalize the data for this channel
            data[channel] = window_data(channel_data, floor_threshold=floor_threshold[channel],
                                        ceiling_threshold=ceiling_threshold, floor=floor, ceiling=ceiling)
        data = np.moveaxis(data, 0, channels_axis)
    else:
        floor_threshold = np.percentile(data, floor_percentile)
        fg_mask = data > floor_threshold
        fg = data[fg_mask]
        ceiling_threshold = np.percentile(fg, ceiling_percentile)
        data = window_data(data, floor_threshold=floor_threshold, ceiling_threshold=ceiling_threshold,
                           floor=floor, ceiling=ceiling)
    return data
4056433a9f3984bebc1c99f30be4f8e9ddc31026
3,658,316
def get_event_stderr(e):
    """Return the stderr field (if any) associated with the event."""
    if _API_VERSION == google_v2_versions.V2ALPHA1:
        return e.get('details', {}).get('stderr')
    elif _API_VERSION == google_v2_versions.V2BETA:
        for event_type in ['containerStopped']:
            if event_type in e:
                return e[event_type].get('stderr')
    else:
        assert False, 'Unexpected version: {}'.format(_API_VERSION)
89a32228d3ad0ecb92c6c0b45664903d6f4b507d
3,658,318
from math import radians, tan

def xA(alpha, gamma, lsa, lsd, y, xp, nv):
    """Calculate position where the beam hits the analyzer crystal.

    :param alpha: the divergence angle of the neutron
    :param gamma: the tilt angle of the deflector
    :param lsa: the sample-analyzer distance
    :param lsd: the sample deflector distance
    :param y: the translation of the analyser crystal
    :param xp: the point at the sample where the neutron is scattered
    :param nv: neutron path: transmitted(0), reflected at the first
        deflector(1), reflected at the second deflector(2)
    """
    if nv == 0:
        return xp + (lsa - y) * tan(radians(alpha))
    return xp + lsd * tan(radians(alpha)) + \
        (lsa - lsd - y) * tan(radians(2 * gamma - alpha))
0dfb9bd7b761fa0669893c692f3adb2a5cb079c4
3,658,319
from datetime import datetime
from typing import Optional

def find_recent_login(user_id: UserID) -> Optional[datetime]:
    """Return the time of the user's most recent login, if found."""
    recent_login = db.session \
        .query(DbRecentLogin) \
        .filter_by(user_id=user_id) \
        .one_or_none()

    if recent_login is None:
        return None

    return recent_login.occurred_at
153dc509e2382e8f9eb18917d9be04d171ffdee9
3,658,320
async def async_remove_config_entry_device(
    hass: HomeAssistant, config_entry: ConfigEntry, device_entry: dr.DeviceEntry
) -> bool:
    """Remove ufp config entry from a device."""
    unifi_macs = {
        _async_unifi_mac_from_hass(connection[1])
        for connection in device_entry.connections
        if connection[0] == dr.CONNECTION_NETWORK_MAC
    }
    api = async_ufp_instance_for_config_entry_ids(hass, {config_entry.entry_id})
    assert api is not None
    if api.bootstrap.nvr.mac in unifi_macs:
        return False
    for device in async_get_devices(api.bootstrap, DEVICES_THAT_ADOPT):
        if device.is_adopted_by_us and device.mac in unifi_macs:
            return False
    return True
f8ae37a454f5c5e3314676162ff48e1e05530396
3,658,321
def batch_unsrt_segment_sum(data, segment_ids, num_segments):
    """Performs the `tf.unsorted_segment_sum` operation batch-wise"""
    # create distinct segments per batch
    num_batches = tf.shape(segment_ids, out_type=tf.int64)[0]
    batch_indices = tf.range(num_batches)
    segment_ids_per_batch = segment_ids + num_segments * tf.expand_dims(batch_indices, axis=1)

    # do the normal unsorted segment sum and reshape to original shape
    seg_sums = tf.unsorted_segment_sum(data, segment_ids_per_batch, num_segments * num_batches)
    return tf.reshape(seg_sums, tf.stack((-1, num_segments)))
299a514e926c43564960288c706c1d535620144b
3,658,322
import json

def read_json(file_name):
    """Read json from file."""
    with open(file_name) as f:
        return json.load(f)
2eccab7dddb1c1038de737879c465f293a00e5de
3,658,323
def get_role(request):
    """Look up the "role" query parameter in the URL."""
    query = request.ws_resource.split('?', 1)
    if len(query) == 1:
        raise LookupError('No query string found in URL')
    param = parse.parse_qs(query[1])
    if 'role' not in param:
        raise LookupError('No role parameter found in the query string')
    return param['role'][0]
87cc8f15a3d0aeb45a8d7ea67fb34573e41b7df7
3,658,324
def login(username: str, password: str) -> Person:
    """Log in to Zhixue with a username and password.

    Args:
        username (str): user name; can be an exam registration number or a phone number
        password (str): password

    Raises:
        ArgError: invalid argument
        UserOrPassError: wrong username or password
        UserNotFoundError: user not found
        LoginError: login error
        RoleError: unknown account role

    Returns:
        Person
    """
    session = get_session(username, password)
    if check_is_student(session):
        return StudentAccount(session).set_base_info()
    return TeacherAccount(session).set_base_info()
a982cddb107cc8ccf8c9d1868e91299cd6ac07f3
3,658,325
def _decode_end(_fp):
    """Decode the end tag, which has no data in the file, returning 0.

    :type _fp: A binary `file object`
    :rtype: int
    """
    return 0
5e8da3585dda0b9c3c7cd428b7e1606e585e15c6
3,658,326
def make_dqn_agent(q_agent_type,
                   arch,
                   n_actions,
                   lr=2.5e-4,
                   noisy_net_sigma=None,
                   buffer_length=10 ** 6,
                   final_epsilon=0.01,
                   final_exploration_frames=10 ** 6,
                   use_gpu=0,
                   replay_start_size=5 * 10 ** 4,
                   target_update_interval=3 * 10 ** 4,
                   update_interval=4,
                   ):
    """
    given an architecture and a specific dqn
    return the agent

    args:
        q_agent_type: choices=["DQN", "DoubleDQN", "PAL"]
        arch: choices=["nature", "nips", "dueling", "doubledqn"]
        final_epsilon: Final value of epsilon during training
        final_exploration_frames: Timesteps after which we stop annealing exploration rate
        replay_start_size: Minimum replay buffer size before performing gradient updates.
        target_update_interval: Frequency (in timesteps) at which the target network is updated
        update_interval: Frequency (in timesteps) of network updates.
    """
    # q function
    q_func = parse_arch(arch, n_actions)

    # explorer
    if noisy_net_sigma is not None:
        pnn.to_factorized_noisy(q_func, sigma_scale=noisy_net_sigma)
        # turn off explorer
        explorer = explorers.Greedy()
    else:
        # default option
        explorer = explorers.LinearDecayEpsilonGreedy(
            1.0,
            final_epsilon,
            final_exploration_frames,
            lambda: np.random.randint(n_actions),
        )

    # optimizer
    # Use the Nature paper's hyperparameters
    opt = pfrl.optimizers.RMSpropEpsInsideSqrt(
        q_func.parameters(),
        lr=lr,
        alpha=0.95,
        momentum=0.0,
        eps=1e-2,
        centered=True,
    )

    # replay buffer
    rbuf = replay_buffers.ReplayBuffer(buffer_length)

    # Feature extractor
    def phi(x):
        return np.asarray(x, dtype=np.float32) / 255

    Agent = parse_agent(q_agent_type)
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=use_gpu,  # 0 or -1
        gamma=0.99,
        explorer=explorer,
        replay_start_size=replay_start_size,
        target_update_interval=target_update_interval,
        clip_delta=True,
        update_interval=update_interval,
        batch_accumulator="sum",
        phi=phi,
    )
    return agent
0b5974e30a12ef760a424d8d229319ccfee3119a
3,658,327
def build_consensus_from_haplotypes(haplotypes):
    """
    # ========================================================================
    BUILD CONSENSUS FROM HAPLOTYPES

    PURPOSE
    -------
    Builds a consensus from a list of Haplotype objects.

    INPUT
    -----
    [HAPLOTYPE LIST] [haplotypes]
        The list of haplotypes.

    RETURN
    ------
    [String] consensus
        The consensus sequence.
    # ========================================================================
    """
    pileup = build_pileup_from_haplotypes(haplotypes)
    consensus = pileup.build_consensus()
    return consensus
977e59e77e45cb4ccce95875f4802a43028af060
3,658,328
from typing import Dict, List, Tuple

def convert_data_for_rotation_averaging(
    wTi_list: List[Pose3], i2Ti1_dict: Dict[Tuple[int, int], Pose3]
) -> Tuple[Dict[Tuple[int, int], Rot3], List[Rot3]]:
    """Converts the poses to inputs and expected outputs for a rotation averaging algorithm.

    Args:
        wTi_list: List of global poses.
        i2Ti1_dict: Dictionary of (i1, i2) -> i2Ti1 relative poses.

    Returns:
        i2Ti1_dict's values mapped to relative rotations i2Ri1.
        wTi_list mapped to global rotations.
    """
    wRi_list = [x.rotation() for x in wTi_list]
    i2Ri1_dict = {k: v.rotation() for k, v in i2Ti1_dict.items()}
    return i2Ri1_dict, wRi_list
1f058ae1925f5392416ec4711b55e849e277a24c
3,658,329
import numpy as np

def all_arrays_to_gpu(f):
    """Decorator to copy all the numpy arrays to the gpu before function invocation"""
    def inner(*args, **kwargs):
        args = list(args)
        for i in range(len(args)):
            if isinstance(args[i], np.ndarray):
                args[i] = to_gpu(args[i])
        return f(*args, **kwargs)
    return inner
25ea43a611ac8a63aa1246aaaf810cec71be4c3f
3,658,330
import numpy as np

def create_intersect_mask(num_v, max_v):
    """
    Creates intersect mask as needed by polygon_intersection_new
    in batch_poly_utils (for a single example)
    """
    intersect_mask = np.zeros((max_v, max_v), dtype=np.float32)
    for i in range(num_v - 2):
        for j in range((i + 2) % num_v, num_v - int(i == 0)):
            intersect_mask[i, j] = 1.
    return intersect_mask
32d2758e704901aa57b70e0edca2b9292df2583a
3,658,331
def gdi_abuse_tagwnd_technique_bitmap():
    """
    Technique to be used on Win 10 v1703 or earlier. Locate the pvscan0 address
    with the help of tagWND structures
    @return: pvscan0 address of the manager and worker bitmap and the handles
    """
    window_address = alloc_free_windows(0)
    manager_bitmap_handle = create_bitmap(0x100, 0x6D, 1)
    manager_bitmap_pvscan0 = window_address + 0x50

    window_address = alloc_free_windows(0)
    worker_bitmap_handle = create_bitmap(0x100, 0x6D, 1)
    worker_bitmap_pvscan0 = window_address + 0x50

    return (manager_bitmap_pvscan0, worker_bitmap_pvscan0, manager_bitmap_handle, worker_bitmap_handle)
e77253d082b9aaaa083c84ffdbe8a74ae0b84b0b
3,658,332
def check_stop() -> list:
    """Checks for entries in the stopper table in base db.

    Returns:
        list: Returns the flag, caller from the stopper table.
    """
    with db.connection:
        cursor = db.connection.cursor()
        flag = cursor.execute("SELECT flag, caller FROM stopper").fetchone()
    return flag
b2694938541704508d5304bae9abff25da2e0fc9
3,658,334
def get_camelcase_name_chunks(name):
    """
    Given a name, get its parts.
    E.g: maxCount -> ["max", "count"]
    """
    out = []
    out_str = ""
    for c in name:
        if c.isupper():
            if out_str:
                out.append(out_str)
            out_str = c.lower()
        else:
            out_str += c
    out.append(out_str)
    return out
134a8b1d98af35f185b37c999fbf499d18bf76c5
3,658,336
def _GetBuildBotUrl(builder_host, builder_port):
  """Gets build bot URL for fetching build info.

  Bisect builder bots are hosted on tryserver.chromium.perf, though we cannot
  access this tryserver using host and port number directly, so we use
  another tryserver URL for the perf tryserver.

  Args:
    builder_host: Hostname of the server where the builder is hosted.
    builder_port: Port number of the server where the builder is hosted.

  Returns:
    URL of the buildbot as a string.
  """
  if (builder_host == PERF_BISECT_BUILDER_HOST and
      builder_port == PERF_BISECT_BUILDER_PORT):
    return PERF_TRY_SERVER_URL
  else:
    return 'http://%s:%s' % (builder_host, builder_port)
551ac7ee9079009cd8b52e41aeabb2b2e4e10c21
3,658,337
def case_two_args_positional_callable_first(replace_by_foo):
    """ Tests the decorator with two positional arguments, the callable first
    @my_decorator(goo, 'hello')
    """
    return replace_by_foo(goo, 'hello'), goo
fa5ca0af3d5af7076aebbb8364f29fc64b4e3c28
3,658,338
def cal_sort_key(cal):
    """
    Sort key for the list of calendars: primary calendar first,
    then other selected calendars, then unselected calendars.
    (" " sorts before "X", and tuples are compared piecewise)
    """
    if cal["selected"]:
        selected_key = " "
    else:
        selected_key = "X"
    if cal["primary"]:
        primary_key = " "
    else:
        primary_key = "X"
    return (primary_key, selected_key, cal["summary"])
fd1d8b32ee904d3684decba54268d926c5fd3d82
3,658,339
from datetime import datetime
from zipfile import ZipFile


def select_zip_info(sample: bytes) -> tuple:
    """Print a list of items contained within the ZIP file, along with
    their last modified times, CRC32 checksums, and file sizes. Return
    info on the item selected by the user as a tuple.
    """
    t = []
    w = 0
    z = ZipFile(sample)

    for i in z.infolist():
        if len(i.filename) > w:
            w = len(i.filename)
        t.append((i.filename, datetime(*i.date_time), i.CRC, i.file_size))

    for i in range(len(t)):
        dt = t[i][1].strftime('%Y-%m-%d %H:%M:%S')
        crc = t[i][2].to_bytes(4, 'big').hex()
        print(f'{i + 1: >2}. {t[i][0]: <{w}} {dt} {crc} {t[i][3]}')

    n = input('\nEnter a number corresponding to the desired entry: ')
    print()
    return t[int(n) - 1]
aac5b04c40552c09d07bf2db0c2d4431fc168aa2
3,658,340
import numpy as np


def unitary_ifft2(y):
    """
    A unitary version of the ifft2.
    """
    # ni and nj are assumed module-level grid dimensions (rows, cols).
    return np.fft.ifft2(y) * np.sqrt(ni * nj)
16dfe62cea08a72888cc3390f4d85f069aac5718
3,658,342
def orb_scf_input(sdmc):
    """ find the scf inputs used to generate sdmc """
    myinputs = None  # this is the goal
    sdep = 'dependencies'  # string representation of the dependencies entry

    # step 1: find the p2q simulation id
    p2q_id = None
    for key in sdmc[sdep].keys():
        if sdmc[sdep][key].result_names[0] == 'orbitals':
            p2q_id = key
        # end if
    # end for dep

    # step 2: find the nscf simulation
    nscf_id_list = list(sdmc[sdep][p2q_id]['sim'][sdep].keys())
    assert len(nscf_id_list) == 1
    nscf_id = nscf_id_list[0]
    nscf = sdmc[sdep][p2q_id]['sim'][sdep][nscf_id]
    myinputs = nscf['sim']['input']

    # step 3: find the scf simulation
    calc = myinputs['control']['calculation']
    if calc == 'scf':  # scf may actually be the scf simulation
        pass  # myinputs is already set
    elif calc == 'nscf':  # if nscf is not the scf, then we need to go deeper
        scf_id = list(nscf['sim'][sdep].keys())[0]
        scf = nscf['sim'][sdep][scf_id]
        myinputs = scf['sim']['input']  # this is it!
        scalc = myinputs['control']['calculation']
        if scalc != 'scf':
            raise RuntimeError('nscf depends on %s instead of scf' % scalc)
        # end if
    else:
        raise RuntimeError('unknown simulation type %s' % calc)
    # end if
    return myinputs.to_dict()
c319693e9673edf540615025baf5b5199c5e27a3
3,658,343
def is_success(code):
    """ Returns True if the response code is accepted for HTTP GET requests:
    any 2xx success code, plus 404 and 500, which are treated as expected
    responses here.

    :param code: HTTP response codes
    :type code: int
    """
    if (200 <= code < 300) or code in [404, 500]:
        return True
    return False
fa502b4989d80edc6e1c6c717b6fe1347f99990d
3,658,344
from typing import Optional, Union


async def asyncio(
    *,
    client: AuthenticatedClient,
    json_body: SearchEventIn,
) -> Optional[Union[ErrorResponse, SearchEventOut]]:
    """Search Event

     Given a Trecho, a list of Grupos resulting from the search for that
    Trecho, and a price token, updates the Grupos' prices and the token.
    Counts the visit (if it passes validation).

    Args:
        json_body (SearchEventIn):

    Returns:
        Response[Union[ErrorResponse, SearchEventOut]]
    """

    return (
        await asyncio_detailed(
            client=client,
            json_body=json_body,
        )
    ).parsed
6bf2a312d41cf77776e0c333ed72080c030a7170
3,658,345
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer


def get_symmtrafo(newstruct_sub):
    """Find a symmetry operation of the structure whose rotation maps the
    c-axis direction [0, 0, 1] onto [0, 0, -1], preferring proper rotations.

    Parameters
    ----------
    newstruct_sub : pymatgen structure
        pymatgen structure of the bulk material

    Returns
    -------
    trafo : dict
        Symmetry operation acting on fractional coordinates, with keys
        'rot_frac' (3x3 rotation matrix as a nested list) and 'trans_frac'
        (translation vector as a list). An empty list is returned if no
        such operation exists.
    """
    sg = SpacegroupAnalyzer(newstruct_sub)
    trr = sg.get_symmetry_dataset()
    trafo = []
    for index, op in enumerate(trr['rotations']):
        if np.linalg.norm(np.array([0, 0, -1]) - op[2]) < 0.0000001 and np.linalg.det(op) > 0:
            # print('transformation found', op, index, trr['translations'][index])
            trafo = {'rot_frac': op.tolist(),
                     'trans_frac': trr['translations'][index].tolist()}
            break

    # Now we have the trafo (to be used on fractional coordinates)
    if trafo == []:
        for index, op in enumerate(trr['rotations']):
            if np.linalg.norm(np.array([0, 0, -1]) - op[2]) < 0.0000001:
                # print('transformation found', op, index, trr['translations'][index])
                trafo = {'rot_frac': op.tolist(),
                         'trans_frac': trr['translations'][index].tolist()}
                break
    return trafo
9a346b4d0761de467baae1ee5f4cb0c623929180
3,658,346
def convert_sentence_into_byte_sequence(words, tags, space_idx=32, other='O'):
    """ Convert a list of words and their tags into a sequence of bytes, and
    the corresponding tag of each byte.
    """
    byte_list = []
    tag_list = []
    for word_index, (word, tag) in enumerate(zip(words, tags)):
        tag_type = get_tag_type(tag)
        if is_inside_tag(tag) and word_index > 0:
            byte_list += [space_idx]
            tag_list += [tag_type]
        elif word_index > 0:
            byte_list += [space_idx]
            tag_list += [other]

        b_seq = bytes(word, encoding='utf-8')
        nbytes = len(b_seq)

        byte_list += b_seq
        tag_list += [tag_type] * nbytes

    assert len(byte_list) == len(tag_list)
    return byte_list, tag_list
2288d22e44d99ee147c9684befd3d31836a66a9d
3,658,347
def get_number_rows(ai_settings, ship_height, alien_height):
    """Determine the number of rows of aliens that fit on the screen."""
    available_space_y = (ai_settings.screen_height -
                         (3 * alien_height) - ship_height)
    number_rows = int(available_space_y / (2 * alien_height))
    return number_rows
473f73bc5fb4d6e86acb90f01d861d4d8561d494
3,658,349
def map_ref_sites(routed: xr.Dataset, gauge_reference: xr.Dataset,
                  gauge_sites=None, route_var='IRFroutedRunoff',
                  fill_method='r2', min_kge=-0.41):
    """
    Assigns segs within routed boolean 'is_gauge' "identifiers" and
    what each seg's upstream and downstream reference seg designations are.

    Parameters
    ----------
    routed: xr.Dataset
        Contains the input flow timeseries data.
    gauge_reference: xr.Dataset
        Contains reference flow timeseries data for the same watershed
        as the routed dataset.
    gauge_sites: list, optional
        If None, gauge_sites will be taken as all those listed in
        gauge_reference.
    route_var: str
        Variable name of flows used for fill_method purposes within routed.
        This is defaulted as 'IRFroutedRunoff'.
    fill_method: str
        While finding some upstream/downstream reference segs may be simple
        (segs with 'is_gauge' = True are their own reference segs, others may
        be easy to find looking directly up or downstream), some river
        networks may have multiple options to select gauge sites and may fail
        to have upstream/downstream reference segs designated. 'fill_method'
        specifies how segs should be assigned upstream/downstream reference
        segs for bias correction if they are missed walking upstream or
        downstream. Currently supported methods:
            'leave_null'
                nothing is done to fill missing reference segs, np.nan values
                are replaced with a -1 seg designation and that's it
            'forward_fill'
                xarray's ffill method is used to fill in any np.nan values
            'r2'
                reference segs are selected based on which reference site
                that seg's flows has the greatest r2 value with
            'kldiv'
                reference segs are selected based on which reference site
                that seg's flows has the smallest KL Divergence value with
            'kge'
                reference segs are selected based on which reference site
                that seg's flows has the greatest KGE value with

    Returns
    -------
    routed: xr.Dataset
        Routed timeseries with reference gauge site river segments assigned
        to each river segment in the original routed.
""" if isinstance(gauge_sites, type(None)): gauge_sites = gauge_reference['site'].values else: # need to typecheck since we do a for loop later and don't # want to end up iterating through a string by accident assert isinstance(gauge_sites, list) gauge_segs = gauge_reference.sel(site=gauge_sites)['seg'].values routed['is_gauge'] = False * routed['seg'] routed['down_ref_seg'] = np.nan * routed['seg'] routed['up_ref_seg'] = np.nan * routed['seg'] routed['up_seg'] = 0 * routed['is_headwaters'] routed['up_seg'].values = [find_up(routed, s, sel_method=fill_method) for s in routed['seg'].values] for s in routed['seg']: if s in list(gauge_segs): routed['is_gauge'].loc[{'seg':s}] = True routed['down_ref_seg'].loc[{'seg': s}] = s routed['up_ref_seg'].loc[{'seg': s}] = s for seg in routed['seg']: cur_seg = seg.values[()] while cur_seg in routed['seg'].values and np.isnan(routed['down_ref_seg'].sel(seg=cur_seg)): cur_seg = routed['down_seg'].sel(seg=cur_seg).values[()] if cur_seg in routed['seg'].values: routed['down_ref_seg'].loc[{'seg':seg}] = routed['down_ref_seg'].sel(seg=cur_seg).values[()] for seg in routed['seg']: cur_seg = seg.values[()] while cur_seg in routed['seg'].values and np.isnan(routed['up_ref_seg'].sel(seg=cur_seg)): cur_seg = routed['up_seg'].sel(seg=cur_seg).values[()] if cur_seg in routed['seg'].values: routed['up_ref_seg'].loc[{'seg':seg}] = routed['up_ref_seg'].sel(seg=cur_seg).values[()] # Fill in any remaining nulls (head/tailwaters) if fill_method == 'leave_null': # since there should be no -1 segs from mizuroute, we can set nan's to -1 to acknowledge # that they have been addressed and still set them apart from the rest of the data routed['up_ref_seg'] = (routed['up_ref_seg'].where(~np.isnan(routed['up_ref_seg']), other=-1)) routed['down_ref_seg'] = (routed['down_ref_seg'].where(~np.isnan(routed['down_ref_seg']), other=-1)) elif fill_method == 'forward_fill': routed['up_ref_seg'] = (routed['up_ref_seg'].where( ~np.isnan(routed['up_ref_seg']), other=routed['down_ref_seg'])).ffill('seg') routed['down_ref_seg'] = (routed['down_ref_seg'].where( ~np.isnan(routed['down_ref_seg']), other=routed['up_ref_seg'])).ffill('seg') elif fill_method == 'r2': fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0] fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0] routed['r2_up_gauge'] = 0 * routed['is_gauge'] routed['r2_down_gauge'] = 0 * routed['is_gauge'] for curr_seg in routed['seg'].values: up_ref_seg = np.nan curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values): up_ref_r2, up_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2 routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg else: # this seg has already been filled in, but r2 still needs to be calculated ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values up_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2 routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2 for curr_seg in routed['seg'].values: down_ref_seg = np.nan curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values): down_ref_r2, down_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2 routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg else: # this seg has already been filled in, but r2 still needs to be 
                ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values
                down_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2
                routed['r2_down_gauge'].loc[{'seg': curr_seg}] = down_ref_r2

    elif fill_method == 'kldiv':
        fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
        fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]

        routed['kldiv_up_gauge'] = 0 * routed['is_gauge']
        routed['kldiv_down_gauge'] = 0 * routed['is_gauge']

        for curr_seg in routed['seg'].values:
            curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
            if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
                up_ref_kldiv, up_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
                routed['kldiv_up_gauge'].loc[{'seg': curr_seg}] = up_ref_kldiv
                routed['up_ref_seg'].loc[{'seg': curr_seg}] = up_ref_seg
            else:
                # this seg has already been filled in, but kldiv still needs to be calculated
                # kldiv computation could probably be gutted in the future ...
                TINY_VAL = 1e-6
                total_bins = int(np.sqrt(len(curr_seg_flow)))
                curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
                    curr_seg_flow, bins=total_bins, density=True)
                curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL

                ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg).values).values
                ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
                ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL

                up_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
                routed['kldiv_up_gauge'].loc[{'seg': curr_seg}] = up_ref_kldiv

        for curr_seg in routed['seg'].values:
            curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
            if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
                down_ref_kldiv, down_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
                routed['kldiv_down_gauge'].loc[{'seg': curr_seg}] = down_ref_kldiv
                routed['down_ref_seg'].loc[{'seg': curr_seg}] = down_ref_seg
            else:
                # this seg has already been filled in, but kldiv still needs to be calculated
                # kldiv computation could probably be gutted in the future ...
                TINY_VAL = 1e-6
                total_bins = int(np.sqrt(len(curr_seg_flow)))
                curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
                    curr_seg_flow, bins=total_bins, density=True)
                curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL

                ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg).values).values
                ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
                ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL

                down_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
                routed['kldiv_down_gauge'].loc[{'seg': curr_seg}] = down_ref_kldiv

    elif fill_method == 'kge':

        fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
        fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]

        routed['kge_up_gauge'] = min_kge + 0.0 * routed['is_gauge']
        routed['kge_down_gauge'] = min_kge + 0.0 * routed['is_gauge']

        for curr_seg in routed['seg'].values:
            up_ref_seg = np.nan
            curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
            if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
                up_ref_kge, up_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
                routed['kge_up_gauge'].loc[{'seg': curr_seg}] = up_ref_kge
                routed['up_ref_seg'].loc[{'seg': curr_seg}] = up_ref_seg
            else:
                # this seg has already been filled in, but kge still needs to be calculated
                ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values
                up_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow)
                routed['kge_up_gauge'].loc[{'seg': curr_seg}] = up_ref_kge

        for curr_seg in routed['seg'].values:
            down_ref_seg = np.nan
            curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
            if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
                down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
                routed['kge_down_gauge'].loc[{'seg': curr_seg}] = down_ref_kge
                routed['down_ref_seg'].loc[{'seg': curr_seg}] = down_ref_seg
            else:
                # this seg has already been filled in, but kge still needs to be calculated
                ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values
                down_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow)
                if down_ref_kge < min_kge:
                    down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
                routed['kge_down_gauge'].loc[{'seg': curr_seg}] = down_ref_kge
    else:
        raise ValueError('Invalid method provided for "fill_method"')

    return routed
a2146e532a7aa95ba0753aaddc6d6da2cc4f1c67
3,658,351
import numpy as np
from sklearn.metrics import brier_score_loss, matthews_corrcoef, roc_auc_score


def get_error(est_track, true_track):
    """
    """
    if est_track.ndim > 1:
        true_track = true_track.reshape((true_track.shape[0], 1))

    error = np.recarray(shape=est_track.shape,
                        dtype=[('position', float),
                               ('orientation', float),
                               ('orientation_weighted', float)])

    # Position error
    pos_err = (true_track.x - est_track.x)**2 + (true_track.y - est_track.y)**2
    error.position = np.sqrt(pos_err)

    # Orientation error (anglediff is assumed to be a module-level helper)
    error.orientation = anglediff(true_track.angle, est_track.angle, units='deg')
    error.orientation_weighted = anglediff(true_track.angle, est_track.angle_w, units='deg')

    descr = {}
    bix = np.logical_not(np.isnan(error.orientation))
    descr['orientation_median'] = np.median(np.abs(error.orientation[bix]))
    descr['orientation_mean'] = np.mean(np.abs(error.orientation[bix]))

    bix = np.logical_not(np.isnan(error.orientation_weighted))
    descr['orientation_weighted_median'] = np.nanmedian(np.abs(error.orientation_weighted[bix]))
    descr['orientation_weighted_mean'] = np.nanmean(np.abs(error.orientation_weighted[bix]))

    # no angle
    true_no_angle = np.isnan(true_track.angle)
    est_no_angle = np.isnan(est_track.angle)
    agree = np.logical_and(true_no_angle, est_no_angle)
    disagree = np.logical_xor(true_no_angle, est_no_angle)
    both = np.logical_or(true_no_angle, est_no_angle)
    descr['no_angle_auc'] = roc_auc_score(true_no_angle, est_no_angle)
    descr['no_angle_mcc'] = matthews_corrcoef(true_no_angle, est_no_angle)
    descr['no_angle_brier'] = brier_score_loss(true_no_angle, est_no_angle)
    descr['no_angle_acc'] = agree.sum() / both.sum()
    descr['no_angle_p_per_frame'] = disagree.sum() / disagree.shape[0]

    descr['position_median'] = np.median(error.position)
    descr['position_mean'] = np.mean(error.position)

    # print('True frequency of angle-does-not-apply:',
    #       true_no_angle.sum()/true_no_angle.shape[0])
    # print('Estimated frequency of angle-does-not-apply:',
    #       est_no_angle.sum()/est_no_angle.shape[0])

    return error, descr
5ccdb12b844de9b454f62375358d4a1e1b91e6f7
3,658,352
from typing import Any

import pytest


def test_conflict():
    """
    Tiles that have extras that conflict with indices should produce an error.
    """
    def tile_extras_provider(hyb: int, ch: int, z: int) -> Any:
        return {
            Indices.HYB: hyb,
            Indices.CH: ch,
            Indices.Z: z,
        }

    stack = synthetic_stack(
        tile_extras_provider=tile_extras_provider,
    )
    with pytest.raises(ValueError):
        stack.tile_metadata
2d2e86f5d60762d509e7c27f5a74715c868abbc4
3,658,353
import json


def get_node_to_srn_mapping(match_config_filename):
    """
    Returns the node-to-srn map from match_conf.json
    """
    with open(match_config_filename) as config_file:
        config_json = json.loads(config_file.read())

    if "node_to_srn_mapping" in config_json:
        return config_json["node_to_srn_mapping"]
    else:
        node_to_srn = {}
        for node_info in config_json["NodeData"]:
            node_id = node_info["TrafficNode"]
            srn_num = node_info["srn_number"]
            node_to_srn[node_id] = srn_num
        return node_to_srn
37bf2f266f4e5163cc4d6e9290a8eaf17e220cd3
3,658,354
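An illustrative call using the fallback config shape handled above. The field names come from the function itself; the values and the temporary file are made up.

import json
import tempfile

config = {"NodeData": [{"TrafficNode": 1, "srn_number": 7},
                       {"TrafficNode": 2, "srn_number": 9}]}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(config, fh)
    path = fh.name

print(get_node_to_srn_mapping(path))  # {1: 7, 2: 9}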
def nest_dictionary(flat_dict, separator):
    """ Nests a given flat dictionary.

    Nested keys are created by splitting given keys around the `separator`.
    """
    nested_dict = {}
    for key, val in flat_dict.items():
        split_key = key.split(separator)
        act_dict = nested_dict
        final_key = split_key.pop()
        for new_key in split_key:
            if new_key not in act_dict:
                act_dict[new_key] = {}
            act_dict = act_dict[new_key]
        act_dict[final_key] = val
    return nested_dict
f5b8649d916055fa5911fd1f80a8532e5dbee274
3,658,356
def write(path_, *write_):
    """Overwrites file with passed data. Data can be a string, number or
    boolean type. Returns True, None if writing operation was successful,
    False and reason message otherwise."""
    return _writeOrAppend(False, path_, *write_)
3bd5db2d833c5ff97568489596d3dcea47c1a9f4
3,658,357
import json


def prepare_saab_data(sequence):
    """ Processing data after anarci parsing. Preparing data for SAAB+
    ------------
    Parameters
        sequence - sequence object ( OAS database format )
    ------------
    Return
        sequence.Sequence - full (not-numbered) antibody sequence
        oas_output_parser(Numbered) - antibody sequence that is imgt numbered
                                      to comply with SAAB+ input format
        sequence_info_dict - Dictionary that contains sequence metadata
                             which is required for SAAB+ to run
    """
    cdr3sequence = sequence.CDRH3
    VGene = sequence.VGene[:5]
    Numbered = json.loads(sequence.Numbered)
    CDRs = [loop for loop in Numbered.keys() if "cdr" in loop]
    sequence_info_dict = {formatLoops[loop]: Numbered[loop] if "3" not in loop else cdr3sequence
                          for loop in CDRs}
    sequence_info_dict["V"] = VGene
    sequence_info_dict["Redundancy"] = find_redundancy(sequence.Redundancy)
    return sequence_obj(sequence.Sequence, oas_output_parser(Numbered), sequence_info_dict)
f88ba3f2badb951f456678e33f3371d80934754e
3,658,358
import math

import numpy as np


def _is_equidistant(array: np.ndarray) -> bool:
    """
    Check if the given 1D array is equidistant. E.g. the distance between
    all elements of the array should be equal.

    :param array: The array that should be equidistant
    """
    step = abs(array[1] - array[0])
    for i in range(0, len(array) - 1):
        curr_step = abs(array[i + 1] - array[i])
        if not math.isclose(curr_step, step, rel_tol=1e-3):
            return False
    return True
d12c12e48545697bdf337c8d20e45a27fb444beb
3,658,360
def list_a_minus_b(list1, list2):
    """Given two lists, A and B, returns A-B."""
    # list() makes the result eager under Python 3, where filter is lazy.
    return list(filter(lambda x: x not in list2, list1))
8fbac6452077ef7cf73e0625303822a35d0869c3
3,658,361
def is_equivalent(a, b):
    """Compares two strings and returns whether they are the same R code

    This is unable to determine if a and b are different code, however. If
    this returns True you may assume that they are the same, but if this
    returns False you must not assume that they are different.
    is_equivalent("0 + 1", "1") is False, for example, even though those two
    commands do the same thing.
    """
    # String pointers
    ap = 0
    bp = 0
    ps = 0

    an_comp = False

    while ap < len(a) and bp < len(b):
        # If none of the current chars are alphanumeric or the last character
        # match is not alphanumeric then skip whitespace forward
        if (a[ap] not in _an and b[bp] not in _an) or not an_comp:
            while ap < len(a) and a[ap] in _ws and not _is_a_number(a, ap):
                ap += 1
            while bp < len(b) and b[bp] in _ws and not _is_a_number(b, bp):
                bp += 1

            if ap >= len(a) or bp >= len(b):
                # Reached end of string
                break

        an_comp = False

        if a[ap] != b[bp]:  # They must be equal
            # print("Failed {}:{} / {}:{}".format(a, ap, b, bp))
            return False

        if a[ap] in _an:
            # This is comparing two alphanumeric values
            an_comp = True

        if a[ap] in _quotes:
            opener = a[ap]
            # String; must match exactly
            ap += 1
            bp += 1
            while ap < len(a) and bp < len(b) and a[ap] == b[bp]:
                if a[ap] == opener and a[ap - 1] not in _esc:
                    break
                ap += 1
                bp += 1
            else:
                # print("Failed {}:{} / {}:{} in string".format(a, ap, b, bp))
                return False

        ap += 1
        bp += 1

    # Clean up ending whitespace
    while ap < len(a) and a[ap] in _ws:
        ap += 1
    while bp < len(b) and b[bp] in _ws:
        bp += 1

    if ap >= len(a) and bp >= len(b):
        return True
    else:
        return False
c37ea6e8684c1d2fcd5d549836c9115da98c7b2f
3,658,362
def solve(lines, n):
    """Solve the problem."""
    grid = Grid(lines)
    for _ in range(n):
        grid.step()
    return grid.new_infections
2db532a911e088dd58ee17bdc036ea017e979c8d
3,658,363
import requests
from flask import jsonify, request  # assumed Flask context for request/jsonify


def get_ingredient_id():
    """Need to get ingredient ID in order to access all attributes"""
    query = request.args["text"]
    resp = requests.get(f"{BASE_URL_SP}/food/ingredients/search?",
                        params={"apiKey": APP_KEY, "query": query})
    res = resp.json()
    lst = {res['results'][i]["name"]: res['results'][i]["id"]
           for i in range(len(res['results']))}
    return jsonify(lst)
8c58232f48883a4b1e2d76ca1504b3dccabdb954
3,658,364
def xticks(ticks=None, labels=None, **kwargs):
    """
    Get or set the current tick locations and labels of the x-axis.

    Call signatures::

        locs, labels = xticks()            # Get locations and labels
        xticks(ticks, [labels], **kwargs)  # Set locations and labels

    Parameters
    ----------
    ticks : array_like
        A list of positions at which ticks should be placed. You can pass an
        empty list to disable xticks.
    labels : array_like, optional
        A list of explicit labels to place at the given *locs*.
    **kwargs
        :class:`.Text` properties can be used to control the appearance of
        the labels.

    Returns
    -------
    locs
        An array of label locations.
    labels
        A list of `.Text` objects.

    Notes
    -----
    Calling this function with no arguments (e.g. ``xticks()``) is the pyplot
    equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on
    the current axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes.

    Examples
    --------
    Get the current locations and labels:

        >>> locs, labels = xticks()

    Set label locations:

        >>> xticks(np.arange(0, 1, step=0.2))

    Set text labels:

        >>> xticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'))

    Set text labels and properties:

        >>> xticks(np.arange(12), calendar.month_name[1:13], rotation=20)

    Disable xticks:

        >>> xticks([])
    """
    ax = gca()

    if ticks is None and labels is None:
        locs = ax.get_xticks()
        labels = ax.get_xticklabels()
    elif labels is None:
        locs = ax.set_xticks(ticks)
        labels = ax.get_xticklabels()
    else:
        locs = ax.set_xticks(ticks)
        labels = ax.set_xticklabels(labels, **kwargs)
    for l in labels:
        l.update(kwargs)

    return locs, silent_list('Text xticklabel', labels)
a6b044ffc9efdc279495c25735745006de9d7a8c
3,658,365
def main() -> None:
    """
    Program entry point.
    :return: Nothing
    """
    connection = None
    try:
        connection = connect_to_db2()
        kwargs = {'year_to_schedule': 2018}
        start = timer()
        result = run(connection, **kwargs)
        output_results(result, connection)
        end = timer()
        print(f'time elapsed: {end - start}')
        connection.close()
    except Exception as e:
        print(f'Something broke ...\n\tReason: {str(e)}')
        # guard against connect_to_db2() itself failing, which would
        # otherwise leave `connection` unbound here
        if connection is not None:
            connection.close()
        exit(1)
    return None
53727547a16c8b203ca89d54f55ddbd8b2f2645b
3,658,366
def delete(home_id):
    """
    Delete A About
    ---
    """
    try:
        return custom_response({"message": "deleted", "id": home_id}, 200)
    except Exception as error:
        return custom_response(str(error), 500)
408fe8db0a728b33d7a9c065944d706d6502b8b5
3,658,368
from math import log


def round_to_sigfigs(x, sigfigs=1):
    """
    >>> round_to_sigfigs(12345.6789, 7)  # doctest: +ELLIPSIS
    12345.68
    >>> round_to_sigfigs(12345.6789, 1)  # doctest: +ELLIPSIS
    10000.0
    >>> round_to_sigfigs(12345.6789, 0)  # doctest: +ELLIPSIS
    100000.0
    >>> round_to_sigfigs(12345.6789, -1)  # doctest: +ELLIPSIS
    1000000.0
    """
    place = int(log(x, 10))
    if sigfigs <= 0:
        additional_place = x > 10. ** place
        return 10. ** (-sigfigs + place + additional_place)
    return round_to_place(x, sigfigs - 1 - place)  # round_to_place: companion helper
a5191f3c60e85d50a47a43aee38d7d1f14d3fdc6
3,658,369
import json
import urllib.request


def load_api_data(API_URL):
    """
    Download data from API_URL
    return: json
    """
    # actual download
    with urllib.request.urlopen(API_URL) as url:
        api_data = json.loads(url.read().decode())

    # testing data
    ##with open('nrw.json', 'r') as testing_set:
    ##    api_data = json.load(testing_set)
    return api_data
61832a798ac616f3d1612ce69411d4f43ed85699
3,658,370
def test_parsing(monkeypatch, capfd, configuration, expected_record_keys):
    """Verifies the feed is parsed as expected"""
    def mock_get(*args, **kwargs):
        return MockResponse()

    test_tap: Tap = TapFeed(config=configuration)
    monkeypatch.setattr(test_tap.streams["feed"]._requests_session, "send", mock_get)
    test_tap.sync_all()
    out, err = capfd.readouterr()
    tap_records = get_parsed_records(out)
    assert len(tap_records) == 10
    for record in tap_records:
        print(record)
        assert record["type"] == "RECORD"
        assert record["stream"] == "feed"
        assert record["record"]["feed_url"] == MockResponse.url
        assert list(record["record"].keys()) == expected_record_keys
25a79966eba641e4b857c80e12fb123e8fc3477f
3,658,371
def hsl(h, s, l):
    """Converts an Hsl(h, s, l) triplet into a color."""
    return Color.from_hsl(h, s, l)
081fb4b7e7fc730525d0d18182c951ad92fab895
3,658,372