Columns: content (string, 35–762k chars) · sha1 (string, 40 chars) · id (int64, 0–3.66M)
def need_verified_email(request, *args, **kwargs):  # pylint: disable=unused-argument
    """ Returns error page for unverified email on edX """
    return standard_error_page(request, 401, "verify_email.html")
258f0cd7cc9d48724a4192397742ad476baf0aaf
335
import numpy as np

def random_masking(token_ids_all):
    """Randomly mask the inputs to improve generalization."""
    result = []
    for token_ids in token_ids_all:
        rands = np.random.random(len(token_ids))
        result.append([
            t if r > 0.15 else np.random.choice(token_ids)
            for r, t in zip(rands, token_ids)
        ])
    return result
5798fd271b6f8a1749ef04139c44a53ef2801473
336
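A quick sanity check for the masking snippet above (the token ids are made up for illustration): with the 0.15 threshold, each position keeps its token with probability ~0.85 and is otherwise replaced by a random token drawn from the same sequence.

token_ids_all = [[101, 2023, 2003, 1037, 3231, 102]]  # hypothetical ids
masked = random_masking(token_ids_all)
# Roughly 15% of positions differ from the input, on average.
print(masked[0])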
def used_caches_and_sources(layers, caches, sources):
    """
    Find used cache and source names in layers and caches configuration.
    """
    used_layer_sources = find_layer_sources(layers)
    used_cache_sources = find_cache_sources(caches)
    all_used_sources = used_layer_sources.union(used_cache_sources)

    avail_caches = set(caches.keys())
    avail_sources = set(sources.keys())

    used_caches = avail_caches.intersection(all_used_sources)
    used_sources = avail_sources.intersection(all_used_sources).difference(used_caches)

    return used_caches, used_sources
21df59bea5cf4d336f9f103de841bfbbedadf3d5
338
from typing import Union

import numpy as np
import pandas as pd
from pandas import Categorical

def encode_labels(
    labels: Union[list, np.ndarray, pd.Series],
    multi_label: bool = False,
    sep: str = '|'
):
    """Encode labels

    Return coded labels, encoder, and decoder.

    Examples:
        >>> # multi-class problem
        >>> labels = ['OK', 'OK', 'NG1', 'NG2', 'OK']
        >>> encode_labels(labels)
        (
            [0, 0, 1, 2, 0],
            {'OK': 0, 'NG1': 1, 'NG2': 2},
            {0: 'OK', 1: 'NG1', 2: 'NG2'}
        )
        >>> # multi-label problem, a.k.a. one hot encoding
        >>> labels = ['dog', 'cat', 'dog|cat']
        >>> encode_labels(labels, multi_label=True)
        (
            [[0, 1], [1, 0], [1, 1]],
            {'dog': 0, 'cat': 1},
            {0: 'dog', 1: 'cat'}
        )

    Args:
        labels (list, np.ndarray): List of labels with string elements.
        multi_label (bool, optional): Is multi label classification.
        sep (str, optional): For multi-label only. Default is '|'.

    Returns:
        list or np.array: Coded labels. List in list out, array in array out.
        dict: encoder
        dict: decoder
    """
    # get classes
    if not multi_label:
        classes = mlsorted(filter(None, set(labels)))
    else:
        classes = mlsorted(
            {labs for item in filter(None, labels) for labs in item.split(sep)}
        )
    classes = [_ for _ in classes if _ not in ['']]
    n_classes = len(classes)

    # generate encoder and decoder
    encoder = {_class: code for code, _class in enumerate(classes)}
    decoder = {v: k for k, v in encoder.items()}

    # create coded labels
    if not multi_label:
        coded_labels = [encoder[x] if x is not None else x for x in labels]
    else:
        coded_labels = list()
        for x in labels:
            labs = [0] * n_classes
            if x is not None:
                for lab in x.split(sep):
                    labs[encoder[lab]] = 1
            coded_labels.append(labs)

    # to numpy or to dataframe
    if isinstance(labels, (pd.Series, pd.DataFrame)):
        if multi_label:
            coded_labels = pd.DataFrame(coded_labels, columns=encoder.keys())
        else:
            coded_labels = pd.DataFrame({'y': coded_labels}, dtype=np.int32)
    elif isinstance(labels, (np.ndarray, Categorical)):
        coded_labels = np.array(coded_labels, dtype=np.int32)

    return coded_labels, encoder, decoder
2cd3ec563edfe0d0f42df3018bfd2cf007738c9d
339
import jax
import jax.numpy as jnp

def sigmoid_xent(*, logits, labels, reduction=True):
    """Computes a sigmoid cross-entropy (Bernoulli NLL) loss over examples."""
    log_p = jax.nn.log_sigmoid(logits)
    log_not_p = jax.nn.log_sigmoid(-logits)
    nll = -jnp.sum(labels * log_p + (1. - labels) * log_not_p, axis=-1)
    return jnp.mean(nll) if reduction else nll
a427532ddf0feba69879bc5b5d5a9a34d71d9ca6
340
def is_palindrome(s: str) -> bool:
    """Return whether a string is a palindrome

    This is as efficient as you can get when computing whether a string is a
    palindrome. It runs in O(n) time and O(1) space.
    """
    if len(s) <= 1:
        return True
    i = 0
    j = len(s) - 1
    while i < j:
        if s[i] != s[j]:
            return False
        i += 1
        j -= 1
    return True
6d3001486fe3603a17e72861e3bdea495cd675c1
341
import numpy as np

def accuracy(pred_cls, true_cls, nclass=3):
    """
    compute per-node classification accuracy
    """
    accu = []
    for i in range(nclass):
        intersect = ((pred_cls == i) + (true_cls == i)).eq(2).sum().item()
        thiscls = (true_cls == i).sum().item()
        accu.append(intersect / thiscls)
    return np.array(accu)
208c2e31b5df37179b2d67a2b8423c3236c64264
342
def my_hostogram(gray, bins):
    """Pixel values have to be within the bins range, otherwise the lookup is
    out of range: for example, if a pixel has value 70 but bins is [0...40],
    then histogram[70] raises an IndexError.
    """
    histogram = [0 for i in bins]
    for i in range(gray.shape[0]):
        for j in range(gray.shape[1]):
            histogram[gray[i][j]] = histogram[gray[i][j]] + 1
    return histogram
a2e774fb7b2249325191b20e6fa08847e38211c2
343
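A minimal usage sketch for the histogram snippet above (hypothetical 2x2 grayscale values, all inside the bins range):

import numpy as np

gray = np.array([[0, 1], [1, 2]])
bins = range(3)  # pixel values must fall inside this range
print(my_hostogram(gray, bins))  # [1, 2, 1]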
def reverse(password, position_x, position_y):
    """Reverse from position_x to position_y in password."""
    password_slice = password[position_x:position_y + 1]
    password[position_x:position_y + 1] = password_slice[::-1]
    return password
46fec2c6b9c02d8efa71d53451974e46cbe68102
344
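Since the snippet assigns into a slice, `password` has to be a mutable sequence; a quick check with a list:

password = list("abcdef")
print(reverse(password, 1, 3))  # ['a', 'd', 'c', 'b', 'e', 'f']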
from typing import List, Union
import random

def gen_sentence(
    start_seq: str = None,
    N: int = 4,
    prob: float = 0.001,
    output_str: bool = True
) -> Union[List[str], str]:
    """
    Text generator using Thai2fit

    :param str start_seq: word to begin the sentence with.
    :param int N: number of words.
    :param float prob: minimum probability (min_p) used when sampling.
    :param bool output_str: return the output as a str.
    :return: list of words, or str of words
    :rtype: List[str], str

    :Example:
    ::

        from pythainlp.generate.thai2fit import gen_sentence

        gen_sentence()
        # output: 'แคทรียา อิงลิช (นักแสดง'

        gen_sentence("แมว")
        # output: 'แมว คุณหลวง '
    """
    if start_seq is None:
        start_seq = random.choice(list(thwiki_itos))
    list_word = learn.predict(
        start_seq,
        N,
        temperature=0.8,
        min_p=prob,
        sep='-*-'
    ).split('-*-')
    if output_str:
        return ''.join(list_word)
    return list_word
800b498498396a4cda84885481b09df689f541aa
345
def GetBoolValueFromString(s):
    """Returns True for true/1 strings, and False for false/0, None otherwise."""
    if s and s.lower() == 'true' or s == '1':
        return True
    elif s and s.lower() == 'false' or s == '0':
        return False
    else:
        return None
d6ef53e837fc825a32e073e3a86185093dd1d037
346
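A few quick checks of the string-to-bool snippet above, including the fall-through None case:

assert GetBoolValueFromString('True') is True
assert GetBoolValueFromString('0') is False
assert GetBoolValueFromString('yes') is None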
def genomic_del6_abs_37(genomic_del6_37_loc):
    """Create test fixture absolute copy number variation"""
    return {
        "type": "AbsoluteCopyNumber",
        "_id": "ga4gh:VAC.60XjT6dzYKX8rn6ocG4AVAxCoUFfdjI6",
        "subject": genomic_del6_37_loc,
        "copies": {"type": "Number", "value": 1}
    }
64d8bb95768587adef71c8a98111e0454dfdbb93
347
def get_typical_qualifications(cfg):
    """
    create qualification list to filter just workers with:
    - + 98% approval rate
    - + 500 or more accepted HITs
    - Location USA
    :param cfg:
    :return:
    """
    if not cfg['hit_type'].getboolean('apply_qualification'):
        return []
    qualification_requirements = [
        {
            # Worker_NumberHITsApproved
            'QualificationTypeId': '00000000000000000040',
            'Comparator': 'GreaterThanOrEqualTo',
            'IntegerValues': [500],
            'RequiredToPreview': False,
            'ActionsGuarded': 'Accept'
        }, {
            # Worker_PercentAssignmentsApproved
            'QualificationTypeId': '000000000000000000L0',
            'Comparator': 'GreaterThanOrEqualTo',
            'IntegerValues': [98],
            'RequiredToPreview': False,
            'ActionsGuarded': 'Accept'
        }, {
            # Worker_Locale
            'QualificationTypeId': '00000000000000000071',
            'Comparator': 'EqualTo',
            'LocaleValues': [
                {'Country': "US"}
            ],
            'RequiredToPreview': False,
            'ActionsGuarded': 'Accept'
        },
    ]
    return qualification_requirements
4cfad92d7c2587e2fce1caeac032a69f87c70c01
348
def _shell_wrap_inner(command, shell=True, sudo_prefix=None):
    """
    Conditionally wrap given command in env.shell (while honoring sudo.)

    (Modified from fabric.operations._shell_wrap to avoid double escaping, as
    the wrapping host command would also get shell escaped.)
    """
    # Honor env.shell, while allowing the 'shell' kwarg to override it (at
    # least in terms of turning it off.)
    if shell and not env.use_shell:
        shell = False
    # Sudo plus space, or empty string
    if sudo_prefix is None:
        sudo_prefix = ""
    else:
        sudo_prefix += " "
    # If we're shell wrapping, prefix shell and space, escape the command and
    # then quote it. Otherwise, empty string.
    if shell:
        shell = env.shell + " "
        command = '"%s"' % command  # !! removed _shell_escape() here
    else:
        shell = ""
    # Resulting string should now have correct formatting
    return sudo_prefix + shell + command
6a1a185262e312aac193b70babf9c4b8c1fc2c73
350
from typing import List
import time

def events_until(events: List[ScheduleEvent], until: time, *, after: time = None) \
        -> List[ScheduleEvent]:
    """
    Return events up to and including the given time.

    Keyword arguments:
    after -- if specified, only events after this time will be included.
    """
    if after is not None:
        events = events_after(events, after)
    return [event for event in events if event[0] <= until]
8d7390c684fb5590ad1fbdaa0680b3aff7474c56
351
import socket

def get_ip():
    """
    Get local ip from socket connection

    :return: IP Addr string
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('bing.com', 80))
    return s.getsockname()[0]
9afb9cfc5721ea7a89764579bd878a9b51361af2
352
import unicodedata

def shave_marks(txt):
    """Remove all diacritical marks."""
    norm_txt = unicodedata.normalize('NFD', txt)  # decompose characters into base characters and combining marks
    shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))  # filter out all combining marks
    return unicodedata.normalize('NFC', shaved)
0b8e15c72854a5bca7b12f6452292d7472bbf1bc
353
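A one-line check of the diacritic-stripping snippet above:

print(shave_marks('café naïve'))  # 'cafe naive'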
import numpy as np
import scipy.special as sps

def _kld_gamma(p_data, q_data):
    """
    Computes the Kullback-Leibler divergence between two gamma PDFs

    Parameters
    ----------
    p_data: np.array
        Data of the first process
    q_data: np.array
        Data of the second process

    Returns
    -------
    r_kld_gamma: numeric
        Kullback-Leibler Divergence Quantity

    References
    ----------
    [1] Bauckhage, Christian. (2014). Computing the Kullback-Leibler Divergence
        between two Generalized Gamma Distributions. arXiv. 1401.6853.
    """

    # ----------------------------------------------------- Distribution Parameters -- #

    def _gamma_params(data, method='MoM'):
        """
        Computes the parameters of a gamma probability density function (pdf),
        according to the selected method.

        Parameters
        ----------
        data: np.array
            The data to which the pdf will be fitted
        method: str
            Method to calculate the value of the parameters for the pdf
            'MoM': Method of Moments (Default)

        Returns
        -------
        r_params: dict
            {'alpha': gamma distribution parameter, 'beta': gamma distribution parameter}
        """
        # -- Method of Moments -- #
        if method == 'MoM':
            # first two moments
            mean = np.mean(data)
            variance = np.var(data)
            # sometimes referred to in the literature as k
            alpha = mean**2 / variance
            # sometimes referred to in the literature as 1/theta
            beta = mean / variance
            # return the empirically fitted gamma distribution parameters
            return alpha, beta
        # -- For errors or other unsupported methods
        else:
            raise ValueError("Currently, the supported methods are: 'MoM'")

    # alpha_1: Distribution 1: shape parameter, alpha_1 > 0
    # beta_1: Distribution 1: rate or inverse scale distribution parameter, beta_1 > 0
    alpha_1, beta_1 = _gamma_params(data=p_data)

    # alpha_2: Distribution 2: shape parameter, alpha_2 > 0
    # beta_2: Distribution 2: rate or inverse scale parameter, beta_2 > 0
    alpha_2, beta_2 = _gamma_params(data=q_data)

    # Expression with beta instead of theta
    theta_1 = 1 / beta_1
    theta_2 = 1 / beta_2
    p1, p2 = 1, 1  # Generalized Gamma Distribution with p=1 is a gamma distribution [1]

    # Calculations, see [1] for mathematical details.
    a = p1 * (theta_2**alpha_2) * sps.gamma(alpha_2 / p2)
    b = p2 * (theta_1**alpha_1) * sps.gamma(alpha_1 / p1)
    c = (((sps.digamma(alpha_1 / p1)) / p1) + np.log(theta_1)) * (alpha_1 - alpha_2)

    # Bi-gamma functions
    d = sps.gamma((alpha_1 + p2) / p1)
    e = sps.gamma((alpha_1 / p1))

    # Calculations
    f = (theta_1 / theta_2)**(p2)
    g = alpha_1 / p1

    # General calculation and output
    r_kld = np.log(a / b) + c + (d / e) * f - g

    # Final Kullback-Leibler Divergence for Empirically Adjusted Gamma PDFs
    return r_kld
c20fd6764299300dc555bca356d9942e98d38214
354
import numpy as np

def interpolate_rat(nodes, values, use_mp=False):
    """Compute a rational function which interpolates the given nodes/values.

    Args:
        nodes (array): the interpolation nodes; must have odd length and
            be passed in strictly increasing or decreasing order
        values (array): the values at the interpolation nodes
        use_mp (bool): whether to use ``mpmath`` for extended precision. Is
            automatically enabled if `nodes` or `values` use ``mpmath``.

    Returns:
        BarycentricRational: the rational interpolant. If there are `2n + 1` nodes,
        both the numerator and denominator have degree at most `n`.

    References:
        https://doi.org/10.1109/LSP.2007.913583
    """
    # ref: (Knockaert 2008), doi:10.1109/LSP.2007.913583
    # see also: (Ionita 2013), PhD thesis, Rice U
    values = np.asanyarray(values)
    nodes = np.asanyarray(nodes)
    n = len(values) // 2 + 1
    m = n - 1
    if not len(values) == n + m or not len(nodes) == n + m:
        raise ValueError('number of nodes should be odd')
    xa, xb = nodes[0::2], nodes[1::2]
    va, vb = values[0::2], values[1::2]
    # compute the Loewner matrix
    B = (vb[:, None] - va[None, :]) / (xb[:, None] - xa[None, :])
    # choose a weight vector in the nullspace of B
    weights = _nullspace_vector(B, use_mp=use_mp)
    return BarycentricRational(xa, va, weights)
cdc98a0a04a6d35fb409fb4235dab759c1f96c1c
355
import re

def binder_update_page_range(payload):
    """Parser for `binder_update_page_range`"""
    try:
        match = re.match(binder_update_page_range_pattern, payload)
        if match:
            match_group_dict = match.groupdict()  # unused
            return BinderUpdatePageRange(int(match.group(1)), int(match.group(2)),
                                         int(match.group(3)), int(match.group(4)))
    except Exception as e:
        raise ParserError(str(e))  # `e.message` was removed in Python 3
82f3bc931bebc5816b38feb75f918779fa271840
356
import re

def generate_breadcrumb(url: str, separator: str) -> str:
    """
    Function that takes a URL string and a separator and returns a string
    containing breadcrumb navigation.

    Wikipedia page about breadcrumb navigation:
    https://en.wikipedia.org/wiki/Breadcrumb_navigation

    Examples:
    >>> generate_breadcrumb("youtube.com", " > ")
    '<span class="active">HOME</span>'
    >>> generate_breadcrumb("https://github.com/harmonify/index.html", " > ")
    '<a href="/">HOME</a> > <span class="active">HARMONIFY</span>'
    >>> generate_breadcrumb("facebook.com/sebuah-slug-yang-panjang-sekali", " / ")
    '<a href="/">HOME</a> / <span class="active">SSYPS</span>'
    """
    # initialize the variable that will hold the result
    result = []

    # take the path from the url
    path = parse_path(url)

    # strip a trailing index.* from the path
    path = re.sub(r"index\.?.*$", "", path)

    # split the path into a list
    pathList = path.split("/")
    if pathList[-1] == "":
        pathList.pop()

    # generate anchor tags from the start up to the
    # second-to-last element of pathList
    for i in range(len(pathList[:-1])):
        url = "/".join(pathList[: i + 1])
        desc = generate_description(pathList[i])
        anchor = generate_anchor_tag(url, desc)
        result.append(anchor)

    # generate a span tag from the last element of pathList
    span = generate_span_tag(generate_description(pathList[-1]))
    result.append(span)

    # return the anchor tags joined by the separator
    return separator.join(result)
8fc4e84ba68a5ff0d359ded9c70aef9ebec89b32
357
from pathlib import Path

def extract_all_sentences(dataset_path, features_outfile=None):
    """
    Extract features from sentences using pretrained universal sentence
    embeddings and save them in a pickle file

    :param dataset_path: the path of the dataset to use
    :param features_outfile: file used to store the extracted features
    :return: extracted embeddings
    """
    model_path = Path(__file__).parent.parent.parent / "data" / "models" / "use"
    use = hub.load(str(model_path.absolute()))
    feature_extractor = TextFeatureExtractor(use)
    return feature_extractor.extract_all_features(dataset_path, features_outfile)
259ef284310c8bc2a52aa201ec17522e4b00b6d1
359
def search_sorted(array, value):
    """
    Searches the given sorted array for the given value using a
    BinarySearch which should execute in O(log N).

    array      a 1D sorted numerical array
    value      the numerical value to search for

    returns    index of array closest to value
               returns None if value is outside variable bounds
    """
    def index_to_check(rmin, rmax):
        return (rmin + rmax) // 2  # integer division so the result can index the array

    range_min = 0
    range_max_0 = len(array)
    range_max = range_max_0
    numloops = 0
    while numloops < 100:
        numloops += 1
        if (range_max - range_min) == 1:
            if (range_max == range_max_0) or (range_min == 0):
                raise LookupError("For some reason, range_max-range_min reached 1 "
                                  "before the element was found. The element being "
                                  "searched for was %s. (min,max)=%s" %
                                  (value, (range_min, range_max)))
            else:
                high_index = range_max
        else:
            high_index = index_to_check(range_min, range_max)
        high_val = array[high_index]
        low_val = array[high_index - 1]
        if value < low_val:
            range_max = high_index
        elif value > high_val:
            range_min = high_index
        else:  # low_val <= value <= high_val
            if (2 * (high_val - value)) < (high_val - low_val):
                return high_index
            else:
                return high_index - 1
    raise NotImplementedError("Something went wrong! I caught a pseudo-infinite loop!")
6eec5fb24cd2da1989b4b80260ce185191d782f1
362
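A small worked example for the binary search above (hypothetical grid; relies on the integer-division midpoint):

import numpy as np

array = np.linspace(0., 1., 11)  # [0.0, 0.1, ..., 1.0]
print(search_sorted(array, 0.34))  # 3, since array[3] == 0.3 is closest to 0.34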
from typing import Dict
import attr

def to_doc(d: DatasetDoc) -> Dict:
    """
    Serialise a DatasetDoc to a dict

    If you plan to write this out as a yaml file on disk, you're better off
    with `to_formatted_doc()`.
    """
    doc = attr.asdict(
        d,
        recurse=True,
        dict_factory=dict,
        # Exclude fields that are the default.
        filter=lambda attr, value: "doc_exclude" not in attr.metadata
        and value != attr.default
        # Exclude any fields set to None. The distinction should never matter in our docs.
        and value is not None,
        retain_collection_types=False,
    )
    doc["$schema"] = ODC_DATASET_SCHEMA_URL
    if d.geometry is not None:
        doc["geometry"] = shapely.geometry.mapping(d.geometry)
    doc["id"] = str(d.id)
    doc["properties"] = dict(d.properties)
    return doc
83a3ca0838074e000238765c34067e8086e4a2ab
363
def annealing_exp(start, end, pct):
    """Exponentially anneal from start to end as pct goes from 0.0 to 1.0."""
    return start * (end / start) ** pct
4517b07ad7d065a1ba8d4f963c688677846640e3
364
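A quick look at the schedule this produces (hypothetical endpoints): the value decays geometrically, so the halfway point is the geometric mean of start and end.

for pct in (0.0, 0.5, 1.0):
    print(annealing_exp(1.0, 0.01, pct))  # 1.0, 0.1, 0.01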
def _compile_theano_function(param, vars, givens=None):
    """Compile theano function for a given parameter and input variables.

    This function is memoized to avoid repeating costly theano compilations
    when repeatedly drawing values, which is done when generating posterior
    predictive samples.

    Parameters
    ----------
    param : Model variable from which to draw value
    vars : Children variables of `param`
    givens : Variables to be replaced in the Theano graph

    Returns
    -------
    A compiled theano function that takes the values of `vars` as input
    positional args
    """
    return function(vars, param, givens=givens,
                    rebuild_strict=True,
                    on_unused_input='ignore',
                    allow_input_downcast=True)
bed6879a63beebe3af8eaabf654e5617e550e971
365
def redirect(url):
    """Create a response object representing redirection.

    :param url: a URL
    :return: a Response
    """
    headers = {
        "Location": url,
    }
    return Response(headers=headers, code=HTTPStatus.FOUND)
13a61d5854fd5ef50ce51e38a0dc38af282a5693
366
import json

def remove_ordereddict(data, dangerous=True):
    """turns a nested OrderedDict dict into a regular dictionary.
    dangerous=True will replace unserializable values with the string '[unserializable]'
    """
    # so nasty.
    return json.loads(json_dumps(data, dangerous))
f5ca4db424c721a5e9015e77cd727f71b3912699
367
from numbers import Number
from typing import List

def evaluate_v1(tokens: List[str]) -> Number:
    """Evaluates a tokenized expression and returns the result"""
    stack: List = []
    for token in tokens:
        stack = consume_token(token, stack)
    return get_result_from_stack(stack)
1507baf55f427096b12690d76854d0189ec1571e
368
import numpy as np
import pyprind

def load_gromacs_reaction_coord_files(us_path, n_wins, step=10, verbose=False):
    """
    Parameters
    ----------
    us_path: string
        Path to the xvg files with sampled reaction coordinate values
    n_wins: integer
        Number of umbrella runs
    step: integer
        Time interval for analysis
    verbose: Boolean
        Verbosity

    Outputs
    -------
    us_pull_l: list
        list of reaction coordinate values sampled in the umbrella runs
    """
    us_pull_l = []
    bar = pyprind.ProgBar(n_wins, update_interval=15)
    for win_i in range(1, n_wins + 1):
        if verbose:
            print(win_i)
        us_pull_l.append(
            np.loadtxt(us_path.format(win_i), skiprows=17)[::step])
        bar.update(force_flush=False)
    return us_pull_l
47a304592306b142b96638f40410685ce31e0482
369
from typing import Dict

def h_customer_role_playing(
    process_configuration: Dict[str, str], h_customer: Hub, staging_table: StagingTable
) -> RolePlayingHub:
    """Define h_customer_role_playing test hub.

    Args:
        process_configuration: Process configuration fixture value.
        h_customer: Hub customer fixture value.
        staging_table: Staging table fixture value.

    Returns:
        Deserialized role playing hub h_customer_role_playing.
    """
    h_customer_role_playing_fields = [
        Field(
            parent_table_name="h_customer_role_playing",
            name="h_customer_role_playing_hashkey",
            data_type=FieldDataType.TEXT,
            position=1,
            is_mandatory=True,
            length=32,
        ),
        Field(
            parent_table_name="h_customer_role_playing",
            name="r_timestamp",
            data_type=FieldDataType.TIMESTAMP_NTZ,
            position=2,
            is_mandatory=True,
        ),
        Field(
            parent_table_name="h_customer_role_playing",
            name="r_source",
            data_type=FieldDataType.TEXT,
            position=3,
            is_mandatory=True,
        ),
        Field(
            parent_table_name="h_customer_role_playing",
            name="customer_role_playing_id",
            data_type=FieldDataType.TEXT,
            position=4,
            is_mandatory=True,
        ),
    ]
    h_customer_role_playing = RolePlayingHub(
        schema=process_configuration["target_schema"],
        name="h_customer_role_playing",
        fields=h_customer_role_playing_fields,
    )
    h_customer_role_playing.parent_table = h_customer
    h_customer_role_playing.staging_table = staging_table
    return h_customer_role_playing
f8f6fadc9dad8c637fbf173a2d10378f087954f6
370
def _js_requires(offline: bool = False) -> str:
    """Format JS requires for Plotly dependency.

    Args:
        offline: if True, inject entire Plotly library for offline use.

    Returns:
        str: <script> block with Plotly dependency.
    """
    helper_fxns = _load_js_resource(_AxPlotJSResources.HELPER_FXNS)
    if offline:
        script = Template(_load_js_resource(_AxPlotJSResources.PLOTLY_OFFLINE)).render(
            library=plotly_offline.offline.get_plotlyjs()
        )
    else:
        script = _load_js_resource(_AxPlotJSResources.PLOTLY_ONLINE)
    return script + helper_fxns
cc5bf8b5c6a840b9905008c0986f5b7ef65d6942
374
def resnet_retinanet(num_classes, backbone='resnet50', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in
            retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        inputs = keras.layers.Input(shape=(None, None, 3))

    # create the resnet backbone
    if backbone == 'resnet50':
        resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
    elif backbone == 'resnet101':
        resnet = keras_resnet.models.ResNet101(inputs, include_top=False, freeze_bn=True)
    elif backbone == 'resnet152':
        resnet = keras_resnet.models.ResNet152(inputs, include_top=False, freeze_bn=True)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    # invoke modifier if given
    if modifier:
        resnet = modifier(resnet)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes,
                               backbone_layers=resnet.outputs[1:], **kwargs)
9e44d811cc0da8e7810731379259f8921483e907
375
def trans_full_matrix_projection(input, size=0, param_attr=None):
    """
    Different from full_matrix_projection, this projection performs matrix
    multiplication, using the transpose of weight.

    .. math::
       out.row[i] += in.row[i] * w^\mathrm{T}

    :math:`w^\mathrm{T}` means the transpose of weight.
    The simple usage is:

    .. code-block:: python

       proj = trans_full_matrix_projection(input=layer,
                                           size=100,
                                           param_attr=ParamAttr(
                                                name='_proj',
                                                initial_mean=0.0,
                                                initial_std=0.01))

    :param input: input layer
    :type input: LayerOutput
    :param size: The parameter size. Means the width of parameter.
    :type size: int
    :param param_attr: Parameter config, None if use default.
    :type param_attr: ParameterAttribute
    :return: A TransposedFullMatrixProjection Object.
    :rtype: TransposedFullMatrixProjection
    """
    proj = TransposedFullMatrixProjection(
        input_layer_name=input.name, size=size, **param_attr.attr)
    proj.origin = input
    return proj
c8c69a01bf311d449ec6b9af68d61fe917073c75
376
from urllib.request import Request, urlopen  # assuming Python 3's urllib

def http_head_deck_etag(gist_url):
    """Perform a HEAD against gist_url and return the etag."""
    class HeadRequest(Request):
        def get_method(self):
            return 'HEAD'

    head_request = HeadRequest(gist_url + '/raw')
    response = urlopen(head_request)
    headers = response.headers
    etag = headers['etag']
    return etag
b5f4d4ebb80ec95059562c500edc8fc3a4040064
377
import pandas as pd

def _get_fluxes(sol, reactions):
    """Get the primal values for a set of variables."""
    fluxes = {
        r.id: sol.fluxes.loc[r.community_id, r.global_id]
        for r in reactions
    }
    return pd.Series(fluxes)
da5ff0af1a3072baca70ac338ee29a7ea91606ac
378
def compatible_elfs(elf1, elf2):
    """See if two ELFs are compatible

    This compares the aspects of the ELF to see if they're compatible:
    bit size, endianness, machine type, and operating system.

    Parameters
    ----------
    elf1 : ELFFile
    elf2 : ELFFile

    Returns
    -------
    True if compatible, False otherwise
    """
    osabis = frozenset([e.header['e_ident']['EI_OSABI'] for e in (elf1, elf2)])
    compat_sets = (
        frozenset('ELFOSABI_%s' % x for x in ('NONE', 'SYSV', 'GNU', 'LINUX')),
    )
    return ((len(osabis) == 1 or any(osabis.issubset(x) for x in compat_sets)) and
            elf1.elfclass == elf2.elfclass and
            elf1.little_endian == elf2.little_endian and
            elf1.header['e_machine'] == elf2.header['e_machine'])
808c52de45e96d177429ebe1339f4a97c2c219d0
379
def _tear_down_response(data):
    """Helper function to extract header, payload and end from received response data."""
    response_header = data[2:17]
    # Below is actually not used
    response_payload_size = data[18]
    response_payload = data[19:-2]
    response_end = data[-2:]
    return response_header, response_payload, response_end
0c9684c2c054beaff018f85a6775d46202d0095a
381
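A minimal sketch of how the response splitter above carves up a frame (synthetic bytes, not a real protocol message):

data = bytes(range(40))
header, payload, end = _tear_down_response(data)
# header is data[2:17], payload is data[19:-2], end is the last two bytes
print(len(header), len(payload), len(end))  # 15 19 2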
import pandas as pd
import pymysql

def read_data_from_bd(query, host, user, port, database, password):
    """
    Get data from a database.

    Args:
        query: sql query
        host: database host
        user: database username
        port: database port
        database: database name
        password: database password

    Returns:
        df: dataframe
    """
    connection = pymysql.connect(host=host,
                                 user=user,
                                 port=port,
                                 db=database,
                                 password=password)
    df = pd.read_sql(query, connection)
    return df
a7ff96f9bda9b71bced1baaceeeb7c7797839bb4
382
def stack_atomic_call_middleware(q_dict, q_queryset, logger, middleware):
    """ Calls the middleware function atomically.
    * Returns cached queue on error or None
    """
    cached_q_dict = q_dict[:]
    cached_q_query = q_queryset.all()

    try:
        middleware(q_dict, q_queryset, logger)
    except:
        logger.error('MM_STACK: Middleware exception occurred in %s' % middleware.__name__)
        return [cached_q_dict, cached_q_query]

    return None
9d01c51e19702ba4bc0ae155f0b9b386a4d947b6
383
def collate_with_neg_fn(generator):
    """Collate a list of datapoints into a batch, with negative samples in last half of batch."""
    users, items, item_attr, num_attr = collate_fn(generator)
    users[len(users) // 2:] = users[:len(users) // 2]
    return users, items, item_attr, num_attr
189104ba993e522a1a5f7b40dfcaa06b25e69966
384
def build_scenario_3(FW, verbosity=None):
    """
    Tests if override is cleared when all switch behaviours go out of scope.
    And tests switch command with opaque value.

    Returns a list of 2-lists: [time, 0ary function] that describes exactly
    what needs to be executed when. The 0ary functions return a falsey value
    when it succeeded, and a string describing what went wrong else.
    """
    def setup_scenario_3():
        sendBehaviour(0, buildTwilight(9, 14, 80))
        sendBehaviour(1, buildSwitchBehaviour(9, 12, 70))

    scenario = TestScenario(FW, "scenario 3")
    add_common_setup(scenario)
    scenario.addEvent(setup_scenario_3)

    if verbosity is not None:
        scenario.setVerbosity(verbosity)

    # behaviours both become active
    scenario.setTime(9, 0)
    scenario.addExpect("SwitchAggregator", "overrideState", "-1",
                       "overridestate should've been set to translucent")
    scenario.addExpect("SwitchAggregator", "aggregatedState", "70",
                       "aggregatedState should be equal to minimum of active behaviour and twilight")

    # switch command occurs
    scenario.setTime(10, 0)
    scenario.addEvent(bind(sendSwitchCommand, 50))
    scenario.addExpect("SwitchAggregator", "overrideState", "50",
                       "overridestate should've been set to translucent")

    scenario.setTime(10, 0)
    scenario.addExpect("SwitchAggregator", "aggregatedState", "50",
                       "aggregatedState should be equal to override state when it is opaque")

    # all behaviours become inactive
    scenario.setTime(12, 0)
    scenario.addExpect("SwitchAggregator", "overrideState", "-1",
                       "overridestate should've been cleared when it is non-zero and all switch behaviours become inactive")
    scenario.addExpect("SwitchAggregator", "aggregatedState", "0",
                       "aggregatedState should be equal to 0 when no override state or switch behaviours are active")

    return scenario
a676d7997affcb92ea19dc007a8c38eebc919af3
385
def read(input):
    """Read an entire zonefile, returning an AST for it which contains
    formatting information."""
    return _parse(input, actions=Actions())
46cbef9f1b5f85705166ec0527f60e8346157955
386
def generate_conditionally(text='welcome', random_seed=1, **kwargs):
    """
    Input:
        text - str
        random_seed - integer

    Output:
        stroke - numpy 2D-array (T x 3)
    """
    model = ConditionalStrokeModel.load(
        str(MODEL_DIR / 'conditional-stroke-model'),
        batch_size=1, rnn_steps=1, is_train=False, char_seq_len=len(text) + 1)
    return conditional_decode(model, seed=random_seed, text=text, **kwargs)
e58a06fc620a71e6ff0704bfe5cae3693ef5f758
387
import torch
import torch.nn as nn

def cross_entropy(pred, soft_targets):
    """
    pred: unscaled logits
    soft_targets: target-distributions (i.e., sum to 1)
    """
    logsoftmax = nn.LogSoftmax(dim=1)
    return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1))
1a81e36a9839600bd621ec0e3bb0da1d5fca0c0a
388
def get_credentials(_globals: dict):
    """
    Gets credentials from the globals structure.
    The structure may be found in modules/ducktests/tests/checks/utils/check_get_credentials.py
    This function returns the default username and password; the defaults may
    be overridden through globals.
    """
    if USERNAME_KEY in _globals[AUTHENTICATION_KEY] and PASSWORD_KEY in _globals[AUTHENTICATION_KEY]:
        return _globals[AUTHENTICATION_KEY][USERNAME_KEY], _globals[AUTHENTICATION_KEY][PASSWORD_KEY]
    return DEFAULT_AUTH_USERNAME, DEFAULT_AUTH_PASSWORD
2ca95af842f1e68eb31b452374adec4d0b830383
391
def hideablerevs(repo):
    """Revision candidates to be hidden

    This is a standalone function to allow extensions to wrap it.

    Because we use the set of immutable changesets as a fallback subset in
    branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
    changesets as "hideable". Doing so would break multiple code assertions
    and lead to crashes."""
    return obsolete.getrevs(repo, 'obsolete')
caf59496abeb0f6d42063509f3357c7520a90d82
392
import numpy as np
import torch

def squeeze_features(protein):
    """Remove singleton and repeated dimensions in protein features."""
    protein["aatype"] = torch.argmax(protein["aatype"], dim=-1)
    for k in [
        "domain_name",
        "msa",
        "num_alignments",
        "seq_length",
        "sequence",
        "superfamily",
        "deletion_matrix",
        "resolution",
        "between_segment_residues",
        "residue_index",
        "template_all_atom_mask",
    ]:
        if k in protein:
            final_dim = protein[k].shape[-1]
            if isinstance(final_dim, int) and final_dim == 1:
                if torch.is_tensor(protein[k]):
                    protein[k] = torch.squeeze(protein[k], dim=-1)
                else:
                    protein[k] = np.squeeze(protein[k], axis=-1)

    for k in ["seq_length", "num_alignments"]:
        if k in protein:
            protein[k] = protein[k][0]
    return protein
05c1a174935f7ebe845a0a3b308ca933baccfde6
393
import pathlib

def get_cache_dir(app_name: str, suffix: str = None, create: bool = True):
    """Get a local cache directory for a given application name.

    Args:
        app_name: The name of the application.
        suffix: A subdirectory appended to the cache dir.
        create: Whether to create the directory and its parents if it does not
            already exist.
    """
    appdirs = _import_appdirs()
    if appdirs is None:
        raise ImportError(
            "To use `dm.utils.fs.get_cache_dir()`, you must have `appdirs` "
            "installed: `conda install appdirs`."
        )
    cache_dir = pathlib.Path(appdirs.user_cache_dir(appname=app_name))
    if suffix is not None:
        cache_dir /= suffix
    if create:
        cache_dir.mkdir(exist_ok=True, parents=True)
    return cache_dir
40db55ce5d891a5cd496d23760ac66fe23206ed7
394
import random
import pickle
from collections import defaultdict

def _dump_test_data(filename, num_per_type=10):
    """Get corpus of statements for testing that has a range of stmt types."""
    sp = signor.process_from_web()

    # Group statements by type
    stmts_by_type = defaultdict(list)
    for stmt in sp.statements:
        stmts_by_type[stmt.__class__].append(stmt)

    # Sample statements of each type (without replacement)
    stmt_sample = []
    for stmt_type, stmt_list in stmts_by_type.items():
        if len(stmt_list) <= num_per_type:
            stmt_sample.extend(stmt_list)
        else:
            stmt_sample.extend(random.sample(stmt_list, num_per_type))

    # Make a random binary class vector for the stmt list
    y_arr = [random.choice((0, 1)) for s in stmt_sample]
    with open(test_stmt_path, 'wb') as f:
        pickle.dump((stmt_sample, y_arr), f)
    return stmt_sample
4eb2fbcfc6524d3f10c92e13f01475834f26f7f2
395
def gin_dict_parser(coll):
    """
    Use for parsing collections that may contain a 'gin' key.
    The 'gin' key is assumed to map to either a dict or str value that
    contains gin bindings.
    e.g.
    {'gin': {'Classifier.n_layers': 2, 'Classifier.width': 3}}
    or
    {'gin': 'Classifier.n_layers = 2\nClassifier.width = 3'}
    """
    if 'gin' in coll:
        if is_mapping(coll['gin']):
            gin.parse_config("".join(map(lambda t: f'{t[0]} = {t[1]}\n', iteritems(coll['gin']))))
        elif isinstance(coll['gin'], str):
            gin.parse_config(coll['gin'])
    return coll
d47fa1785948d70e5bf4575ed879fe37827db6ba
396
import numpy as np

def ones(shape, dtype):
    """
    Declare a new worker-local tensor with all elements initialized to one.

    :param shape: the tensor shape
    :param dtype: the tensor data type
    :return: the tensor expression
    """
    np_dtype = DType(dtype).as_numpy()
    init = _ConstTensor(np.ones(shape, dtype=np_dtype))
    return LocalTensor(init)
7345dad51739c1ada5dfd89ae9c0d0b21df54ce8
397
from urllib.parse import urlparse

def _valid_url(url):
    """Checks that the given URL is Discord embed friendly. Or at least, it tries."""

    def _valid_string(segment, main=True):
        if not len(segment):
            return False
        for c in [ord(it.lower()) for it in segment]:
            if not (97 <= c <= 122 or (main and (48 <= c <= 57 or c == 45))):
                return False
        return True

    test = urlparse(url)
    if not (test.scheme and test.netloc and '.' in test.netloc):
        return False

    # Discord only accepts http or https
    if test.scheme not in ('http', 'https'):
        return False

    # Test for valid netloc
    netloc_split = test.netloc.split('.')
    if len(netloc_split) < 2:
        return False  # http://foo

    tld = test.netloc.split('.')[-1]
    if not (len(tld) >= 2 and _valid_string(tld, main=False)):
        return False  # http://foo.123

    for segment in netloc_split[:-1]:
        if not _valid_string(segment):
            return False  # http://foo..bar or http://fo*o.bar

    for c in url:
        if not 33 <= ord(c) <= 126:
            return False  # non-ASCII only URLs

    return True
74d359bc2c8430fc5990cead6695626ea825db64
398
def isText(node):
    """
    Returns True if the supplied node is free text.
    """
    return node.nodeType == node.TEXT_NODE
150efc016028d0fab4630ad5e754ebaeed0c82c0
399
def _parse_descriptor(desc: str, ctx: '_ParseDescriptorContext') -> 'Descriptor':
    """
    :meta private:

    Parse a descriptor given the context level we are in.
    Used recursively to parse subdescriptors

    :param desc: The descriptor string to parse
    :param ctx: The :class:`_ParseDescriptorContext` indicating the level we are in
    :return: The parsed descriptor
    :raises: ValueError: if the descriptor is malformed
    """
    func, expr = _get_func_expr(desc)
    if func == "pk":
        pubkey, expr = parse_pubkey(expr)
        if expr:
            raise ValueError("more than one pubkey in pk descriptor")
        return PKDescriptor(pubkey)
    if func == "pkh":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH or ctx == _ParseDescriptorContext.P2WSH):
            raise ValueError("Can only have pkh at top level, in sh(), or in wsh()")
        pubkey, expr = parse_pubkey(expr)
        if expr:
            raise ValueError("More than one pubkey in pkh descriptor")
        return PKHDescriptor(pubkey)
    if func == "sortedmulti" or func == "multi":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH or ctx == _ParseDescriptorContext.P2WSH):
            raise ValueError("Can only have multi/sortedmulti at top level, in sh(), or in wsh()")
        is_sorted = func == "sortedmulti"
        comma_idx = expr.index(",")
        thresh = int(expr[:comma_idx])
        expr = expr[comma_idx + 1:]
        pubkeys = []
        while expr:
            pubkey, expr = parse_pubkey(expr)
            pubkeys.append(pubkey)
        if len(pubkeys) == 0 or len(pubkeys) > 16:
            raise ValueError("Cannot have {} keys in a multisig; must have between 1 and 16 keys, inclusive".format(len(pubkeys)))
        elif thresh < 1:
            raise ValueError("Multisig threshold cannot be {}, must be at least 1".format(thresh))
        elif thresh > len(pubkeys):
            raise ValueError("Multisig threshold cannot be larger than the number of keys; threshold is {} but only {} keys specified".format(thresh, len(pubkeys)))
        if ctx == _ParseDescriptorContext.TOP and len(pubkeys) > 3:
            raise ValueError("Cannot have {} pubkeys in bare multisig: only at most 3 pubkeys".format(len(pubkeys)))
        return MultisigDescriptor(pubkeys, thresh, is_sorted)
    if func == "wpkh":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH):
            raise ValueError("Can only have wpkh() at top level or inside sh()")
        pubkey, expr = parse_pubkey(expr)
        if expr:
            raise ValueError("More than one pubkey in wpkh descriptor")
        return WPKHDescriptor(pubkey)
    if func == "sh":
        if ctx != _ParseDescriptorContext.TOP:
            raise ValueError("Can only have sh() at top level")
        subdesc = _parse_descriptor(expr, _ParseDescriptorContext.P2SH)
        return SHDescriptor(subdesc)
    if func == "wsh":
        if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH):
            raise ValueError("Can only have wsh() at top level or inside sh()")
        subdesc = _parse_descriptor(expr, _ParseDescriptorContext.P2WSH)
        return WSHDescriptor(subdesc)
    if func == "tr":
        if ctx != _ParseDescriptorContext.TOP:
            raise ValueError("Can only have tr at top level")
        internal_key, expr = parse_pubkey(expr)
        subscripts = []
        depths = []
        if expr:
            # Path from top of the tree to what we're currently processing.
            # branches[i] == False: left branch in the i'th step from the top
            # branches[i] == True: right branch
            branches = []
            while True:
                # Process open braces
                while True:
                    try:
                        expr = _get_const(expr, "{")
                        branches.append(False)
                    except ValueError:
                        break
                    if len(branches) > MAX_TAPROOT_NODES:
                        raise ValueError(f"tr() supports at most {MAX_TAPROOT_NODES} nesting levels")
                # Process script expression
                sarg, expr = _get_expr(expr)
                subscripts.append(_parse_descriptor(sarg, _ParseDescriptorContext.P2TR))
                depths.append(len(branches))
                # Process closing braces
                while len(branches) > 0 and branches[-1]:
                    expr = _get_const(expr, "}")
                    branches.pop()
                # If we're at the end of a left branch, expect a comma
                if len(branches) > 0 and not branches[-1]:
                    expr = _get_const(expr, ",")
                    branches[-1] = True
                if len(branches) == 0:
                    break
        return TRDescriptor(internal_key, subscripts, depths)
    if ctx == _ParseDescriptorContext.P2SH:
        raise ValueError("A function is needed within P2SH")
    elif ctx == _ParseDescriptorContext.P2WSH:
        raise ValueError("A function is needed within P2WSH")
    raise ValueError("{} is not a valid descriptor function".format(func))
7c755482ba31ce656ae597eeb166de9cfaa2f649
400
def get_editable_fields(cc_content, context):
    """
    Return the set of fields that the requester can edit on the given content
    """
    # For closed thread:
    # no edits, except 'abuse_flagged' and 'read' are allowed for thread
    # no edits, except 'abuse_flagged' is allowed for comment
    ret = {"abuse_flagged"}
    if cc_content["type"] == "thread" and cc_content["closed"]:
        ret |= {"read"}
        return ret
    if cc_content["type"] == "comment" and context["thread"]["closed"]:
        return ret

    # Shared fields
    ret |= {"voted"}
    if _is_author_or_privileged(cc_content, context):
        ret |= {"raw_body"}

    # Thread fields
    if cc_content["type"] == "thread":
        ret |= {"following", "read"}
        if _is_author_or_privileged(cc_content, context):
            ret |= {"topic_id", "type", "title"}
        if context["is_requester_privileged"] and context["discussion_division_enabled"]:
            ret |= {"group_id"}

    # Comment fields
    if (
        cc_content["type"] == "comment" and (
            context["is_requester_privileged"] or (
                _is_author(context["thread"], context) and
                context["thread"]["thread_type"] == "question"
            )
        )
    ):
        ret |= {"endorsed"}

    return ret
19cea27fdda79b365c25329851bb7baf8d18bcac
401
import numpy as np

def rate_of_matrix_function(A, Adot, f, fprime):
    r"""Find the rate of the tensor A

    Parameters
    ----------
    A : ndarray (3,3)
        A diagonalizable tensor
    Adot : ndarray (3,3)
        Rate of A
    f : callable
    fprime : callable
        Derivative of f

    Returns
    -------
    Ydot : ndarray (3,3)

    Notes
    -----
    For a diagonalizable tensor A (the strain) which has a quasi-arbitrary
    spectral expansion

    .. math::
        A = \sum_{i=1}^3 \lambda_i P_{i}

    and if a second tensor Y is a principal function of A, defined by

    .. math::
        Y = \sum_{i=1}^3 f(\lambda_i) P_i,

    compute the time rate \dot{Y}. Algorithm taken from Brannon's
    Tensor book, from the highlighted box near Equation (28.404) on
    page 550.
    """
    # Compute the eigenvalues and eigenprojections.
    eig_vals, eig_vecs = np.linalg.eig(A)
    eig_projs = [np.outer(eig_vecs[:, i], eig_vecs[:, i]) for i in [0, 1, 2]]

    # Assemble the rate of Y.
    Ydot = np.zeros((3, 3))
    for eigi, proji in zip(eig_vals, eig_projs):
        for eigj, projj in zip(eig_vals, eig_projs):
            if eigi == eigj:
                gamma = fprime(eigi)
            else:
                gamma = (f(eigi) - f(eigj)) / (eigi - eigj)
            Ydot += gamma * np.dot(proji, np.dot(Adot, projj))

    return Ydot
404d189b2bc6cc91c30ef857a8ebef7cc0db49d9
402
def enumerate_changes(levels):
    """Assign a unique integer to each run of identical values.

    Repeated but non-consecutive values will be assigned different integers.
    """
    return levels.diff().fillna(0).abs().cumsum().astype(int)
4787c0e84d6bca8f6038389e5bebf74317059ed8
403
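A small worked example for the run-numbering one-liner above (it expects a pandas Series, since it uses .diff()):

import pandas as pd

levels = pd.Series([1, 1, 2, 2, 1])
print(enumerate_changes(levels).tolist())  # [0, 0, 1, 1, 2]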
def TDataStd_ByteArray_Set(*args):
    """
    * Finds or creates an attribute with the array. If <isDelta> == False,
    DefaultDeltaOnModification is used. If attribute is already set, all input
    parameters are refused and the found attribute is returned.

    :param label:
    :type label: TDF_Label &
    :param lower:
    :type lower: int
    :param upper:
    :type upper: int
    :param isDelta: default value is Standard_False
    :type isDelta: bool
    :rtype: Handle_TDataStd_ByteArray
    """
    return _TDataStd.TDataStd_ByteArray_Set(*args)
a0f3402e1106021affb3dfe12fe93c5ae8ed2dad
404
def _get_total_elements(viewer) -> int:
    """
    We need to fetch a workflows listing to figure out how many entries we
    have in the database, since the API does not contain a method to count
    the DB entries.

    :param viewer: CWL Viewer instance URL
    :return: number of total elements in the CWL Viewer instance DB
    """
    smallest_workflow_dataset: dict = _fetch_workflows_data(viewer, 0, 1).json()
    return int(smallest_workflow_dataset['totalElements'])
a7289ed13546b68e381793e0fdd8410f986f87d4
405
def entrepreneursIncubated(dateFrom=None, dateTo=None):
    """
    Returns all entrepreneurs whose projects reached the incubation stage
    """
    queryset = Stage.objects
    output = {
        'queryset': None,
        'fields': [],
        'values': [],
        'fieldLabels': [],
    }
    queryset = queryset.filter(stage_type="IN")
    # check for duplicates
    projects = Project.objects.filter(id__in=queryset.values('project_id'))
    entrepreneurs = Entrepreneur.objects.filter(id__in=projects.values('entrepreneurs'))
    output['queryset'] = entrepreneurs
    fieldsDict = helperDictionaries.getModelReportFields('entrepreneurs')
    output['fieldDict'] = fieldsDict
    output['fields'] = [*fieldsDict.keys()]
    output['fieldLabels'] = [*fieldsDict.values()]
    return output
5b06dc8a9ca15357ecab20e615d329fcaaffc8d8
406
def get_steps(x, shape):
    """
    Convert a (vocab_size, steps * batch_size) array
    into a [(vocab_size, batch_size)] * steps list of views
    """
    steps = shape[1]
    if x is None:
        return [None for step in range(steps)]
    xs = x.reshape(shape + (-1,))
    return [xs[:, step, :] for step in range(steps)]
44133ddd1ad78b3ea05042c6c16558bb982c9206
407
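A quick shape check for the view-splitting helper above (hypothetical sizes):

import numpy as np

vocab_size, steps, batch_size = 5, 3, 2
x = np.arange(vocab_size * steps * batch_size).reshape(vocab_size, steps * batch_size)
views = get_steps(x, (vocab_size, steps))
print(len(views), views[0].shape)  # 3 (5, 2)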
def LHS(
    a: int,
    operation1: str,
    b: int,
    operation2: str,
    c: float
):
    """
    E.g. LHS(a, 'plus', b, 'times', c) does (a + b) * c

    params:
        a: int. First number in equation
        operation1: str. Must be 'plus', 'minus', 'times', 'divide'
        b: int. Second number in equation
        operation2: str. Must be 'plus', 'minus', 'times', 'divide'
        c: float. Third number in equation

    return: int
    """
    step_1 = word_function(a, operation1, b)
    step_2 = word_function(step_1, operation2, c)
    return step_2
af023f3cd70d123492c8a6abb92d7ae2994b56ae
408
import math

def _validate(api_indicator_matype, option, parameters: dict, **kwargs):  # -> dict
    """Validates kwargs and attaches them to parameters."""
    # APO, PPO, BBANDS
    matype = int(math.fabs(kwargs["matype"])) if "matype" in kwargs else None
    if option == "matype" and matype is not None and matype in api_indicator_matype:
        parameters["matype"] = matype

    # BBANDS
    nbdevup = math.fabs(kwargs["nbdevup"]) if "nbdevup" in kwargs else None
    nbdevdn = math.fabs(kwargs["nbdevdn"]) if "nbdevdn" in kwargs else None
    if option == "nbdevup" and nbdevup is not None:
        parameters["nbdevup"] = nbdevup
    if option == "nbdevdn" and nbdevdn is not None:
        parameters["nbdevdn"] = nbdevdn

    # ULTOSC
    timeperiod1 = int(math.fabs(kwargs["timeperiod1"])) if "timeperiod1" in kwargs else None
    timeperiod2 = int(math.fabs(kwargs["timeperiod2"])) if "timeperiod2" in kwargs else None
    timeperiod3 = int(math.fabs(kwargs["timeperiod3"])) if "timeperiod3" in kwargs else None
    if option == "timeperiod1" and timeperiod1 is not None:
        parameters["timeperiod1"] = timeperiod1
    if option == "timeperiod2" and timeperiod2 is not None:
        parameters["timeperiod2"] = timeperiod2
    if option == "timeperiod3" and timeperiod3 is not None:
        parameters["timeperiod3"] = timeperiod3

    # SAR
    acceleration = math.fabs(float(kwargs["acceleration"])) if "acceleration" in kwargs else None
    maximum = math.fabs(float(kwargs["maximum"])) if "maximum" in kwargs else None
    if option == "acceleration" and acceleration is not None:
        parameters["acceleration"] = acceleration
    if option == "maximum" and maximum is not None:
        parameters["maximum"] = maximum

    # MAMA
    fastlimit = math.fabs(float(kwargs["fastlimit"])) if "fastlimit" in kwargs else None
    slowlimit = math.fabs(float(kwargs["slowlimit"])) if "slowlimit" in kwargs else None
    if option == "fastlimit" and fastlimit is not None and 0 < fastlimit < 1:
        parameters["fastlimit"] = fastlimit
    if option == "slowlimit" and slowlimit is not None and 0 < slowlimit < 1:
        parameters["slowlimit"] = slowlimit

    # MACD, APO, PPO, ADOSC
    fastperiod = int(math.fabs(kwargs["fastperiod"])) if "fastperiod" in kwargs else None
    slowperiod = int(math.fabs(kwargs["slowperiod"])) if "slowperiod" in kwargs else None
    signalperiod = int(math.fabs(kwargs["signalperiod"])) if "signalperiod" in kwargs else None
    if option == "fastperiod" and fastperiod is not None:
        parameters["fastperiod"] = fastperiod
    if option == "slowperiod" and slowperiod is not None:
        parameters["slowperiod"] = slowperiod
    if option == "signalperiod" and signalperiod is not None:
        parameters["signalperiod"] = signalperiod

    # MACDEXT
    fastmatype = int(math.fabs(kwargs["fastmatype"])) if "fastmatype" in kwargs else None
    slowmatype = int(math.fabs(kwargs["slowmatype"])) if "slowmatype" in kwargs else None
    signalmatype = int(math.fabs(kwargs["signalmatype"])) if "signalmatype" in kwargs else None
    if option == "fastmatype" and fastmatype is not None and fastmatype in api_indicator_matype:
        parameters["fastmatype"] = fastmatype
    if option == "slowmatype" and slowmatype is not None and slowmatype in api_indicator_matype:
        parameters["slowmatype"] = slowmatype
    if option == "signalmatype" and signalmatype is not None and signalmatype in api_indicator_matype:
        parameters["signalmatype"] = signalmatype

    # STOCH(F), STOCHRSI
    fastkperiod = int(math.fabs(kwargs["fastkperiod"])) if "fastkperiod" in kwargs else None
    fastdperiod = int(math.fabs(kwargs["fastdperiod"])) if "fastdperiod" in kwargs else None
    fastdmatype = int(math.fabs(kwargs["fastdmatype"])) if "fastdmatype" in kwargs else None
    if option == "fastkperiod" and fastkperiod is not None:
        parameters["fastkperiod"] = fastkperiod
    if option == "fastdperiod" and fastdperiod is not None:
        parameters["fastdperiod"] = fastdperiod
    if option == "fastdmatype" and fastdmatype is not None and fastdmatype in api_indicator_matype:
        parameters["fastdmatype"] = fastdmatype

    # STOCH(F), STOCHRSI
    slowkperiod = int(math.fabs(kwargs["slowkperiod"])) if "slowkperiod" in kwargs else None
    slowdperiod = int(math.fabs(kwargs["slowdperiod"])) if "slowdperiod" in kwargs else None
    slowkmatype = int(math.fabs(kwargs["slowkmatype"])) if "slowkmatype" in kwargs else None
    slowdmatype = int(math.fabs(kwargs["slowdmatype"])) if "slowdmatype" in kwargs else None
    if option == "slowkperiod" and slowkperiod is not None:
        parameters["slowkperiod"] = slowkperiod
    if option == "slowdperiod" and slowdperiod is not None:
        parameters["slowdperiod"] = slowdperiod
    if option == "slowkmatype" and slowkmatype is not None and slowkmatype in api_indicator_matype:
        parameters["slowkmatype"] = slowkmatype
    if option == "slowdmatype" and slowdmatype is not None and slowdmatype in api_indicator_matype:
        parameters["slowdmatype"] = slowdmatype

    return parameters
d73903514aa87f854d08e3447cca85f64eaa4b31
409
def scale_y_values(y_data, y_reference, y_max):
    """
    Scale the plot in y direction, to prevent extreme values.

    :param y_data: the y data of the plot
    :param y_reference: the maximum value of the plot series (e.g. Normal
        force), which will be scaled to y_max
    :param y_max: the maximum y value for the plot (e.g. if y_max=1, no y
        value in the plot will be greater than 1)
    """
    multipl_factor = y_max / y_reference
    for i in range(len(y_data)):
        y_data[i] = y_data[i] * multipl_factor
    return y_data, multipl_factor
b3b22b0f868ce46926a4eecfc1c5d0ac2a7c1f7e
410
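A quick check of the y-scaling helper above (hypothetical values):

y_data = [0.0, 50.0, 100.0]
scaled, factor = scale_y_values(y_data, y_reference=100.0, y_max=1.0)
print(scaled, factor)  # [0.0, 0.5, 1.0] 0.01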
def set_heating_contribution(agent, pv_power):
    """ If the water tank is currently in use, compute and return the part
    of the pv_power used for heating the water"""
    pv_power_to_heating = 0
    if agent.water_tank.is_active():
        pv_power_to_heating = pv_power * agent.pv_panel.heating_contribution
    return pv_power_to_heating
ece29b7f0fbbe10907ada8fd1450919f01ab74c3
411
from tqdm import tqdm  # the module itself is not callable; import the class

def predict_direction(clf, tickers, **kwargs):
    """
    Use clf (an untrained classifier) to predict direction of change for
    validation data for each stock in 'tickers'. Pass additional keyword
    arguments to be used in building the stock datasets.

    Args:
        --clf: An untrained sklearn classifier
        --tickers: A list of tickers to use
        --kwargs: Additional arguments for the StockDataset class

    Returns:
        A dictionary where each key is a ticker in 'tickers' and each value is
        the accuracy for the predictions for that ticker.
    """
    results = {}
    for ticker in tqdm(tickers):
        # Build and split dataset
        ds = StockDataset(tickers=ticker, quiet=True, **kwargs)
        t_data, v_data, t_label, v_label = ds.split(label_field='Direction')
        # Clone classifier
        clf_clone = sklearn.base.clone(clf)
        # Fit classifier to data
        clf_clone.fit(t_data, t_label)
        # Predict and store results
        v_pred = clf_clone.predict(v_data)
        results[ticker] = mymetrics.direction_accuracy(v_label, v_pred)
    return results
798e25d3b652227407b50e8eec9f0289770d9d9a
412
def parse_primary(index):
    """Parse primary expression."""
    if token_is(index, token_kinds.open_paren):
        node, index = parse_expression(index + 1)
        index = match_token(index, token_kinds.close_paren, ParserError.GOT)
        return expr_nodes.ParenExpr(node), index
    elif token_is(index, token_kinds.number):
        return expr_nodes.Number(p.tokens[index]), index + 1
    elif (token_is(index, token_kinds.identifier)
          and not p.symbols.is_typedef(p.tokens[index])):
        return expr_nodes.Identifier(p.tokens[index]), index + 1
    elif token_is(index, token_kinds.string):
        return expr_nodes.String(p.tokens[index].content), index + 1
    elif token_is(index, token_kinds.char_string):
        chars = p.tokens[index].content
        return expr_nodes.Number(chars[0]), index + 1
    else:
        raise_error("expected expression", index, ParserError.GOT)
2413a0793062e2cfa52fb8d922c21a3af7d06a66
414
from math import isnan, nan  # assuming math's isnan/nan

def chopper_pulses_of_mode(i):
    """How many single pulses the chopper transmits per opening, or,
    in hybrid mode, how many single bunches the transmitted intensity
    corresponds to, based on the current settings of the chopper.
    i: 0-based integer"""
    if isnan(i) or i < 0 or i >= len(chopper.pulses):
        return nan
    return chopper.pulses[int(i)]
c10ade662b515cfdde721b9c7c8cb2aac7fa8c03
415
def _get_content_from_tag(tag):
    """Gets the content from tag till before a new section."""
    contents = []
    next_tag = tag
    while next_tag and not _is_section(next_tag):
        content = parse_content(next_tag.text())
        if content:
            contents.append(content)
        next_tag = next_tag.next
    return ' '.join(contents)
832f01b7db2a5c2cdcc3454b1253a8399464952e
416
async def get_connections(request: data_models.ConnectionsRequest):
    """Get connections *from* and *to* each entity in the request.
    Connections *to* are all the subject-predicate pairs where the entity is
    the object, and connections *from* are all the predicate-object pairs
    where the entity is the subject."""
    response = {}
    for ent in request.entities:
        ent_normalised = utils.normaliseURI(ent)
        connections_from = sparql_connector.get_sparql_results(
            sparql.get_p_o(ent_normalised, labels=request.labels, limit=request.limit)
        )["results"]["bindings"]
        connections_to = sparql_connector.get_sparql_results(
            sparql.get_s_p(ent_normalised, labels=request.labels, limit=request.limit)
        )["results"]["bindings"]

        for predicate_object_dict in connections_from:
            if (
                "collections.vam.ac.uk" in predicate_object_dict["object"]["value"]
            ) and "objectLabel" not in predicate_object_dict:
                object_label = utils.get_vam_object_title(
                    predicate_object_dict["object"]["value"]
                )
                if object_label is not None:
                    predicate_object_dict["objectLabel"] = dict()
                    predicate_object_dict["objectLabel"]["type"] = "literal"
                    predicate_object_dict["objectLabel"]["value"] = object_label

        for subject_predicate_dict in connections_to:
            if (
                "collections.vam.ac.uk" in subject_predicate_dict["subject"]["value"]
            ) and "subjectLabel" not in subject_predicate_dict:
                subject_label = utils.get_vam_object_title(
                    subject_predicate_dict["subject"]["value"]
                )
                if subject_label is not None:
                    subject_predicate_dict["subjectLabel"] = dict()
                    subject_predicate_dict["subjectLabel"]["type"] = "literal"
                    subject_predicate_dict["subjectLabel"]["value"] = subject_label

        response.update(
            {
                ent: {
                    "from": connections_from,
                    "to": connections_to,
                }
            }
        )
    return response
e25a363cbd4cbbaa7a2f36132f73fcfa2ebd1d3c
417
def sunlight_duration(hour_angle_sunrise):
    """Returns the duration of sunlight, in minutes, with the sunrise hour
    angle given in degrees."""
    # 8 = 2 * 60 min / 15 deg-per-hour, so this converts the half-day hour
    # angle into the full day length in minutes
    sunlight_duration = 8 * hour_angle_sunrise
    return sunlight_duration
b2887dd86caf25e7cac613bfa10b4de26c932c09
418
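A worked check of the constant above: the sun moves 15 degrees per hour, and the hour angle covers half the day, so duration = (2 * hour_angle / 15) * 60 = 8 * hour_angle minutes. For a 90-degree sunrise hour angle:

print(sunlight_duration(90))  # 720 minutes, i.e. 12 hours of daylight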
import warnings

import numpy as np
from scipy.constants import c  # assuming SciPy's speed-of-light constant

def add_particle_bunch_gaussian(sim, q, m, sig_r, sig_z, n_emit, gamma0,
                                sig_gamma, n_physical_particles,
                                n_macroparticles, tf=0., zf=0., boost=None,
                                save_beam=None, z_injection_plane=None,
                                initialize_self_field=True):
    """
    Introduce a relativistic Gaussian particle bunch in the simulation,
    along with its space charge field.

    The bunch is initialized with a normalized emittance `n_emit`,
    in such a way that it will be focused at time `tf`, at the position `zf`.
    Thus if `tf` is not 0, the bunch will be initially out of focus.
    (This does not take space charge effects into account.)

    Parameters
    ----------
    sim : a Simulation object
        The structure that contains the simulation.
    q : float (in Coulomb)
        Charge of the particle species
    m : float (in kg)
        Mass of the particle species
    sig_r : float (in meters)
        The transverse RMS bunch size.
    sig_z : float (in meters)
        The longitudinal RMS bunch size.
    n_emit : float (in meters)
        The normalized emittance of the bunch.
    gamma0 : float
        The Lorentz factor of the electrons.
    sig_gamma : float
        The absolute energy spread of the bunch.
    n_physical_particles : float
        The number of physical particles (e.g. electrons) the bunch should
        consist of.
    n_macroparticles : int
        The number of macroparticles the bunch should consist of.
    zf : float (in meters), optional
        Position of the focus.
    tf : float (in seconds), optional
        Time at which the bunch reaches focus.
    boost : a BoostConverter object, optional
        A BoostConverter object defining the Lorentz boost of the simulation.
    save_beam : string, optional
        Saves the generated beam distribution as an .npz file "string".npz
    z_injection_plane : float (in meters) or None
        When `z_injection_plane` is not None, then particles have a ballistic
        motion for z < z_injection_plane. This is sometimes useful in
        boosted-frame simulations. `z_injection_plane` is always given in the
        lab frame.
    initialize_self_field : bool, optional
        Whether to calculate the initial space charge fields of the bunch and
        add these fields to the fields on the grid (Default: True)
    """
    # Generate Gaussian gamma distribution of the beam
    if sig_gamma > 0.:
        gamma = np.random.normal(gamma0, sig_gamma, n_macroparticles)
    else:
        # Zero energy spread beam
        gamma = np.full(n_macroparticles, gamma0)
        if sig_gamma < 0.:
            warnings.warn(
                "Negative energy spread sig_gamma detected."
                " sig_gamma will be set to zero. \n")
    # Get inverse gamma
    inv_gamma = 1. / gamma
    # Get Gaussian particle distribution in x,y,z
    x = sig_r * np.random.normal(0., 1., n_macroparticles)
    y = sig_r * np.random.normal(0., 1., n_macroparticles)
    z = zf + sig_z * np.random.normal(0., 1., n_macroparticles)

    # Define sigma of ux and uy based on normalized emittance
    sig_ur = (n_emit / sig_r)
    # Get Gaussian distribution of transverse normalized momenta ux, uy
    ux = sig_ur * np.random.normal(0., 1., n_macroparticles)
    uy = sig_ur * np.random.normal(0., 1., n_macroparticles)

    # Finally we calculate the uz of each particle
    # from the gamma and the transverse momenta ux, uy
    uz_sqr = (gamma ** 2 - 1) - ux ** 2 - uy ** 2

    # Check for unphysical particles with uz**2 < 0
    mask = uz_sqr >= 0
    N_new = np.count_nonzero(mask)
    if N_new < n_macroparticles:
        warnings.warn(
            "Particles with uz**2<0 detected."
            " %d Particles will be removed from the beam. \n"
            "This will truncate the distribution of the beam"
            " at gamma ~= 1. \n"
            "However, the charge will be kept constant. \n"
            % (n_macroparticles - N_new))
        # Remove unphysical particles with uz**2 < 0
        x = x[mask]
        y = y[mask]
        z = z[mask]
        ux = ux[mask]
        uy = uy[mask]
        inv_gamma = inv_gamma[mask]
        uz_sqr = uz_sqr[mask]
    # Calculate longitudinal momentum of the bunch
    uz = np.sqrt(uz_sqr)
    # Get weight of each particle
    w = n_physical_particles / N_new * np.ones_like(x)

    # Propagate distribution to an out-of-focus position tf.
    # (without taking space charge effects into account)
    if tf != 0.:
        x = x - ux * inv_gamma * c * tf
        y = y - uy * inv_gamma * c * tf
        z = z - uz * inv_gamma * c * tf

    # Save beam distribution to an .npz file
    if save_beam is not None:
        np.savez(save_beam, x=x, y=y, z=z, ux=ux, uy=uy, uz=uz,
                 inv_gamma=inv_gamma, w=w)

    # Add the electrons to the simulation
    ptcl_bunch = add_particle_bunch_from_arrays(
        sim, q, m, x, y, z, ux, uy, uz, w, boost=boost,
        z_injection_plane=z_injection_plane,
        initialize_self_field=initialize_self_field)
    return ptcl_bunch
3aedae4d9ff7ec6026b3153946939858db48d332
419
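# A minimal, self-contained sketch of the uz truncation step used in
# add_particle_bunch_gaussian above: particles whose transverse momenta
# would require uz**2 < 0 are masked out. All values below are made up.
import numpy as np

gamma = np.array([1.01, 5.0, 10.0])
ux = np.array([2.0, 0.1, 0.3])
uy = np.array([0.5, 0.2, 0.1])
uz_sqr = (gamma ** 2 - 1) - ux ** 2 - uy ** 2
mask = uz_sqr >= 0
print(mask)                   # [False  True  True]: first particle is unphysical
print(np.sqrt(uz_sqr[mask]))  # uz of the surviving particles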
def sell_shares_nb(cash_now, shares_now, size, direction, price, fees, fixed_fees, slippage, min_size, allow_partial, raise_reject, log_record, log): """Sell shares.""" # Get optimal order size if direction == Direction.LongOnly: final_size = min(shares_now, size) else: final_size = size # Check against minimum size if abs(final_size) < min_size: if raise_reject: raise RejectedOrderError("Order rejected: Final size is less than minimum allowed") return order_not_filled_nb( cash_now, shares_now, OrderStatus.Rejected, StatusInfo.MinSizeNotReached, log_record, log) # Check against partial fill if np.isfinite(size) and is_less_nb(final_size, size) and not allow_partial: # np.inf doesn't count if raise_reject: raise RejectedOrderError("Order rejected: Final size is less than requested") return order_not_filled_nb( cash_now, shares_now, OrderStatus.Rejected, StatusInfo.PartialFill, log_record, log) # Get price adjusted with slippage adj_price = price * (1 - slippage) # Compute acquired cash acq_cash = final_size * adj_price # Update fees fees_paid = acq_cash * fees + fixed_fees # Get final cash by subtracting costs if is_less_nb(acq_cash, fees_paid): # Can't fill if raise_reject: raise RejectedOrderError("Order rejected: Fees cannot be covered") return order_not_filled_nb( cash_now, shares_now, OrderStatus.Rejected, StatusInfo.CantCoverFees, log_record, log) final_cash = acq_cash - fees_paid # Update current cash and shares new_cash = cash_now + final_cash new_shares = add_nb(shares_now, -final_size) # Return filled order order_result = OrderResult( final_size, adj_price, fees_paid, OrderSide.Sell, OrderStatus.Filled, -1 ) if log: fill_res_log_nb(new_cash, new_shares, order_result, log_record) return new_cash, new_shares, order_result
cf16446421ae19aa7f1d142ee195d5d9dfa20bdf
420
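# A worked sketch of the fill arithmetic in sell_shares_nb above, with
# hypothetical numbers and no vectorbt types involved: selling 10 shares
# quoted at 100.0 with 1% slippage, 0.1% fees and a 1.0 fixed fee.
price, slippage, fees, fixed_fees, final_size = 100.0, 0.01, 0.001, 1.0, 10
adj_price = price * (1 - slippage)        # 99.0: sells fill below the quote
acq_cash = final_size * adj_price         # 990.0
fees_paid = acq_cash * fees + fixed_fees  # 0.99 + 1.0 = 1.99
final_cash = acq_cash - fees_paid         # 988.01 credited to cash
print(adj_price, acq_cash, fees_paid, final_cash)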
import traceback

def handler_no_answer(f):
    """Decorator that creates message handlers that don't reply."""
    def handle_wrapper(*args, **kwds):
        try:
            f(*args, **kwds)
        except Exception:
            # Only reply when something went wrong
            return MSG_STATUS_ERROR, [
                'Calling the cmd handler caused an error:\n{}'.format(traceback.format_exc())
            ], {}
    return handle_wrapper
79f1e0d30eab4d7beb500a3a75f7b7e4415e311c
422
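# A usage sketch for handler_no_answer above. MSG_STATUS_ERROR is assumed
# to be a module-level constant; the handler below is made up.
MSG_STATUS_ERROR = 'error'

@handler_no_answer
def handle_boom(payload):
    raise ValueError('bad payload: {}'.format(payload))

result = handle_boom('x')  # the exception is caught inside the wrapper
print(result[0])           # 'error', followed by the traceback text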
import re

def wrapper_handle_attrs(func):
    """Convert HTML tag attributes into a dictionary."""
    # Decorator for Parsing.handle_attrs_tmp and Parsing.handle_attrs_tag
    def handle_attrs(self, attrs_str):
        attrs = dict()
        if attrs_str == '/':
            return attrs
        attrs_list = re.findall(self.attr_reg, attrs_str)
        for attr in attrs_list:
            attrs[attr[0]] = func(self, attr)
        return attrs
    return handle_attrs
d7396433c9721c26c8d419d4e78f2b8445f5dd70
423
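# A standalone sketch of the attribute parsing that wrapper_handle_attrs
# builds on. The regex below is an assumption -- the real pattern lives in
# self.attr_reg on the Parsing class -- but it shows the expected shape of
# the re.findall() result: (name, value) tuples.
import re

attr_reg = r'(\w+)="([^"]*)"'
attrs_str = 'href="https://example.com" class="btn"'
print(re.findall(attr_reg, attrs_str))
# [('href', 'https://example.com'), ('class', 'btn')]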
def transfer_weights(model, weights=None):
    """
    Always trains from scratch; never transfers weights.

    :param model: the ENet model to (not) initialize
    :param weights: ignored; kept only for interface compatibility
    :return: the model, unchanged
    """
    print('ENet has found no compatible pretrained weights! Skipping weight transfer...')
    return model
2b8b5e7d3ad72deea42ffccea6a561eac3b72320
424
def collapse_json(text, indent=4): """Compacts a string of json data by collapsing whitespace after the specified indent level NOTE: will not produce correct results when indent level is not a multiple of the json indent level """ initial = " " * indent out = [] # final json output sublevel = [] # accumulation list for sublevel entries pending = None # holder for consecutive entries at exact indent level for line in text.splitlines(): if line.startswith(initial): if line[indent] == " ": # found a line indented further than the indent level, so add # it to the sublevel list if pending: # the first item in the sublevel will be the pending item # that was the previous line in the json sublevel.append(pending) pending = None item = line.strip() sublevel.append(item) if item.endswith(","): sublevel.append(" ") elif sublevel: # found a line at the exact indent level *and* we have sublevel # items. This means the sublevel items have come to an end sublevel.append(line.strip()) out.append("".join(sublevel)) sublevel = [] else: # found a line at the exact indent level but no items indented # further, so possibly start a new sub-level if pending: # if there is already a pending item, it means that # consecutive entries in the json had the exact same # indentation and that last pending item was not the start # of a new sublevel. out.append(pending) pending = line.rstrip() else: if pending: # it's possible that an item will be pending but not added to # the output yet, so make sure it's not forgotten. out.append(pending) pending = None if sublevel: out.append("".join(sublevel)) out.append(line) return "\n".join(out)
625868ca90aab0be50cf6d2fdb2926d395d83301
425
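# A usage sketch for collapse_json above: entries nested below the 4-space
# indent level are collapsed onto one line, while the top level keeps its
# indentation.
import json

data = {"a": 1, "b": [1, 2]}
text = json.dumps(data, indent=4)
print(collapse_json(text, indent=4))
# {
#     "a": 1,
#     "b": [1, 2]
# }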
import ast

def get_skills_v1():
    """Read the first skillset from skills_v1.json."""
    # ast.literal_eval needs str, not bytes, so open in text mode; each line
    # holds one dict literal and the last line parsed is what gets returned
    with open('skills_v1.json') as f:
        for line in f:
            skills_v1 = ast.literal_eval(line)
    return skills_v1
c71b7ed4fc6579ea21a0aecceaf38be81a32964b
426
from typing import Tuple
from typing import List
from collections import Counter

def create_mask(board: np.ndarray, dimensions: Tuple[int, int]) -> List[List[int]]:
    """
    Create a mask of possible valid values based on the initial sudoku board.

    Note: relies on a module-level `valid(board, number, position, box_size)`
    helper and a module-level `box_size`, both defined elsewhere.
    """
    mask = board.tolist()
    counts = Counter(board.flatten())
    del counts[0]
    # Try the most common clues first, then append any remaining clue values
    most_common_clues = [number[0] for number in counts.most_common()]
    for clue in range(dimensions[0], dimensions[1]):
        if clue not in most_common_clues:
            most_common_clues.append(clue)
    for i, row in enumerate(mask):
        if 0 in row:
            # Replace every empty cell with its list of candidate values
            while 0 in row:
                zero_index = row.index(0)
                mask[i][zero_index] = []
                for number in most_common_clues:
                    if valid(board, number, (i, zero_index), box_size):
                        mask[i][zero_index].append(number)
        else:
            # Row is already complete: fix each clue as a singleton set
            for number in row:
                if number != 0:
                    mask[i][row.index(number)] = {number}
    return mask
a8e4c68a55c96ad7502464934226f8909dbf18cd
427
from urllib.parse import urlencode
from urllib.request import urlopen

def telegram(text: str, token: str, chat_id: int) -> str:
    """Send a Telegram message via the Bot API and return the response body."""
    webhook_address = f"https://api.telegram.org/bot{token}/sendMessage?" + urlencode({"text": text, "chat_id": chat_id})
    handler = urlopen(webhook_address)
    return handler.read().decode('utf-8')
c80727f5e482b3e9bb48c28c3cc9e688228733fc
428
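# A usage sketch for telegram above. The token and chat id are placeholders;
# supply your own bot credentials. With valid credentials the Bot API
# returns a JSON string describing the sent message.
response = telegram("deployment finished", token="123456:ABC-hypothetical", chat_id=42)
print(response)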
def match_term(term, dictionary, case_sensitive, lemmatize=True):
    """
    Check whether a term occurs in a dictionary of known terms.

    Parameters
    ----------
    term : str
    dictionary : container of known terms
    case_sensitive : bool
        If False, also try a lowercased match.
    lemmatize : bool
        Also try a crude lemma (a trailing 's' stripped); including lemmas
        improves performance slightly.

    Returns
    -------
    bool
    """
    if (not case_sensitive and term.lower() in dictionary) or term in dictionary:
        return True
    if lemmatize:
        lemma = term.rstrip('s')
        if (not case_sensitive and lemma.lower() in dictionary) or lemma in dictionary:
            return True
    return False
aba706a211cf68e7c8c1668200da3f9c8613b3d2
429
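# A quick sketch of match_term above with a toy dictionary.
terms = {'neural network', 'transformer'}
print(match_term('Transformer', terms, case_sensitive=False))   # True
print(match_term('transformers', terms, case_sensitive=False))  # True (lemma match)
print(match_term('Transformer', terms, case_sensitive=True))    # False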
import requests
import json
import csv

def fill_user(user_ids, filename='user', write=True):
    """
    Input: user_ids dictionary (user ids: task values)
    Output: csv file with user id, name, email

    Note: relies on module-level PYBOSSA_API_KEY and headers settings.
    """
    emails = {}
    for user in user_ids:
        r = requests.get('https://pe.goodlylabs.org'
                         '/api/user/{}?api_key={}&limit=100'
                         .format(user, PYBOSSA_API_KEY), headers=headers)
        user_info = json.loads(r.text)
        emails[user] = [user_info['fullname'], user_info['email_addr']]
    if write:
        with open('{}.csv'.format(filename), 'w', newline='') as f:
            writer = csv.writer(f, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow(["id", "name", "email"])
            for i in emails:
                writer.writerow([i, emails[i][0], emails[i][1]])
    return emails
0ee2eb7104ee177a13d78e6e05b6fb787736bb06
430
def project_along_flow(dX_raw, dY_raw, dX_prio, dY_prio, e_perp):
    """
    Parameters
    ----------
    dX_raw : np.array, size=(m,n), dtype=float
        raw horizontal displacement with mixed signal
    dY_raw : np.array, size=(m,n), dtype=float
        raw vertical displacement with mixed signal
    dX_prio : np.array, size=(m,n), dtype=float
        reference of horizontal displacement (a-priori knowledge)
    dY_prio : np.array, size=(m,n), dtype=float
        reference of vertical displacement (a-priori knowledge)
    e_perp : np.array, size=(2,1), dtype=float
        vector in the perpendicular direction to the flightline (bearing).

    Returns
    -------
    dX_proj : np.array, size=(m,n), dtype=float
        projected horizontal displacement in the same direction as reference.
    dY_proj : np.array, size=(m,n), dtype=float
        projected vertical displacement in the same direction as reference.

    Notes
    -----
    The projection function is as follows:

    .. math:: P = ({d_{x}}e^{\perp}_{x} - {d_{y}}e^{\perp}_{y}) / ({\hat{d}_{x}}e^{\perp}_{x} - {\hat{d}_{y}}e^{\perp}_{y})

    See also Equation 10 and Figure 2 in [1].

    Furthermore, two different coordinate systems are used here:

        .. code-block:: text

          indexing   |           indexing    ^ y
          system 'ij'|           system 'xy' |
                     |                       |
                     |       i               |       x
             --------+-------->      --------+-------->
                     |                       |
                     |                       |
          image      | j         map         |
          based      v           based       |

    References
    ----------
    .. [1] Altena & Kääb. "Elevation change and improved velocity retrieval
       using orthorectified optical satellite data from different orbits"
       Remote Sensing vol.9(3) pp.300 2017.
    """
    # e_parallel corresponds to the bearing of the satellite
    assert dX_raw.size == dY_raw.size  # all should be of the same size
    assert dX_prio.size == dY_prio.size
    assert dX_raw.size == dX_prio.size

    d_proj = ((dX_raw * e_perp[0]) - (dY_raw * e_perp[1])) / \
             ((dX_prio * e_perp[0]) - (dY_prio * e_perp[1]))

    dX_proj = d_proj * dX_raw
    dY_proj = d_proj * dY_raw
    return dX_proj, dY_proj
fe0565667b77954b1df07d7ea31cbb620b20f800
431
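# A tiny numpy sketch of project_along_flow above, with made-up
# displacement fields and a perpendicular unit vector.
import numpy as np

dX_raw = np.array([[1.0, 2.0]]); dY_raw = np.array([[0.5, 0.0]])
dX_prio = np.array([[1.0, 1.0]]); dY_prio = np.array([[0.0, 0.0]])
e_perp = np.array([[1.0], [0.0]])  # perpendicular to the flightline

dX_proj, dY_proj = project_along_flow(dX_raw, dY_raw, dX_prio, dY_prio, e_perp)
print(dX_proj, dY_proj)  # [[1. 4.]] [[0.5 0. ]]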
from typing import Mapping
from sqlalchemy import Table, select
from sqlalchemy.engine import Engine

def get_existing_pks(engine: Engine, table: Table) -> Mapping[int, dict]:
    """
    Creates an index of hashes of the values of the primary keys in the
    table provided.

    Note: hash_row_els is a helper defined elsewhere in the module.

    :param engine:
    :param table:
    :return:
    """
    with engine.connect() as conn:
        pk_cols = [table.c[col.name] for col in table.columns if col.primary_key]
        query = select(pk_cols)
        result = conn.execute(query)
        return {hash_row_els(dict(row), [col.name for col in pk_cols]): dict(row)
                for row in result}
fead502b36f67c6732ba4b0e4e678af4fd96ed53
432
import cv2

def create_transform_parameters(
    fill_mode='nearest',
    interpolation='linear',
    cval=0,
    data_format=None,
    relative_translation=True,
):
    """
    Creates a dictionary to store parameters containing information on the
    method to apply transformations to an image.

    # Arguments
        fill_mode:             One of: 'constant', 'nearest', 'reflect', 'wrap'
        interpolation:         One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
        cval:                  Fill value to use with fill_mode='constant'
        data_format:           Same as for keras.preprocessing.image_transform.apply_transform
        relative_translation:  If true (the default), interpret translation as a factor of the
                               image size. If false, interpret it as absolute pixels.
    """
    # Apply processing to input arguments
    if data_format is None:
        data_format = 'channels_last'

    if data_format == 'channels_first':
        channel_axis = 0
    elif data_format == 'channels_last':
        channel_axis = 2
    else:
        raise ValueError("invalid data_format, expected 'channels_first' or 'channels_last', got '{}'".format(data_format))

    # Map the string options to the corresponding OpenCV constants
    if fill_mode == 'constant':
        cv_border_mode = cv2.BORDER_CONSTANT
    if fill_mode == 'nearest':
        cv_border_mode = cv2.BORDER_REPLICATE
    if fill_mode == 'reflect':
        cv_border_mode = cv2.BORDER_REFLECT_101
    if fill_mode == 'wrap':
        cv_border_mode = cv2.BORDER_WRAP

    if interpolation == 'nearest':
        cv_interpolation = cv2.INTER_NEAREST
    if interpolation == 'linear':
        cv_interpolation = cv2.INTER_LINEAR
    if interpolation == 'cubic':
        cv_interpolation = cv2.INTER_CUBIC
    if interpolation == 'area':
        cv_interpolation = cv2.INTER_AREA
    if interpolation == 'lanczos4':
        cv_interpolation = cv2.INTER_LANCZOS4

    # Create attribute dict to store parameters
    # (AttrDict is assumed to be provided by the surrounding package's utils)
    _p = AttrDict(
        fill_mode=fill_mode,
        interpolation=interpolation,
        cval=cval,
        relative_translation=relative_translation,
        data_format=data_format,
        channel_axis=channel_axis,
        cv_border_mode=cv_border_mode,
        cv_interpolation=cv_interpolation
    )
    _p.immutable(True)
    return _p
2172c283b53d76881877b618043885aa596507d4
433
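# A usage sketch for create_transform_parameters above.
import cv2

params = create_transform_parameters(fill_mode='constant', cval=0.5)
print(params.cv_border_mode == cv2.BORDER_CONSTANT)  # True
print(params.channel_axis)  # 2, because data_format defaults to 'channels_last'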
def error_rate(model, dataset): """Returns error rate for Keras model on dataset.""" d = dataset['dimension'] scores = np.squeeze(model.predict(dataset['features'][:, :, 0:d]), axis=-1) diff = scores[:, 0] - scores[:, 1] return np.mean(diff.reshape((-1)) <= 0)
b16b234ead64737eb2d40b3aab612270ed86dc0a
434
def account(): """Update the user's account""" return _templates.account(UserContext.user())
4624bcce3987e71edcd8d720eea22b52658c1352
436
from typing import Awaitable
import asyncio

def run_synchronously(computation: Awaitable[TSource]) -> TSource:
    """Runs the asynchronous computation and awaits its result."""
    return asyncio.run(computation)
2aa14167a2de06a85862b0b0c9294c76fa8ed012
437
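# A usage sketch for run_synchronously above.
import asyncio

async def fetch_value() -> int:
    await asyncio.sleep(0.1)
    return 42

print(run_synchronously(fetch_value()))  # 42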
from datetime import datetime from typing import Optional from pydantic import BaseModel # noqa: E0611 import cmd from typing import cast def create_running_command( command_id: str = "command-id", command_key: str = "command-key", command_type: str = "command-type", created_at: datetime = datetime(year=2021, month=1, day=1), params: Optional[BaseModel] = None, ) -> cmd.Command: """Given command data, build a running command model.""" return cast( cmd.Command, cmd.BaseCommand( id=command_id, key=command_key, createdAt=created_at, commandType=command_type, status=cmd.CommandStatus.RUNNING, params=params or BaseModel(), ), )
a9e38fd534aaf29ebe265279eb86ed90233113fb
438
def x11_linux_stop_record(): """ stop test_record action """ return xwindows_listener.stop_record()
3a5e4728f8d6d27083ee43c26c84ba22133e0621
439
def yxy_intrinsic(mat: np.ndarray) -> np.ndarray:
    """Return the yxy intrinsic Euler angle decomposition of mat (..., 4, 4)."""
    # extract components
    not_nan, r00, r01, r02, r10, r11, r12, _, r21, _ = extract_mat_components(mat)
    # pre-initialize results
    theta_y0 = np.full(not_nan.shape, np.nan)
    theta_x = np.full(not_nan.shape, np.nan)
    theta_y1 = np.full(not_nan.shape, np.nan)
    # compute Euler angles
    theta_y0[not_nan] = np.where(r11 < 1, np.where(r11 > -1, np.arctan2(-r01, -r21), 0), 0)
    theta_x[not_nan] = np.where(r11 < 1, np.where(r11 > -1, -np.arccos(r11), -np.pi), 0)
    theta_y1[not_nan] = np.where(r11 < 1,
                                 np.where(r11 > -1, np.arctan2(-r10, r12),
                                          np.arctan2(r02, r00)),
                                 np.arctan2(r02, r00))
    return np.stack((theta_y0, theta_x, theta_y1), -1)
d7fd0ab01c3c7cf27839caff53a905294e47b7ba
440
def mnemonic_and_path_to_key(*, mnemonic: str, path: str, password: str) -> int: """ Return the SK at position `path`, derived from `mnemonic`. The password is to be compliant with BIP39 mnemonics that use passwords, but is not used by this CLI outside of tests. """ seed = get_seed(mnemonic=mnemonic, password=password) sk = derive_master_SK(seed) for node in path_to_nodes(path): sk = derive_child_SK(parent_SK=sk, index=node) return sk
6127278c78a1e52e362c2d66c4eb065f63de0ba9
441
import inspect def test_function_with_annotations(): """Parse a function docstring with signature annotations.""" def f(x: int, y: int, *, z: int) -> int: """ This function has annotations. Parameters: x: X value. y: Y value. Keyword Arguments: z: Z value. Returns: Sum X + Y. """ return x + y sections, errors = parse(inspect.getdoc(f), inspect.signature(f)) assert len(sections) == 4 assert not errors
5fe7d046659a9e1511f8f321d186cd6e8f1d8d43
442
def acceleration(bodies, i, j):
    """
    Compute the acceleration of body bodies[i] at step j.

    bodies: all the bodies in the system
    i: index of the body that undergoes the gravitation of the other bodies
    j: index of the time step
    """
    N = len(bodies)
    ax = 0; ay = 0; az = 0  # acceleration components
    for ip in range(N):
        # Every body bodies[ip] exerts a gravitational force on bodies[i]
        if ip == i:
            # A body does not attract itself
            continue
        ax += fx(bodies[ip].masse, bodies[i].x[j]-bodies[ip].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]-bodies[ip].z[j])
        ay += fy(bodies[ip].masse, bodies[i].x[j]-bodies[ip].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]-bodies[ip].z[j])
        az += fz(bodies[ip].masse, bodies[i].x[j]-bodies[ip].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]-bodies[ip].z[j])
    return (ax, ay, az)
e19ab098a72d43d0f28931d853866ba0999bd39d
443
import re def formatted(s): """If s contains substrings of form '#'<txt>'#', '(('<txt>'))', "''"<txt>"''", returns list of tuples (FORMAT_x, txt). Otherwise, returns s. """ matches = re.findall(_format_re, normalize(s)) if len(matches) == 1 and matches[0][0] != '': return matches[0][0] def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a): if txt_none != '': return FORMAT_NONE, txt_none elif txt_sw != '': return FORMAT_SW, txt_sw elif txt_rem != '': return FORMAT_REM, txt_rem elif txt_em != '': return FORMAT_EM, txt_em elif txt_a != '': return FORMAT_A, txt_a return [to_fmt(*m) for m in matches]
e164d743c5284de744948ab9db72f8887e380dc2
444
def deep_len(lnk): """ Returns the deep length of a possibly deep linked list. >>> deep_len(Link(1, Link(2, Link(3)))) 3 >>> deep_len(Link(Link(1, Link(2)), Link(3, Link(4)))) 4 >>> levels = Link(Link(Link(1, Link(2)), \ Link(3)), Link(Link(4), Link(5))) >>> print(levels) <<<1 2> 3> <4> 5> >>> deep_len(levels) 5 """ if not lnk: return 0 if type(lnk.first) == int: return 1 + deep_len(lnk.rest) return deep_len(lnk.first) + deep_len(lnk.rest)
d8a33600085e51b181752b2dd81d5bcdae7aaff9
445
def union(A, B):
    """
    Add two subspaces (A, B) together.

    Args:
        - A: a matrix whose columns span subspace A [ndarray].
        - B: a matrix whose columns span subspace B [ndarray].

    Returns:
        - union: a matrix whose columns form the orthogonal basis for the
          subspace sum A+B [ndarray].
    """
    m, n = A.shape
    x, y = B.shape
    if m != x:
        raise ValueError('input matrices need to be of the same height')
    T = np.hstack((A, B))
    return image(T)
d88f09cd4be80d06d7ae0d6e8397e46910f81a90
446
def ldns_create_nsec(*args):
    """Wrapper for the ldns ldns_create_nsec call."""
    return _ldns.ldns_create_nsec(*args)
f9e8fd181f4476c745a9ac12c513e24e7939e2e3
447
def str_to_seconds(time):
    """
    Returns the number of seconds since midnight in the string time (as an int).

    The value time is a string in extended ISO 8601 format. That is, it has
    the form 'hh:mm:ss' where h, m, and s are digits. There must be exactly
    two digits each for hours, minutes, and seconds, so they are padded with
    0s when necessary. So seconds, minutes, and hours may have leading 0s if
    they are only one digit. For more information, see
    https://en.wikipedia.org/wiki/ISO_8601#Times

    This function does not support time zones, abbreviated formats, or decimals.

    Example: str_to_seconds('12:35:15') returns 45315
    Example: str_to_seconds('03:02:05') returns 10925
    Example: str_to_seconds('00:00:00') returns 0

    Parameter time: The string representation of the time
    Precondition: time is a string in extended ISO 8601 format 'hh:mm:ss'
    """
    assert type(time) == str
    assert len(time) == 8
    assert iso_8601(time)
    return get_hours(time) * 60 * 60 + get_minutes(time) * 60 + get_seconds(time)
3902e5743567f6d07e2c78fa76e5bc2fc0d6306f
448