Columns:
    content: string (length 35 to 762k)
    sha1: string (length 40)
    id: int64 (0 to 3.66M)
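The rows below follow the schema above (content, sha1, id). As a minimal sketch — not part of the dump itself — this is how such records could be loaded and sanity-checked, assuming a Hugging Face-style dataset; the dataset path and the assumption that sha1 is the SHA-1 hex digest of content are illustrative guesses, not confirmed by this dump:

import hashlib
from datasets import load_dataset  # assumes the `datasets` package is available

# "user/python-functions" is a placeholder path, not the real dataset identifier.
ds = load_dataset("user/python-functions", split="train")

for record in ds.select(range(10)):
    # sha1 is assumed (not confirmed) to be the SHA-1 hex digest of the raw content text.
    digest = hashlib.sha1(record["content"].encode("utf-8")).hexdigest()
    assert len(record["sha1"]) == 40
    print(record["id"], digest == record["sha1"])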
import gc def test_harvest_lost_resources(pool): """Test unreferenced resources are returned to the pool.""" def get_resource_id(): """ Ensures ``Resource`` falls out of scope before calling ``_harvest_lost_resources()``. """ return id(pool.get_resource()._resource) r_id = get_resource_id() # Run garbage collection to ensure ``Resource`` created in # ``get_resource_id()`` is destroyed. gc.collect() pool._harvest_lost_resources() assert r_id == id(pool.get_resource()._resource)
04b8b29520c2ae9c2c47cef412659e9c567c6a8a
3,659,424
def __call__for_keras_init_v1(self, shape, dtype=None, partition_info=None): """ Making keras VarianceScaling initializers v1 support dynamic shape. """ if dtype is None: dtype = self.dtype scale = self.scale scale_shape = shape if partition_info is not None: scale_shape = partition_info.full_shape fan_in, fan_out = _compute_fans_for_keras_init_v1_v2(scale_shape) fan_in = math_ops.cast(fan_in, dtype=dtype) fan_out = math_ops.cast(fan_out, dtype=dtype) if self.mode == "fan_in": scale /= math_ops.maximum(1., fan_in) elif self.mode == "fan_out": scale /= math_ops.maximum(1., fan_out) else: scale /= math_ops.maximum(1., (fan_in + fan_out) / 2.) if self.distribution == "normal" or self.distribution == "truncated_normal": # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) stddev = math_ops.sqrt(scale) / .87962566103423978 return random_ops.truncated_normal(shape, 0.0, stddev, dtype, seed=self.seed) elif self.distribution == "untruncated_normal": stddev = math_ops.sqrt(scale) return random_ops.random_normal(shape, 0.0, stddev, dtype, seed=self.seed) else: limit = math_ops.sqrt(3.0 * scale) return random_ops.random_uniform(shape, -limit, limit, dtype, seed=self.seed)
860dc27ecd133b5bb193c4856736f9bb1a52d243
3,659,425
def create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, geodata=None, df=1., parallel=1, in_service=True, max_loading_percent=nan): """ create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, \ geodata=None, df=1., parallel=1, in_service=True, max_loading_percent=nan) Creates a line element in net["line"] The line parameters are defined through the standard type library. INPUT: **net** - The net within this line should be created **from_bus** (int) - ID of the bus on one side which the line will be connected with **to_bus** (int) - ID of the bus on the other side which the line will be connected with **length_km** (float) - The line length in km **std_type** (string) - The linetype of a standard line pre-defined in standard_linetypes. OPTIONAL: **name** (string) - A custom name for this line **index** (int, None) - Force a specified ID if it is available. If None, the index one \ higher than the highest already existing index is selected. **geodata** (array, default None, shape= (,2L)) - The linegeodata of the line. The first row should be the coordinates of bus a and the last should be the coordinates of bus b. The points in the middle represent the bending points of the line **in_service** (boolean) - True for in_service or False for out of service **df** (float) - derating factor: maximal current of line in relation to nominal current \ of line (from 0 to 1) **parallel** (integer) - number of parallel line systems **max_loading_percent (float)** - maximum current loading (only needed for OPF) OUTPUT: **index** (int) - The unique ID of the created line EXAMPLE: create_line(net, "line1", from_bus = 0, to_bus = 1, length_km=0.1, std_type="NAYY 4x50 SE") """ # check if bus exist to attach the line to for b in [from_bus, to_bus]: if b not in net["bus"].index.values: raise UserWarning("Line %s tries to attach to non-existing bus %s" % (name, b)) if index is None: index = get_free_id(net["line"]) if index in net["line"].index: raise UserWarning("A line with index %s already exists" % index) v = { "name": name, "length_km": length_km, "from_bus": from_bus, "to_bus": to_bus, "in_service": bool(in_service), "std_type": std_type, "df": df, "parallel": parallel } lineparam = load_std_type(net, std_type, "line") v.update({ "r_ohm_per_km": lineparam["r_ohm_per_km"], "x_ohm_per_km": lineparam["x_ohm_per_km"], "c_nf_per_km": lineparam["c_nf_per_km"], "max_i_ka": lineparam["max_i_ka"] }) if "type" in lineparam: v.update({"type": lineparam["type"]}) # store dtypes dtypes = net.line.dtypes net.line.loc[index, list(v.keys())] = list(v.values()) # and preserve dtypes _preserve_dtypes(net.line, dtypes) if geodata is not None: net["line_geodata"].loc[index, "coords"] = geodata if not isnan(max_loading_percent): if "max_loading_percent" not in net.line.columns: net.line.loc[:, "max_loading_percent"] = pd.Series() net.line.loc[index, "max_loading_percent"] = float(max_loading_percent) return index
218a3a16bce0d746465991c0992f614bddf98892
3,659,426
def get_initmap(X, A=None, standardize=False, cov_func=None): """ Give back parameters such that we have the L U decomposition of the product with A (if given, or the PCA scores if not). That is we will get back: X[:, perm]*L*U + b = ((X-meanvec)/stdvec)*A where A are PCA directions if not given, L, U are LU decomposition, and meanvec, stdvec are zeros, ones vectors if not standardizing. Args: X: N x d array of training data A: d x d linear map to decompose, XA+b, (uses Identity if None given with no cov_func). standardize: boolean that indicates to standardize the dimensions of X after applying linear map. cov_func: function that yeilds a linear map given covariance matrix of X. Returns: init_mat: d x d matrix where stricly lower triangle is corresponds to L and upper triangle corresponds to U. b: d length vector of offset perm: permuation of dimensions of X """ # import pdb; pdb.set_trace() # XXX BREAKPOINT N, d = X.shape if A is None: if cov_func is None: A = np.eye(d) b = np.zeros((1, d)) else: b = -np.mean(X, 0, keepdims=True) M = (X+b) # Has mean zero. cov = np.matmul(M.T, M)/N A = cov_func(cov) b = np.matmul(b, A) if standardize: z = np.matmul(X, A)+b mean_vec = np.mean(z, 0, keepdims=True) # std_vec = np.std(z, 0, keepdims=True) # Standardizing may lead to outliers, better to get things in [-1, 1]. # std_vec = np.max(np.abs(z-mean_vec), 0, keepdims=True) std_vec = np.maximum(np.max(np.abs(z-mean_vec), 0, keepdims=True), np.ones((1, d)), dtype=np.float32) # import pdb; pdb.set_trace() # XXX BREAKPOINT else: mean_vec = np.zeros((1, d)) std_vec = np.ones((1, d)) AS = np.divide(A, std_vec) P, L, U = linalg.lu(AS) perm = np.concatenate([np.flatnonzero(P[:, i]) for i in range(P.shape[1])]) init_mat = np.tril(L, -1) + U init_b = np.squeeze((b-mean_vec)/std_vec) return np.float32(init_mat), np.float32(init_b), perm
53ec26f8efe4c0869b4e4423419db32ed08128e0
3,659,427
def read_FQ_matlab(file_open): """ Opens FISH-quant result files generated with Matlab (tab-delimited text file). Args: file_open (string): string containing the full file name. Returns: dictionary containing outlines of cells, and if present the detected spots. """ # Open file with open(file_open, "r") as fh: data = fh.readlines() # Strip white space characters data = [x.strip() for x in data] # Loop over read-in data fq_dict = {'cells':{},'file_names':{},'settings':{}} iLine = 0 while iLine < len(data): line = data[iLine] # READ FILE NAMES if 'IMG_Raw' in line: img_name = line.split('\t') if len(img_name) == 2: fq_dict['file_names'].update({'smFISH':img_name[1]}) if 'IMG_Filtered' in line: img_name = line.split('\t') if len(img_name) == 2: fq_dict['file_names'].update({'smFISH_filt':img_name[1]}) if 'IMG_DAPI' in line: img_name = line.split('\t') if len(img_name) == 2: fq_dict['file_names'].update({'DAPI':img_name[1]}) if 'FILE_settings' in line: img_name = line.split('\t') if len(img_name) == 2: fq_dict['file_names'].update({'settings':img_name[1]}) # READ IMAGE PARAMETERS if 'PARAMETERS' in line: iLine += 2 par_microscope = data[iLine].split('\t') fq_dict['settings'].update({'microscope':{'pix_xy':float(par_microscope[0]), 'pix_z':float(par_microscope[1]), 'RI':float(par_microscope[2]), 'EX':float(par_microscope[3]), 'EM':float(par_microscope[4]), 'NA':float(par_microscope[5]), 'type':par_microscope[6]}}) # New cell if 'CELL_START' in line: # Get name of cell cell_id = line.split('\t')[1] ### POSITION OF CELL # Read X-POS iLine += 1 pos_list = (data[iLine].replace('X_POS\t','')).split('\t') x_pos = [int(s) for s in pos_list] # Read Y-POS iLine += 1 pos_list = (data[iLine].replace('Y_POS\t','')).split('\t') y_pos = [int(s) for s in pos_list] # Read Z-POS iLine += 1 pos_list = (data[iLine].replace('Z_POS\t','')).split('\t') if len(pos_list) > 1: z_pos = [int(s) for s in pos_list] else: z_pos = [''] fq_dict['cells'].update({cell_id:{'cell_pos':{'x': x_pos,'y': y_pos,'z': z_pos}}}) # New nucleus if 'Nucleus_START' in line: # Get name of cell nuc_id = line.split('\t')[1] ### POSITION OF CELL # Read X-POS iLine += 1 pos_list = (data[iLine].replace('X_POS\t','')).split('\t') x_pos = [int(s) for s in pos_list] # Read Y-POS iLine += 1 pos_list = (data[iLine].replace('Y_POS\t','')).split('\t') y_pos = [int(s) for s in pos_list] # Read Z-POS iLine += 1 pos_list = (data[iLine].replace('Z_POS\t','')).split('\t') if len(pos_list) > 1: z_pos = [int(s) for s in pos_list] else: z_pos = [''] fq_dict['cells'][cell_id].update({nuc_id:{'nuc_pos':{'x': x_pos,'y': y_pos,'z': z_pos}}}) # Position of detected RNAS if 'SPOTS_START' in line: iLine += 2 # Move over header RNA_prop = [] while not('SPOTS_END' in data[iLine]): RNA_prop.append([float(s) for s in data[iLine].split('\t')]) iLine += 1 # Assign to dictionary fq_dict['cells'][cell_id].update({'spots': np.array(RNA_prop)}) # Up date line counter iLine += 1 return fq_dict
01c2c2263573e754c216c69496f648a883bb1843
3,659,428
def create_default_reporting_options(embedded=True, config={}): """ config must follow this scheme: { `table_name`: { `option1`: `value1` } } The different options will depend on the table role. - for ALL tables: {n 'data' : { 'remove_columns': ['column_name1'], 'subsampling_factor': 1.0, 'keep_last_n_rows': 1000 } - For role `data_samples`: { 'default': 'Scatter X Axis': value, 'Scatter Y Axis': value, 'Color by': value, 'Color scheme': value, 'Binning X Axis': value, 'Binning Y Axis': value, 'Label with': value, 'Display with': value, } """ o = Object() o.image_size = 80 o.font_size = 19 o.frame_size_x = None o.frame_size_y = 768 o.data_samples = Object() o.data_samples.display_tabular = True o.data_samples.display_scatter = True o.data_samples.max_numpy_display = 10 # if array below this size, the content will be displayed o.data_samples.resize_heterogeneous_numpy = True # if True, numpy arrays of different shape will be resized to common average size o.db_root = None o.embedded = embedded o.style = Object() o.style.color_by_line_width = 1 o.style.scatter_aspect_ratio = 1.5 o.style.tool_window_size_x = 200 o.style.tool_window_size_y = 500 o.style.sorted_legend = True o.style.category_margin = 0.2 o.style.scatter_continuous_factor = 10 o.data = Object() o.data.refresh_time = 5.0 o.data.unpack_numpy_arrays_with_less_than_x_columns = 15 o.data.types_to_discard = [ np.dtype('|S1'), # binary string ] o.config = config return o
cc7d341a0d63979bbf3223a241c5707acf057547
3,659,429
def get_patient_note(state, patient_id, note_id, *args, **kwargs): """ Return a note for a patient. --- tags: ["FHIR"] parameters: - name: patient_id in: path description: ID of the patient of interest required: true schema: type: string - name: note_id in: path description: ID of the note of interest required: true schema: type: string responses: 200: description: "Note returned" content: application/json: schema: type: array items: type: object 404: description: "No patient or note exists with identifier" content: text/plain: schema: type: string 428: description: "No FHIR data currently in application state" content: text/plain: schema: type: string """ p = state.patients.get(patient_id) if p is None: return ( f'No patient exists with identifier "{patient_id}".', 404, {'Content-Type': 'text/plain'} ) n = p.notes.get(note_id) if n is None: return ( f'No note exists with identifier "{note_id}".', 404, {'Content-Type': 'text/plain'} ) return jsonify(n.to_dict())
399212c31d2ae34b96a5617ca73063745c22621c
3,659,430
def _html_build_item(tag: str, text: str, attributes: map = None, include_tags=True) -> str: """Builds an HTML inline element and returns the HTML output. :param str tag: the HTML tag :param str text: the text between the HTML tags :param map attributes: map of attributes :param bool include_tags: True if the tags should be part of the output """ attributes = attributes if attributes is not None else {} opening_tag = "<" + tag + _html_build_attributes(attributes) + ">" closing_tag = "</" + tag + ">" if include_tags: return opening_tag + text + closing_tag else: return text
13b165a98679c2ebaf9a1dec7619a3297c729a63
3,659,431
from typing import Dict from typing import Optional import random def sim_sample( out_prefix: str, sample_id: int, chrom_start: int = 0, chrom_end: int = 10000, start_rate: float = 0.001, end_rate: float = 0.01, mut_rate: float = 0.01, ) -> Dict[str, File]: """ Simulate sequencing data for one sample (assume one chromosome). regions are sequenced intervals of a chromsome. muts are SNP locations, assume heterozygous. """ regions = [] muts = [] region_start: Optional[int] # Sample initial state. non_seq_len = 1 / start_rate seq_len = 1 / end_rate if random.random() < seq_len / (seq_len + non_seq_len): region_start = chrom_start else: region_start = None # Use poisson process to sample regions and mutation sites. pos = chrom_start while pos < chrom_end: pos += 1 if region_start is None: pos += int(sample_exponential(start_rate)) if pos >= chrom_end: break region_start = pos else: region_end = min(pos + int(sample_exponential(end_rate)), chrom_end - 1) mut_pos = pos + int(sample_exponential(mut_rate)) if region_end <= mut_pos: regions.append((region_start, region_end, 2)) region_start = None pos = region_end else: pos = mut_pos muts.append((mut_pos, 1)) return { "regions": write_regions(f"{out_prefix}/regions/{sample_id}.regions", regions), "mutations": write_mutations(f"{out_prefix}/muts/{sample_id}.muts", muts), }
d8a858b3f8099dd57cdc7abb4f1473e238038536
3,659,432
def vif_col(X, y, col_name):
    """Compute the VIF (variance inflation factor) for a single column.

    A common threshold is 5 or 10; a value above that indicates
    collinearity.

    Attributes:
        X (pd.DataFrame): independent variables
        y (pd.Series): dependent variable
        col_name (str): the column to evaluate

    References:
        James, Gareth, Daniela Witten, Trevor Hastie, and Robert Tibshirani.
        An Introduction to Statistical Learning. pp. 112, Vol. 112: Springer, 2013.
    """
    r_square_minus = model(X.loc[:, X.columns != col_name].values, y).rsquared
    return 1 / (1 - r_square_minus)
6d9c88d928934d60182b597a89c6da6d1f7d1194
3,659,433
def get_mesh_stat(stat_id_start_str, attr_value, xmin, ymin, xmax, ymax):
    """
    Fetch statistics for regional grid-square meshes.

    @param stat_id_start_str leading characters of the statistics ID;
                             every ID starting with these characters is fetched
    @param attr_value value used to filter on cat01
    @param xmin bounding box of the area to fetch
    @param ymin bounding box of the area to fetch
    @param xmax bounding box of the area to fetch
    @param ymax bounding box of the area to fetch
    """
    rows = database_proxy.get_conn().execute("""
        SELECT
            statValue.value,
            AsGeoJson(MapArea.Geometry)
        FROM
            MapArea
            inner join idx_MapArea_Geometry ON pkid = MapArea.id AND xmin > ? AND ymin > ? AND xmax < ? AND ymax < ?
            inner join statValueAttr ON MapArea.stat_val_attr_id = statValueAttr.id
            inner join statValueAttr AS b ON b.stat_value_id = statValueAttr.stat_value_id AND b.attr_value = ?
            inner join statValue ON statValue.id = b.stat_value_id
        WHERE
            MapArea.stat_id like ?;
    """, (xmin, ymin, xmax, ymax, attr_value, stat_id_start_str + '%'))
    ret = []
    for r in rows:
        ret.append({
            'value': r[0],
            'geometory': r[1]
        })
    return ret
9a861925436c2cf10eb4773be9dfa79c901d43f4
3,659,434
def babel_extract(fileobj, keywords, comment_tags, options): """Babel extraction method for Jinja templates. .. versionchanged:: 2.3 Basic support for translation comments was added. If `comment_tags` is now set to a list of keywords for extraction, the extractor will try to find the best preceeding comment that begins with one of the keywords. For best results, make sure to not have more than one gettext call in one line of code and the matching comment in the same line or the line before. .. versionchanged:: 2.5.1 The `newstyle_gettext` flag can be set to `True` to enable newstyle gettext calls. .. versionchanged:: 2.7 A `silent` option can now be provided. If set to `False` template syntax errors are propagated instead of being ignored. :param fileobj: the file-like object the messages should be extracted from :param keywords: a list of keywords (i.e. function names) that should be recognized as translation functions :param comment_tags: a list of translator tags to search for and include in the results. :param options: a dictionary of additional options (optional) :return: an iterator over ``(lineno, funcname, message, comments)`` tuples. (comments will be empty currently) """ extensions = set() for extension in options.get('extensions', '').split(','): extension = extension.strip() if not extension: continue extensions.add(import_string(extension)) if InternationalizationExtension not in extensions: extensions.add(InternationalizationExtension) def getbool(options, key, default=False): return options.get(key, str(default)).lower() in \ ('1', 'on', 'yes', 'true') silent = getbool(options, 'silent', True) environment = Environment( options.get('block_start_string', BLOCK_START_STRING), options.get('block_end_string', BLOCK_END_STRING), options.get('variable_start_string', VARIABLE_START_STRING), options.get('variable_end_string', VARIABLE_END_STRING), options.get('comment_start_string', COMMENT_START_STRING), options.get('comment_end_string', COMMENT_END_STRING), options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX, options.get('line_comment_prefix') or LINE_COMMENT_PREFIX, getbool(options, 'trim_blocks', TRIM_BLOCKS), getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS), NEWLINE_SEQUENCE, getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE), frozenset(extensions), cache_size=0, auto_reload=False ) if getbool(options, 'trimmed'): environment.policies['ext.i18n.trimmed'] = True if getbool(options, 'newstyle_gettext'): environment.newstyle_gettext = True source = fileobj.read().decode(options.get('encoding', 'utf-8')) try: node = environment.parse(source) tokens = list(environment.lex(environment.preprocess(source))) except TemplateSyntaxError as e: if not silent: raise # skip templates with syntax errors return finder = _CommentFinder(tokens, comment_tags) for lineno, func, message in extract_from_ast(node, keywords): yield lineno, func, message, finder.find_comments(lineno)
35ee7c05ee91afc1ccf7c752bdff72e3c3d30d78
3,659,435
import numpy

def onedthreegaussian(x, H, A1, dx1, w1, A2, dx2, w2, A3, dx3, w3):
    """
    Returns the sum of three 1-dimensional gaussians of the form
    H + A*numpy.exp(-(x-dx)**2 / (2*w**2))
    """
    g1 = A1 * numpy.exp(-(x-dx1)**2 / (2*w1**2))
    g2 = A2 * numpy.exp(-(x-dx2)**2 / (2*w2**2))
    g3 = A3 * numpy.exp(-(x-dx3)**2 / (2*w3**2))
    return H + g1 + g2 + g3
f93ea1339fe1498fdaeaee91f75b7ba316455646
3,659,437
from typing import Optional
from typing import Union

def confusion_matrix_by_prob(true: np.ndarray, predicted_prob: np.ndarray,
                             thresholds: Optional[Union[list, tuple, np.ndarray]] = None,
                             pos_label: Union[bool, str, int] = _DEFAULT_POS_LABEL,
                             output_metrics: Optional[list] = None, table: bool = True, **kwargs):
    """
    confusion matrix for binary classification according to a given set of thresholds;

    :param true: numpy.ndarray(shape=(m), ), an array of true classes;
    :param predicted_prob: numpy.ndarray(shape=(m), ), an array of predicted probabilities of being the positive class;
    :param thresholds: [list, tuple, np.array, None] the thresholds set on predicted probabilities such that any
        predicted probability greater or equal to the threshold will be classified as the positive class;
    :param pos_label: [str, bool, int], positive class label, label that is considered as the positive class;
    :param output_metrics: [list, None], metrics to be outputted if selected;
    :param table: bool, if exported as a pd table;
    :param kwargs:
        :param metric_order: [list, None], if table is selected to be the output, metric order specifies the order of
            metrics presented in the table;
    :return: dict, a set of confusion matrices, {threshold: {metric_name: metric_value, ...}, ...};
    """
    # convert true series to positive series
    true = true == pos_label

    # select output:
    if isinstance(output_metrics, list):
        for selected_metric in output_metrics:
            if selected_metric not in _FULL_METRICS:
                raise KeyError(f"metric {selected_metric} is not recognized.")
    elif output_metrics == 'confusion':
        output_metrics = ['TP', 'FN', 'FP', 'TN',
                          'Recall', 'FNR', 'FPR', 'TNR',
                          'Precision', 'FOR', 'FDR', 'NPV',
                          'Prevalence', 'Accuracy']
    else:
        output_metrics = _FULL_METRICS

    metrics_by_thresholds = dict()
    for threshold in thresholds:
        metrics_by_threshold = dict()
        predicted = predicted_prob >= threshold
        confusion_matrix_dict = confusion_matrix(true=true, predicted=predicted, normalize=False)
        confusion_matrix_nor_true = normalize_confusion_matrix(confusion_matrix_dict=confusion_matrix_dict,
                                                               normalize_index=0)
        confusion_matrix_nor_predicted = normalize_confusion_matrix(confusion_matrix_dict=confusion_matrix_dict,
                                                                    normalize_index=1)

        if 'TP' in output_metrics:
            metrics_by_threshold['TP'] = confusion_matrix_dict[(True, True)]
        if 'FN' in output_metrics:
            metrics_by_threshold['FN'] = confusion_matrix_dict[(True, False)]
        if 'FP' in output_metrics:
            metrics_by_threshold['FP'] = confusion_matrix_dict[(False, True)]
        if 'TN' in output_metrics:
            metrics_by_threshold['TN'] = confusion_matrix_dict[(False, False)]
        if 'Recall' in output_metrics:
            metrics_by_threshold['Recall'] = confusion_matrix_nor_true[(True, True)]
        if 'FNR' in output_metrics:
            metrics_by_threshold['FNR'] = confusion_matrix_nor_true[(True, False)]
        if 'FPR' in output_metrics:
            metrics_by_threshold['FPR'] = confusion_matrix_nor_true[(False, True)]
        if 'TNR' in output_metrics:
            metrics_by_threshold['TNR'] = confusion_matrix_nor_true[(False, False)]
        if 'Precision' in output_metrics:
            metrics_by_threshold['Precision'] = confusion_matrix_nor_predicted[(True, True)]
        if 'FOR' in output_metrics:
            metrics_by_threshold['FOR'] = confusion_matrix_nor_predicted[(True, False)]
        if 'FDR' in output_metrics:
            metrics_by_threshold['FDR'] = confusion_matrix_nor_predicted[(False, True)]
        if 'NPV' in output_metrics:
            metrics_by_threshold['NPV'] = confusion_matrix_nor_predicted[(False, False)]
        if 'Prevalence' in output_metrics:
            metrics_by_threshold['Prevalence'] = \
                (confusion_matrix_dict[(True, True)] + confusion_matrix_dict[(True, False)]) / \
                sum(confusion_matrix_dict.values())
        if 'Accuracy' in output_metrics:
            metrics_by_threshold['Accuracy'] = \
                (confusion_matrix_dict[(True, True)] + confusion_matrix_dict[(False, False)]) / \
                sum(confusion_matrix_dict.values())
        if 'LR+' in output_metrics:
            # positive likelihood ratio:
            try:
                metrics_by_threshold['LR+'] = confusion_matrix_nor_true[(True, True)] / confusion_matrix_nor_true[(False, True)]
            except ZeroDivisionError:
                metrics_by_threshold['LR+'] = '-'
        if 'LR-' in output_metrics:
            # negative likelihood ratio:
            try:
                metrics_by_threshold['LR-'] = confusion_matrix_nor_true[(True, False)] / confusion_matrix_nor_true[(False, False)]
            except ZeroDivisionError:
                metrics_by_threshold['LR-'] = '-'
        if 'DOR' in output_metrics:
            # diagnostic odds ratio:
            try:
                metrics_by_threshold['DOR'] = (confusion_matrix_nor_true[(True, True)] / confusion_matrix_nor_true[(False, True)]) / \
                                              (confusion_matrix_nor_true[(True, False)] / confusion_matrix_nor_true[(False, False)])
            except ZeroDivisionError:
                metrics_by_threshold['DOR'] = '-'
        if 'F1' in output_metrics:
            # F1 score:
            try:
                metrics_by_threshold['F1'] = 2 * (confusion_matrix_nor_true[(True, True)] * confusion_matrix_nor_predicted[(True, True)]) / \
                                                 (confusion_matrix_nor_true[(True, True)] + confusion_matrix_nor_predicted[(True, True)])
            except ZeroDivisionError:
                metrics_by_threshold['F1'] = '-'

        metrics_by_thresholds[threshold] = metrics_by_threshold

    if table:
        if 'metric_order' in kwargs:
            metric_order = kwargs['metric_order']
        else:
            metric_order = None
        metrics_by_thresholds = \
            convert_confusion_matrix_by_prob_to_table_with_reformat_precision(metrics_by_thresholds=metrics_by_thresholds,
                                                                              metric_order=metric_order)

    return metrics_by_thresholds
29bc8808ae1f35f13e52ac26e4e1993c423c6dc6
3,659,439
from typing import Sequence from typing import Mapping import itertools def group_slaves_by_key_func( key_func: _GenericNodeGroupingFunctionT, slaves: Sequence[_GenericNodeT], sort_func: _GenericNodeSortFunctionT = None, ) -> Mapping[_KeyFuncRetT, Sequence[_GenericNodeT]]: """ Given a function for grouping slaves, return a dict where keys are the unique values returned by the key_func and the values are all those slaves which have that specific value. :param key_func: a function which consumes a slave and returns a value :param slaves: a list of slaves :returns: a dict of key: [slaves] """ sorted_slaves: Sequence[_GenericNodeT] if sort_func is None: sorted_slaves = sorted(slaves, key=key_func) else: sorted_slaves = sort_func(slaves) return {k: list(v) for k, v in itertools.groupby(sorted_slaves, key=key_func)}
c3e286d2ff618758cd86c16f1b6685faea4b4d7a
3,659,440
def init_clfs(): """ init classifiers to train Returns: dict, clfs """ clfs = dict() # clfs['xgb'] = XGBClassifier(n_jobs=-1) clfs['lsvc'] = LinearSVC() return clfs
4725656eda4e6991cc215bcd5a209ff23171eea6
3,659,441
def get_field_types(): """Get a dict with all registration field types.""" return get_field_definitions(RegistrationFormFieldBase)
a9fe05535a541a7a5ada74dc9138a6c2ab29f528
3,659,442
def get_md_links(filepath): """Get markdown links from a md file. The links' order of appearance in the file IS preserved in the output. This is to check for syntax of the format [...](...). The returned 'links' inside the () are not checked for validity or subtle differences (e.g. '/' vs no '/' at the end of a URL). Args: filepath (pathlib Path): Path object representing the file from which info will be extracted. Returns: list of strings """ text_str = _get_ascii_plaintext_from_md_file(filepath) links = _get_all_md_link_info_from_ascii_plaintext(text_str) if links: # links only, not their text return [t[-1] for t in links] else: return links
3076f77802965cb281101530f4ab360e5996f627
3,659,443
def get_reactor_logs(project_id, application_id, api_key=None, **request_kwargs): """ Get the logs of a Reactor script. :param project_id: The Project of the Application. :type project_id: str :param application_id: The Application to get the script logs for. :type application_id: str :param api_key: The API key to authorize request against. :type api_key: str :return: """ url = '/projects/{}/applications/{}/reactorLogs'.format( project_id, application_id) return utils.request('GET', url, api_key=api_key, accept=True, **request_kwargs)
82743619292f387708e7b1dc3fe93c59e232d1cf
3,659,445
def summation_i_squared(n): """Summation without for loop""" if not isinstance(n, int) or n < 1: return None return int(((n*(n+1)*(2*n+1))/6))
dec0aba274bcaf3e3a821db5962af51d39835438
3,659,447
def str_to_number(this): """ Convert string to a Number """ try: return mknumber(int(this.value)) except ValueError: return mknumber(float(this.value))
e67df9c0de5a5cdbc76a3026f7e31cd3190013c4
3,659,448
def plotTSNE(Xdata, target = None, useMulti=True, num=2500, savename=None, njobs=4, size=4, cmap=None, dim=(12,8)): """ Plot TSNE for training data Inputs: > Xdata: The training feature data (DataFrame) > target: The training target data (Series) > num (2500 by default): The number of rows to use Output: None """ sns.set(style="ticks") if Xdata is None: print("Xdata is NONE in plotTSNE!") return None if not isDataFrame(Xdata): print("Xdata is not a Pandas DataFrame!") return None if target is not None: if not isSeries(target): print("target is not a Pandas Series!") return None print("Computing TSNE for {0} events with {1} features".format(num, Xdata.shape[1])) projection, tsneFeatures, tsneTarget = computeTSNE(Xdata=Xdata, target=target, useMulti=useMulti, num=num, njobs=njobs) print("Plotting TSNE for {0} events".format(num)) showTSNE(projection=projection, target=target, savename=savename, title="TSNE", size=size, cmap=cmap, dim=dim) return projection, tsneFeatures, tsneTarget
9751f861df2d67516e93218000d23e23ba0ad4fe
3,659,450
def adjust_contrast(img, contrast_factor): """Adjust contrast of an Image. Args: img (PIL Image): PIL Image to be adjusted. contrast_factor (float): How much to adjust the contrast. Can be any non negative number. 0 gives a solid gray image, 1 gives the original image while 2 increases the contrast by a factor of 2. Returns: PIL Image: Contrast adjusted image. """ if not is_pil_image(img): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) enhancer = ImageEnhance.Contrast(img) img = enhancer.enhance(contrast_factor) return img
aedd8bb489df64138189626585228ffc086e2428
3,659,452
def matplotlib_view(gviz: Digraph): """ Views the diagram using Matplotlib Parameters --------------- gviz Graphviz """ return gview.matplotlib_view(gviz)
9eb0a686c6d01a7d24273bbbc6ddb9b4ee7cb9ac
3,659,453
def shuf_repeat(lst, count):
    """
    Xiaolong's code expects LMDBs with the train list shuffled and
    repeated, so creating that here to avoid multiple steps.
    """
    final_list = []
    # Materialize the range as a list so it can be shuffled in place (Python 3).
    ordering = list(range(len(lst)))
    for _ in range(count):
        np.random.shuffle(ordering)
        final_list += [lst[i] for i in ordering]
    assert len(final_list) == count * len(lst)
    return final_list
fea9478aaa37f5b1c58d4a41126055d9cfa4b035
3,659,454
def create_query(table_name, schema_dict): """ see datatypes documentation here: https://www.postgresql.org/docs/11/datatype.html """ columns = db_schema[table_name] return ( f"goodbooks_{table_name}", [f"{column} {value}" for column, value in columns.items()], )
3b330d57f45ca053cfbe90952adc7aa1658ab76d
3,659,455
import requests def delete_repleciation(zfssrcfs, repel_uuid): """ZFS repleciation action status accepts: An exsistng ZFS action uuid (id). returns: the ZFS return status code. """ r = requests.delete( "%s/api/storage/v1/replication/actions/%s" % (url, repel_uuid), auth=zfsauth, verify=False, headers=jsonheader, ) logger.info("Deleting local repleciation for %s(%s)", repel_uuid, zfssrcfs) return r.status_code
f62ad1ec3e31ac7c54cf749982690631bb7b72d2
3,659,457
import aiohttp def get_logged_in_session(websession: aiohttp.ClientSession) -> RenaultSession: """Get initialised RenaultSession.""" return RenaultSession( websession=websession, country=TEST_COUNTRY, locale_details=TEST_LOCALE_DETAILS, credential_store=get_logged_in_credential_store(), )
87a5a439c5ca583c01151f340ce79f2f4a79558c
3,659,459
def __getStationName(name, id): """Construct a station name.""" name = name.replace("Meetstation", "") name = name.strip() name += " (%s)" % id return name
daab36ed8020536c8dd2c073c352634696a63f3e
3,659,460
def post_url(url): """Post url argument type :param str url: the post url :rtype: str :returns: the post url """ url = url.strip() if len(url) == 0: raise ArgumentTypeError("A url is required") elif len(url) > Url.URL_LENGTH: raise ArgumentTypeError("The url length is over the maximum allowed") return url
65d3c670580d6abfcfefcc8bcff35ca4e7d51f5c
3,659,462
def create_planner(request): """Create a new planner and redirect to new planner page.""" user = request.user plan = Plan.objects.create(author=user) plan.save() return HttpResponseRedirect(reverse('planner:edit_plan', args=[plan.id], ))
ab22dfa950208b44c308690dcff6e0f228faa406
3,659,463
def rule_matching_evaluation(df, model, seed_num, rein_num, eval_num, label_map, refer_label,
                             lime_flag=True, scan_flag=False, content_direction='forward',
                             xcol_name='text', n_cores=20):
    """An integrated rule extraction, refinement and validation process.

    On the dataset, sample based methods are used. Seed rules are extracted and unmatched samples
    in the reinforcement samples are re-fed into the extraction procedure. Validation is conducted
    in loops until a certain condition is met.

    Args:
        df: dataframe to be explained.
        model: model that can classify instances.
        seed_num: sample size for seed rule generation.
        rein_num: sample size for reinforcement procedure.
        eval_num: sample size for evaluation procedure.
        label_map: label text and value mappings.
        refer_label: the reference label for lime.
        lime_flag: on-off flag for lime based inference rules.
        scan_flag: on-off flag for LCS based scan rules.
        content_direction: cut out sequences from 'forward' or 'backward'
        xcol_name: column name for content to be explained in df.
        n_cores: number of cores to utilize.

    Returns:
        match_result: match result on evaluation test sets.
        rules_tobe_validate: final rules generated.
        matched_rules: rules hit by evaluation test samples.
    """
    # shuffle dataset (assign the result back, otherwise the shuffle is discarded)
    df = df.sample(frac=1, random_state=1)

    # generate seed rules
    df_for_seed = df[df['target'] == label_map['malicious']].sample(seed_num, random_state=2)
    rules_seed = get_rules(df_for_seed, model, label_map, 'malicious', lime_flag=lime_flag,
                           scan_flag=scan_flag, content_direction=content_direction, n_cores=n_cores)
    print(rules_seed)

    # reinforce rules
    max_iter_times = 2
    df_split = np.array_split(df, max_iter_times)
    rules_tobe_validate = rules_seed
    for i in range(0, max_iter_times):
        print('--------------------------------------------------------------------------------------------------------')
        print('--------------------------------------------------------------------------------------------------------')
        print('--------------------------------------------------------------------------------------------------------')
        print('Reinforce iteration loop %d' % (i+1))
        print('Seed rules number: %d' % rules_tobe_validate.shape[0])
        df_for_reinforce = df_split[i].sample(rein_num, random_state=3)
        match_result, rules_tobe_validate = rule_validation(df_for_reinforce, rules_tobe_validate, n_cores=n_cores)
        # # make duplicate removal for each validation
        # rules_tobe_validate = rule_deduplicate(rules_tobe_validate)
        metrics = get_metrics(match_result)
        print(metrics)
        if float(metrics['acc']) > 0.98:
            print("Validation finished, metrics is fine.")
            break
        else:
            # Reinforce the unrecognizable malicious flows according to validation results
            df_rein = match_result.loc[(match_result.match == 0) & (match_result.target == label_map['malicious'])][['text', 'target']]
            df_rein['text'] = df_rein['text'].astype(str)
            result_rein = get_rules(df_rein, model, label_map, 'malicious', lime_flag=lime_flag,
                                    scan_flag=scan_flag, content_direction=content_direction, n_cores=n_cores)
            result_final = pd.concat([rules_tobe_validate, result_rein])
            # index start from 1
            result_final.index = np.arange(1, len(result_final)+1)
            rules_tobe_validate = result_final
            print('New rein rules number: %d' % result_rein.shape[0])
            print('--------------------------------------------------------------------------------------------------------')
            print('--------------------------------------------------------------------------------------------------------')
            print('--------------------------------------------------------------------------------------------------------')

    df_for_final_eval = df.sample(seed_num, random_state=4)
    match_result, rules_tobe_validate = rule_validation(df_for_final_eval, rules_tobe_validate,
                                                        final_flag=True, n_cores=n_cores)

    if rules_tobe_validate.shape[0] == 0:
        print("Rule extraction failed!!!!!")
        return 0, 0, 0
    else:
        print('The final results are:')
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        matched_rules = get_final_rules(match_result, rules_tobe_validate)
        metrics = get_metrics(match_result)
        print(metrics)
        print(matched_rules)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print("Final validation finished")

    return match_result, rules_tobe_validate, matched_rules
9ed0d5653797544de384c41ef6d9e402d2a57403
3,659,464
def login(): """ Typical login page """ # if current user is already logged in, then don't log in again if current_user.is_authenticated: return redirect(url_for('index')) form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(username=form.username.data).first() if user is None or not user.check_password(form.password.data): flash('Invalid username or password') return redirect(url_for('login')) # user exists and password is correct login_user(user, remember=form.remember_me.data) # if user came from a local page, then return them to that # page after authentication ... else go to /index next_page = request.args.get('next') if not next_page or url_parse(next_page).netloc != '': next_page = url_for('index') return redirect(next_page) # GET just renders the empty login screen return render_template('login.html', title='Sign In', form=form)
e4114979a6b5b5845f32442bb66ee0798357f4e7
3,659,465
def create_timeperiod_map( start: spec.Timestamp = None, end: spec.Timestamp = None, length: spec.Timelength = None, ) -> spec.TimeperiodMap: """create Timeperiod with representation TimeperiodMap ## Inputs - start: Timestamp - end: Timestamp - length: Timelength ## Returns - TimeperiodMap """ start, end = compute_start_end(start=start, end=end, length=length) return {'start': start, 'end': end}
c8087ea252e86b97c55376bfb21b93c2b50e3b19
3,659,466
import requests async def patched_send_async(self, *args, **kwargs): """Patched send function that push to queue idx of server to which request is routed.""" buf = args[0] if buf and len(buf) >= 6: op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) # Filter only caches operation. if 1000 <= op_code < 1100: requests.append(self.port % 100) return await old_send_async(self, *args, **kwargs)
c78c9b437547266b4bfa82627c45e3c7c6450049
3,659,467
from datetime import datetime def add_event_records(df, event_type, event_date): """Add event records for the event type.""" log(f'Adding {DATASET_ID} event records for {event_type}') this_year = datetime.now().year df = df.loc[df[event_date].notnull(), :].copy() df['event_id'] = db.create_ids(df, 'events') df['dataset_id'] = DATASET_ID df['year'] = df[event_date].dt.strftime('%Y').astype(int) df['year'] = df['year'].apply(lambda x: x - 100 if x > this_year else x) df['day'] = df[event_date].dt.strftime('%j').astype(int) df['event_type'] = event_type df['event_json'] = util.json_object(df, EVENT_FIELDS) df.loc[:, db.EVENT_FIELDS].to_sql( 'events', db.connect(), if_exists='append', index=False) return df
d3e804d9b24274e5a87e1e470f1f758214e1f805
3,659,468
def _renderPath(path,drawFuncs,countOnly=False,forceClose=False): """Helper function for renderers.""" # this could be a method of Path... points = path.points i = 0 hadClosePath = 0 hadMoveTo = 0 active = not countOnly for op in path.operators: if op == _MOVETO: if forceClose: if hadMoveTo and pop!=_CLOSEPATH: hadClosePath += 1 if active: drawFuncs[_CLOSEPATH]() hadMoveTo += 1 nArgs = _PATH_OP_ARG_COUNT[op] j = i + nArgs drawFuncs[op](*points[i:j]) i = j if op == _CLOSEPATH: hadClosePath += 1 pop = op if forceClose and hadMoveTo and pop!=_CLOSEPATH: hadClosePath += 1 if active: drawFuncs[_CLOSEPATH]() return hadMoveTo == hadClosePath
17a2fc3224b2ba80de9dee0110468c4d934281b7
3,659,469
def _search_focus(s, code=None): """ Search for a particular module / presentation. The search should return only a single item. """ if not code: code = input("Module code (e.g. TM129-17J): ") results = _search_by_code(s, code) if not len(results): print('Nothing found for "{}"'.format(code)) elif len(results) > 1: print( "Please be more specific:\n\t{}\n".format( "\n\t".join([r[0].split(" ")[0] for r in results]) ) ) else: return results[0] return (None, None)
8eec36dbe48c1825d742c9834776a7a0705429b6
3,659,470
def parse_line(sample): """Parse an ndjson line and return ink (as np array) and classname.""" class_name = sample["word"] inkarray = sample["drawing"] stroke_lengths = [len(stroke[0]) for stroke in inkarray] total_points = sum(stroke_lengths) np_ink = np.zeros((total_points, 3), dtype=np.float32) current_t = 0 for stroke in inkarray: for i in [0, 1]: np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i] current_t += len(stroke[0]) np_ink[current_t - 1, 2] = 1 # stroke_end # Preprocessing. # 1. Size normalization. lower = np.min(np_ink[:, 0:2], axis=0) upper = np.max(np_ink[:, 0:2], axis=0) scale = upper - lower scale[scale == 0] = 1 np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale # 2. Compute deltas. np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2] return np_ink, class_name
19d20f7e67b58d699c0aea47f1f03095a957f757
3,659,471
def evalRPN(self, tokens):  # ! evaluate a Reverse Polish Notation expression, mainly using a stack
    """
    :type tokens: List[str]
    :rtype: int
    """
    stack = []
    for item in tokens:
        # print(stack)
        if item.isdigit():
            stack.append(int(item))
        if item[0] == '-' and len(item) > 1 and item[1:].isdigit():
            stack.append(int(item))
        if item == '*':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(num1 * num2)
        if item == '/':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(int(num2 / num1))
        if item == '+':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(num1 + num2)
        if item == '-':
            num1 = stack.pop()
            num2 = stack.pop()
            stack.append(num2 - num1)
    return stack[0]
6b2050f6f635324878116371cd81a6d25ea31240
3,659,472
def _validate_flags(): """Returns True if flag values are valid or prints error and returns False.""" if FLAGS.list_ports: print("Input ports: '%s'" % ( "', '".join(midi_hub.get_available_input_ports()))) print("Ouput ports: '%s'" % ( "', '".join(midi_hub.get_available_output_ports()))) return False if FLAGS.bundle_files is None: print('--bundle_files must be specified.') return False if (len(FLAGS.bundle_files.split(',')) > 1 and FLAGS.generator_select_control_number is None): tf.logging.warning( 'You have specified multiple bundle files (generators), without ' 'setting `--generator_select_control_number`. You will only be able to ' 'use the first generator (%s).', FLAGS.bundle_files[0]) return True
812791a8c71cc354a1ebe32f3fa9a3cc0f1c0182
3,659,473
def proto_test(test): """ If test is a ProtoTest, I just return it. Otherwise I create a ProtoTest out of test and return it. """ if isinstance(test, ProtoTest): return test else: return ProtoTest(test)
3326ea07ae5e4f90d3ae49cedee7b16aa97a3c65
3,659,474
def get_frames(): """Get frames for an episode Params: episode: int The episode for which the frames shall be returned Returns: frames: dict The frames for an episode per timestep """ episode = int(request.args.get('user')) frames = data_preprocessor.get_frames_for_episode(episode) return frames, 200, JSON_TYPE
1180c38175ef07f5e58ce8b77d748f6c1c1ab17b
3,659,475
def remove(s1, s2):
    """
    Returns a copy of s1, with all characters in s2 removed.

    Examples:
        remove('abc','ab') returns 'c'
        remove('abc','xy') returns 'abc'
        remove('hello world','ol') returns 'he wrd'

    Parameter s1: the string to copy
    Precondition: s1 is a string

    Parameter s2: the characters to remove
    Precondition: s2 is a string
    """
    assert isinstance(s1, str)
    assert isinstance(s2, str)
    result = ''
    for x in s1:
        if not x in s2:
            result = result + x
    return result
089107767063309d1cc34360ae290e7fa74133e7
3,659,476
def get_issuer_plan_ids(issuer): """Given an issuer id, return all of the plan ids registered to that issuer.""" df = pd.read_csv(PATH_TO_PLANS) df = df[df.IssuerId.astype(str) == issuer] return set(df.StandardComponentId.unique())
b41b36b70000736acde63673961f92231a62f9a4
3,659,478
def parse_coords(lines): """Parse skbio's ordination results file into coords, labels, eigvals, pct_explained. Returns: - list of sample labels in order - array of coords (rows = samples, cols = axes in descending order) - list of eigenvalues - list of percent variance explained For the file format check skbio.math.stats.ordination.OrdinationResults.from_file Strategy: read the file using skbio's parser and return the objects we want """ try: pcoa_results = OrdinationResults.from_file(lines) return (pcoa_results.site_ids, pcoa_results.site, pcoa_results.eigvals, pcoa_results.proportion_explained) except FileFormatError: if type(lines) == file: lines.seek(0) return qiime_parse_coords(lines)
fec53839f5f995f94f07120cac5bab1ba66f7b4c
3,659,480
def run_ann(model, train, test, params_save_path, iteration, optimizer, loss, callbacks=None, valid=None, shuffle_training=True, batch_size=16, num_epochs=30): """ Run analog network with cross-validation :param batch_size: batch size during training :param model: reference to the tensorflow model :param train: pair of training data (x_train, y_train) :param valid: pair of validation data (x_val, y_val) :param test: pair of testing data (x_test, y_test) :param params_save_path: output path to save weights of the network :param iteration: number of the iteration in CV :param shuffle_training: shuffle samples :param num_epochs: number of epochs to train for :return: accuracy, precision, recall, f1 and confusion matrix from the testing data """ x_train, y_train = train[0], train[1] x_test, y_test = test[0], test[1] if valid is not None: x_valid, y_valid = valid[0], valid[1] converter = nengo_dl.Converter(model) with nengo_dl.Simulator(converter.net, minibatch_size=batch_size) as simulator: simulator.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) input_layer = converter.inputs[model.get_layer('input_layer')] # get the input layer reference output_layer = converter.outputs[model.get_layer('output_layer')] # get the output layer reference # fit the model with the training data simulator.fit( x={input_layer: x_train}, y={output_layer: y_train}, validation_data=( {input_layer: x_valid}, {output_layer: y_valid} ) if valid is not None else None, epochs=num_epochs, shuffle=shuffle_training, callbacks=callbacks # early stop to avoid overfitting ) simulator.save_params(params_save_path) # save weights to the file # Get the statistics accuracy, precision, recall, f1, confusion_matrix = get_metrics(simulator, output_layer, x_test, y_test, batch_size, f'{iteration}. CNN') return { 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'confusion_matrix': confusion_matrix }
9df68d8c6cdf6df08177bd1cc5d3116c10ae073e
3,659,481
def get_sector(session, sector_name=None, sector_id=None): """ Get a sector by it's name or id. """ return get_by_name_or_id(session, Sector, model_id=sector_id, name=sector_name)
69de99bbdd630fb0cc5412c2b3124dff819287ed
3,659,482
def is_valid_pre_6_2_version(xml): """Returns whether the given XML object corresponds to an XML output file of Quantum ESPRESSO pw.x pre v6.2 :param xml: a parsed XML output file :return: boolean, True when the XML was produced by Quantum ESPRESSO with the old XML format """ element_header = xml.find('HEADER') if element_header is None: return False element_format = element_header.find('FORMAT') if element_format is None: return False try: name = element_format.attrib['NAME'] except KeyError: return False if name != 'QEXML': return False return True
80bda73addc68a88b2a1dc5828c0553cbaf7e6f2
3,659,483
import warnings def exportdf (df =None, refout:str =None, to:str =None, savepath:str =None, modname:str ='_wexported_', reset_index:bool =True): """ Export dataframe ``df`` to `refout` files. `refout` file can be Excell sheet file or '.json' file. To get more details about the `writef` decorator , see :doc:`watex.utils.decorator.writef`. :param refout: Output filename. If not given will be created refering to the exported date. :param to: Export type; Can be `.xlsx` , `.csv`, `.json` and else. :param savepath: Path to save the `refout` filename. If not given will be created. :param modname: Folder to hold the `refout` file. Change it accordingly. :returns: - `df_`: new dataframe to be exported. """ if df is None : warnings.warn( 'Once ``df`` arguments in decorator :`class:~decorator.writef`' ' is selected. The main type of file ready to be written MUST be ' 'a pd.DataFrame format. If not an error raises. Please refer to ' ':doc:`~.utils.decorator.writef` for more details.') raise Wex.WATexError_file_handling( 'No dataframe detected. Please provided your dataFrame.') df_ =df.copy(deep=True) if reset_index is True : df_.reset_index(inplace =True) if savepath is None : savepath = savePath(modname) return df_, to, refout, savepath, reset_index
0bc6d2750f236c5f3e529b2489be47658ddbf2d9
3,659,484
def clean_bpoa_seniority_list(csv): """Clean a digitized BPOA seniority list.""" dirty = pd.read_csv(csv) clean = pd.DataFrame() clean["job_title"] = dirty["Rank"] clean["last_name"] = dirty["Last name"] clean["first_name"] = dirty["First Name"] clean = clean.apply(correct_name, axis=1) clean["star_no"] = dirty["Badge No."] clean["employment_date"] = dirty["Hire Date"].apply(pd.to_datetime) return clean
b1af748d92c4cdced4a77fd3799dada318c0f57e
3,659,485
def addMovieElement(findings, data): """ Helper Function which handles unavailable information for each movie""" if len(findings) != 0: data.append(findings[0]) else: data.append("") return data
af3c45c8b8d4c0cb7ba1cac4925d0f5998affe93
3,659,487
from typing import Optional def get_bst_using_min_and_max_value(preorder: list) -> Node: """ time complexity: O(n) space complexity: O(n) """ def construct_tree(min_: int, max_: int) -> Optional[Node]: nonlocal pre_index nonlocal l if pre_index >= l: return None value = preorder[pre_index] if min_ < value < max_: node = Node(value) pre_index += 1 node.left = construct_tree(min_, value) node.right = construct_tree(value, max_) return node return None pre_index: int = 0 l: int = len(preorder) return construct_tree(-1_000_000, 1_000_000)
809c74967e73c82a428f317d8551432bb392d5ea
3,659,488
import math def qwtStepSize(intervalSize, maxSteps, base): """this version often doesn't find the best ticks: f.e for 15: 5, 10""" minStep = divideInterval(intervalSize, maxSteps, base) if minStep != 0.0: # # ticks per interval numTicks = math.ceil(abs(intervalSize / minStep)) - 1 # Do the minor steps fit into the interval? if ( qwtFuzzyCompare( (numTicks + 1) * abs(minStep), abs(intervalSize), intervalSize ) > 0 ): # The minor steps doesn't fit into the interval return 0.5 * intervalSize return minStep
57d1c4140e32dbf4a8bd0e306b9c10d4e9dae9bd
3,659,489
def get_trimmed_glyph_name(gname, num): """ Glyph names cannot have more than 31 characters. See https://docs.microsoft.com/en-us/typography/opentype/spec/... recom#39post39-table Trims an input string and appends a number to it. """ suffix = '_{}'.format(num) return gname[:31 - len(suffix)] + suffix
a5e90163d15bd4fc0b315414fffd2ac227768ab0
3,659,490
def vmatrix(ddir, file_prefix): """ generate vmatrix DataFile """ name = autofile.name.vmatrix(file_prefix) writer_ = autofile.write.vmatrix reader_ = autofile.read.vmatrix return factory.DataFile(ddir=ddir, name=name, writer_=writer_, reader_=reader_)
b9303e08f10e0604fde7b40116b74e66aac553dc
3,659,491
import ast from typing import Callable from typing import MutableMapping from typing import Union import inspect def get_argument_sources( source: Source, node: ast.Call, func: Callable, vars_only: bool, pos_only: bool ) -> MutableMapping[str, Union[ast.AST, str]]: """Get the sources for argument from an ast.Call node >>> def func(a, b, c, d=4): >>> ... >>> x = y = z = 1 >>> func(y, x, c=z) >>> # argument_sources = {'a': 'y', 'b', 'x', 'c': 'z'} >>> func(y, x, c=1) >>> # argument_sources = {'a': 'y', 'b', 'x', 'c': ast.Num(n=1)} """ # <Signature (a, b, c, d=4)> signature = inspect.signature(func, follow_wrapped=False) # func(y, x, c=z) # ['y', 'x'], {'c': 'z'} arg_sources = [argnode_source(source, argnode, vars_only) for argnode in node.args] kwarg_sources = { argnode.arg: argnode_source(source, argnode.value, vars_only) for argnode in node.keywords } if not pos_only else {} bound_args = signature.bind_partial(*arg_sources, **kwarg_sources) argument_sources = bound_args.arguments # see if *args and **kwargs have anything assigned # if not, assign () and {} to them for parameter in signature.parameters.values(): if parameter.kind == inspect.Parameter.VAR_POSITIONAL: argument_sources.setdefault(parameter.name, ()) if parameter.kind == inspect.Parameter.VAR_KEYWORD: argument_sources.setdefault(parameter.name, {}) return argument_sources
1ab344b5ccf9754ade06210e74540db51fe8c671
3,659,493
def rivers_by_station_number(stations, N):
    """Uses stations_by_rivers to build a river -> stations dictionary, then iterates
    over it to collect (river, number of stations) tuples and returns the N rivers with
    the most stations, including any rivers tied with the Nth entry."""
    stationsOfRivers = stations_by_rivers(stations)
    listOfNumberStations = []
    for river in stationsOfRivers:
        listOfNumberStations.append((river, len(stationsOfRivers[river])))
    listofNumberStationsSorted = sorted_by_key(listOfNumberStations, 1, True)
    boo = True
    while boo == True:
        if listofNumberStationsSorted[N-1][1] == listofNumberStationsSorted[N][1]:
            N += 1
        else:
            boo = False
    return listofNumberStationsSorted[:N]
ca159843f10cbadf5a35529c45656121672972e0
3,659,495
import itertools def generate_itoa_dict( bucket_values=[-0.33, 0, 0.33], valid_movement_direction=[1, 1, 1, 1]): """ Set cartesian product to generate action combination spaces for the fetch environments valid_movement_direction: To set """ action_space_extended = [bucket_values if m == 1 else [0] for m in valid_movement_direction] return list(itertools.product(*action_space_extended))
b8264174857aeb9d64226cce1cd1625f7e65b726
3,659,496
import dateutil from datetime import datetime def try_convert(value, datetime_to_ms=False, precise=False): """Convert a str into more useful python type (datetime, float, int, bool), if possible Some precision may be lost (e.g. Decimal converted to a float) >>> try_convert('false') False >>> try_convert('123456789.123456') 123456789.123456 >>> try_convert('1234') 1234 >>> try_convert(1234) 1234 >>> try_convert(['1234']) ['1234'] >>> try_convert('12345678901234567890123456789012345678901234567890', precise=True) 12345678901234567890123456789012345678901234567890L >>> try_convert('12345678901234567890123456789012345678901234567890.1', precise=True) Decimal('12345678901234567890123456789012345678901234567890.1') """ if not isinstance(value, basestring): return value if value in db.YES_VALUES or value in db.TRUE_VALUES: return True elif value in db.NO_VALUES or value in db.FALSE_VALUES: return False elif value in db.NULL_VALUES: return None try: if not precise: try: return int(value) except: try: return float(value) except: pass else: dec, i, f = None, None, None try: dec = Decimal(value) except: return try_convert(value, precise=False) try: i = int(value) except: try: f = float(value) except: pass if dec is not None: if dec == i: return i elif dec == f: return f return dec except: pass try: dt = dateutil.parse(value) if dt and isinstance(dt, datetime.datetime) and (3000 >= dt.year >= 1900): if datetime_to_ms: return db.datetime_in_milliseconds(dt) return dt except: pass return value
59f8a16310e4ac6604a145dcff1ff390df259da9
3,659,497
def signin(request, auth_form=AuthenticationForm, template_name='accounts/signin_form.html', redirect_field_name=REDIRECT_FIELD_NAME, redirect_signin_function=signin_redirect, extra_context=None): """ Signin using email or username with password. Signs a user in by combining email/username with password. If the combination is correct and the user :func:`is_active` the :func:`redirect_signin_function` is called with the arguments ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is is trying the login. The returned value of the function will be the URL that is redirected to. A user can also select to be remembered for ``ACCOUNTS_REMEMBER_DAYS``. :param auth_form: Form to use for signing the user in. Defaults to the :class:`AuthenticationForm` supplied by accounts. :param template_name: String defining the name of the template to use. Defaults to ``accounts/signin_form.html``. :param redirect_field_name: Form field name which contains the value for a redirect to the succeeding page. Defaults to ``next`` and is set in ``REDIRECT_FIELD_NAME`` setting. :param redirect_signin_function: Function which handles the redirect. This functions gets the value of ``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It must return a string which specifies the URI to redirect to. :param extra_context: A dictionary containing extra variables that should be passed to the rendered template. The ``form`` key is always the ``auth_form``. **Context** ``form`` Form used for authentication supplied by ``auth_form``. """ form = auth_form() if request.method == 'POST': form = auth_form(request.POST, request.FILES) if form.is_valid(): identification = form.cleaned_data['identification'] password = form.cleaned_data['password'] remember_me = form.cleaned_data['remember_me'] user = authenticate(identification=identification, password=password) if user.is_active: login(request, user) if remember_me: request.session.set_expiry(accounts_settings.ACCOUNTS_REMEMBER_ME_DAYS[1] * 86400) else: request.session.set_expiry(0) if accounts_settings.ACCOUNTS_USE_MESSAGES: messages.success(request, _('You have been signed in.'), fail_silently=True) # Whereto now? redirect_to = redirect_signin_function( request.GET.get(redirect_field_name), user) return redirect(redirect_to) else: return redirect(reverse('accounts_disabled', kwargs={'username': user.username})) if not extra_context: extra_context = dict() extra_context.update({ 'form': form, 'next': request.GET.get(redirect_field_name), }) return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
6a8536fb3a0c551ae4cdb7f01de622c012d0734c
3,659,498
import random def run_syncdb(database_info): """Make sure that the database tables are created. database_info -- a dictionary specifying the database info as dictated by Django; if None then the default database is used Return the identifier the import process should use. """ django.setup() dataset_identifier = 'default' if database_info: # create an entry in DATABASES if database_info is present dataset_identifier = '12345' while dataset_identifier in settings.DATABASES: dataset_identifier = str(random.randint(1, 2000000)) settings.DATABASES[dataset_identifier] = database_info call_command('migrate', database=dataset_identifier) return dataset_identifier
19da3e97226363fbee885ff8ee24c7abe0489d3c
3,659,499
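A hedged call sketch for run_syncdb; the connection values below are placeholders and Django settings must already be importable before the call.

database_info = {
    'ENGINE': 'django.db.backends.sqlite3',
    'NAME': '/tmp/import_job.sqlite3',  # hypothetical path
}
dataset_identifier = run_syncdb(database_info)
print('migrations applied for database alias:', dataset_identifier)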
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False, encoder=None, encoder_kwargs=None, ignore_update_check=False): """Performs a series of automated data cleaning transformations on the provided training and testing data sets Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations from only the training set, then applying those transformations to both the training and testing set. By doing so, this function will prevent information leak from the training set into the testing set. Parameters ---------- training_dataframe: pandas.DataFrame Training data set testing_dataframe: pandas.DataFrame Testing data set drop_nans: bool Drop all rows that have a NaN in any column (default: False) copy: bool Make a copy of the data set (default: False) encoder: category_encoders transformer The a valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder) encoder_kwargs: category_encoders The a valid sklearn transformer to encode categorical features. Default (None) ignore_update_check: bool Do not check for the latest version of datacleaner Returns ---------- output_training_dataframe: pandas.DataFrame Cleaned training data set output_testing_dataframe: pandas.DataFrame Cleaned testing data set """ global update_checked if ignore_update_check: update_checked = True if not update_checked: update_check('datacleaner', __version__) update_checked = True if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values): raise ValueError('The training and testing DataFrames do not have the same columns. ' 'Make sure that you are providing the same columns.') if copy: training_dataframe = training_dataframe.copy() testing_dataframe = testing_dataframe.copy() if drop_nans: training_dataframe.dropna(inplace=True) testing_dataframe.dropna(inplace=True) if encoder_kwargs is None: encoder_kwargs = {} for column in training_dataframe.columns.values: # Replace NaNs with the median or mode of the column depending on the column type try: column_median = training_dataframe[column].median() training_dataframe[column].fillna(column_median, inplace=True) testing_dataframe[column].fillna(column_median, inplace=True) except TypeError: column_mode = training_dataframe[column].mode()[0] training_dataframe[column].fillna(column_mode, inplace=True) testing_dataframe[column].fillna(column_mode, inplace=True) # Encode all strings with numerical equivalents if str(training_dataframe[column].values.dtype) == 'object': if encoder is not None: column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values) else: column_encoder = LabelEncoder().fit(training_dataframe[column].values) training_dataframe[column] = column_encoder.transform(training_dataframe[column].values) testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values) return training_dataframe, testing_dataframe
37b8221193c05db97dde355e06313a9372cd8193
3,659,500
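A small, self-contained sketch of autoclean_cv on toy frames, assuming the function above is importable; the column names are made up.

import pandas as pd

train = pd.DataFrame({'age': [20, None, 40], 'city': ['ams', 'nyc', 'ams']})
test = pd.DataFrame({'age': [30, 50, None], 'city': ['nyc', 'ams', 'nyc']})

# NaNs are filled from the training medians/modes; 'city' is label-encoded
# with an encoder fitted on the training column only.
clean_train, clean_test = autoclean_cv(train, test, copy=True,
                                       ignore_update_check=True)
print(clean_train.dtypes)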
import asyncio def make_coroutine_from_tree(tree, filename="<aexec>", symbol="single", local={}): """Make a coroutine from a tree structure.""" dct = {} tree.body[0].args.args = list(map(make_arg, local)) exec(compile(tree, filename, symbol), dct) return asyncio.coroutine(dct[CORO_NAME])(**local)
cbf4e0b0278abc0e929f4ed8b2a9c421b4e8f3c6
3,659,501
def update_Sigmai(Yi, Es, Vars): """ Return new Sigma_i: shape k """ return np.mean((Yi - Es) ** 2, axis=1) + np.mean(Vars, axis=1)
f2cb1fa7f5e6b48f033207ee6bb84b8e865c863c
3,659,502
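A shape check for update_Sigmai, assuming the function above is importable; the arrays are random stand-ins with k components and n samples each.

import numpy as np

k, n = 3, 5
Yi = np.random.randn(k, n)
Es = np.random.randn(k, n)
Vars = np.abs(np.random.randn(k, n))
print(update_Sigmai(Yi, Es, Vars).shape)  # -> (3,)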
def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_offsets = tf.cast(flat_offsets, tf.int64) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) # https://github.com/tensorflow/tensorflow/issues/36236 output_tensor = tf.gather(flat_sequence_tensor*1, flat_positions) return output_tensor
583ed7ce925ace45dd2a6c9a78efd0360bd141e0
3,659,503
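The flat-offset gather trick re-expressed in plain NumPy so it can be verified without a TF1 session; all names here are local to the sketch.

import numpy as np

batch_size, seq_length, width = 2, 4, 3
sequence = np.arange(batch_size * seq_length * width).reshape(
    batch_size, seq_length, width)
positions = np.array([[0, 2], [1, 3]])  # per-example positions to keep

# Offset each example's positions into the flattened (batch*seq, width) view.
flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
flat_positions = (positions + flat_offsets).reshape(-1)
gathered = sequence.reshape(batch_size * seq_length, width)[flat_positions]
print(gathered.shape)  # -> (4, 3)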
import typing def check_sub_schema_dict(sub_schema: typing.Any) -> dict: """Check that a sub schema in an allOf is a dict.""" if not isinstance(sub_schema, dict): raise exceptions.MalformedSchemaError( "The elements of allOf must be dictionaries." ) return sub_schema
b64313b28ab63b8342de7b0422cc8c9087a28462
3,659,504
def get_proto_root(workspace_root): """Gets the root protobuf directory. Args: workspace_root: context.label.workspace_root Returns: The directory relative to which generated include paths should be. """ if workspace_root: return "/{}".format(workspace_root) else: return ""
35cff0b28ee6c1893e5dba93593126c996ba72cc
3,659,505
def bwimcp(J, K, x, tr=.2, alpha=.05): """ Multiple comparisons for interactions in a split-plot design. The analysis is done by taking difference scores among all pairs of dependent groups and determining which of these differences differ across levels of Factor A using trimmed means. FWE is controlled via Hochberg's method. For MOM or M-estimators (possibly not implemented yet), use spmcpi which uses a bootstrap method :param J: int Number of J levels associated with Factor A :param K: int Number of K levels associated with Factor B :param x: Pandas DataFrame Each column represents a cell in the factorial design. For example, a 2x3 design would correspond to a DataFrame with 6 columns (levels of Factor A x levels of Factor B). Order your columns according to the following pattern (traversing each row in a matrix): - the first column contains data for level 1 of Factor A and level 1 of Factor B - the second column contains data for level 1 of Factor A and level 2 of Factor B - column `K` contains the data for level 1 of Factor A and level `K` of Factor B - column `K` + 1 contains the data for level 2 of Factor A and level 1 of Factor B - and so on ... :param tr: float Proportion to trim (default is .2) :param alpha: float Alpha level (default is .05) :return: Dictionary of results con: array Contrast matrix output: DataFrame Difference score, p-value, and critical value for each contrast relating to the interaction """ x=pandas_to_arrays(x) x=remove_nans_based_on_design(x, [J, K], 'between_within') MJ = (J ** 2 - J) // 2 MK = (K ** 2 - K) // 2 JMK = J * MK MJMK = MJ * MK Jm = J - 1 #output = np.zeros([MJMK, 7]) output = np.zeros([MJMK, 4]) _, _, con = con2way(J,K) m = np.array(np.arange(J*K)).reshape(J,K) ic=0 test=np.array([]) for j in range(J): for jj in range(J): if j < jj: for k in range(K): for kk in range(K): if k<kk: #output[ic, 0]=j #output[ic, 1]=jj #output[ic, 2]=k output[ic, 0]=ic x1 = x[m[j, k]] - x[m[j, kk]] x2 = x[m[jj, k]] - x[m[jj, kk]] #print(f'X1 comparing cells {j, k} to {j, kk}') #print(f'X2 comparing cells {jj, k} to {jj, kk}') temp = yuen(x1, x2) output[ic, 1] = trim_mean(x1, tr) - trim_mean(x2, tr) #output[ic, 4] = trim_mean(x1, tr) - trim_mean(x2, tr) test=np.append(test, temp['p_value']) output[ic, 2] = test[ic] #output[ic, 5] = test[ic] ic+=1 ncon = len(test) dvec = alpha / np.arange(1, ncon+1) temp2 = (-test).argsort() zvec = dvec[0:ncon] #output[temp2, 6] = zvec output[temp2, 3] = zvec #output[:, 6] = output[:, 6] output[:, 3] = output[:, 3] col_names=["con_num", "psihat", "p_value", "p_crit"] #col_names=["A_x", "A_y", "B_x", "B_y", "psihat", "p_value", "p_crit"] results=pd.DataFrame(output, columns=col_names) results={'con': con, 'output': pd.DataFrame(output, columns=col_names)} return results
82d2b77464e5bc37fc101624ed0d88205ab11ff9
3,659,506
def trigger_decoder(mode: str, trigger_path: str=None) -> tuple: """Trigger Decoder. Given a mode of operation (calibration, copy phrase, etc) and a path to the trigger location (*.txt file), this function will split into symbols (A, ..., Z), timing info (32.222), and targetness (target, nontarget). It will also extract any saved offset information and pass that back. PARAMETERS ---------- :param: mode: mode of bci operation. Note the mode changes how triggers are saved. :param: trigger_path: [Optional] path to triggers.txt file :return: tuple: symbol_info, trial_target_info, timing_info, offset. """ # Load triggers.txt if not trigger_path: trigger_path = load_txt_data() # Get every line of trigger.txt with open(trigger_path, 'r+') as text_file: # most trigger files has three columns: # SYMBOL, TARGETNESS_INFO[OPTIONAL], TIMING trigger_txt = [line.split() for line in text_file] # extract stimuli from the text stimuli_triggers = [line for line in trigger_txt if line[1] == 'target' or line[1] == 'nontarget'] # from the stimuli array, pull our the symbol information symbol_info = list(map(lambda x: x[0], stimuli_triggers)) # If operating mode is free spell, it only has 2 columns # otherwise, it has 3 if mode != 'free_spell': trial_target_info = list(map(lambda x: x[1], stimuli_triggers)) timing_info = list(map(lambda x: eval(x[2]), stimuli_triggers)) else: trial_target_info = None timing_info = list(map(lambda x: eval(x[1]), stimuli_triggers)) # Get any offset or calibration triggers offset_array = [line[2] for line in trigger_txt if line[0] == 'offset'] calib_trigger_array = [line[2] for line in trigger_txt if line[0] == 'calibration_trigger'] # If present, calculate the offset between the DAQ and Triggers from display if len(offset_array) == 1 and len(calib_trigger_array) == 1: # Extract the offset and calibration trigger time offset_time = float(offset_array[0]) calib_trigger_time = float(calib_trigger_array[0]) # Calculate the offset (ASSUMES DAQ STARTED FIRST!) offset = offset_time - calib_trigger_time # Otherwise, assume no observed offset else: offset = 0 return symbol_info, trial_target_info, timing_info, offset
e4d19203e655173f638dc38c0123f88c7342aed1
3,659,507
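A runnable sketch of trigger_decoder on a hypothetical triggers.txt written to a temp file; the exact trigger labels are assumptions inferred from the parser above.

import tempfile

sample = (
    "calibration_trigger calib 2.0\n"
    "A nontarget 3.25\n"
    "B target 3.55\n"
    "offset offset_correction 1.5\n"
)
with tempfile.NamedTemporaryFile('w+', suffix='.txt', delete=False) as f:
    f.write(sample)
    path = f.name

symbols, targets, times, offset = trigger_decoder(mode='calibration',
                                                  trigger_path=path)
print(symbols, targets, times, offset)  # ['A', 'B'] ... offset = -0.5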
def method_comparison(filename=None, extension="png", usetex=False, passed_ax=None, **kwargs): """ Create a plot comparing how estimated redshift changes as a function of dispersion measure for each DM-z relation. Parameters ---------- filename: string or None, optional The filename of the saved figure. Default: *None* extension: string, optional The format to save the figure. e.g "png", "pdf", "eps", etc... Default: "png" usetex: bool, optional Use LaTeX for for fonts. passed_ax: or None, optional Generates --------- A figure displaying how estimated redshift changes as a function of dispersion measure for each of the different cosmologies. """ set_rc_params(usetex) if passed_ax: ax = passed_ax else: fig = plt.figure(figsize=(8, 8), constrained_layout=True) ax = fig.add_subplot(111) method_list = methods.available_methods() dm_vals = np.linspace(0, 3000, 1000) colours = ["#1b9e77", "#d95f02", "#7570b3"] label = [r"$\rm{Ioka 2003}$", r"$\rm{Inoue 2004}$", r"$\rm{Zhang 2018}$"] for j, method in enumerate(method_list): z_vals = np.zeros(len(dm_vals)) if 'cosmology' in kwargs: cosmology = kwargs['cosmology'] else: cosmology = 'Planck18' table_name = "".join(["_".join([method, cosmology]), ".npz"]) lookup_table = table.load(table_name) for i, dm in enumerate(dm_vals): z_vals[i] = table.get_z_from_table(dm, lookup_table) ax.plot(dm_vals, z_vals, colours[j], label=label[j], **kwargs) if not passed_ax: ax.set_ylabel(r"$\rm{Redshift}$") ax.set_xlabel(r"$\rm{DM\ \left[pc \ cm^{-3}\right]}$") ax.legend(loc='lower right', frameon=False) if filename is not None: plt.savefig(".".join([filename, extension])) if passed_ax: return ax else: return fig
8c3714cca3aac5f0f7893dc981b68265bf6cea9f
3,659,508
def logCompression(pilImg): """Does log compression processing on a photo Args: pilImg (PIL Image format image): Image to be processed """ npImg = PILtoNumpy(pilImg) c = 255 / (np.log10(1 + np.amax(npImg))) for all_pixels in np.nditer(npImg, op_flags=['readwrite']): all_pixels[...] = c * np.log10(1 + all_pixels) return NumpytoPIL(npImg)
d6ab559182e7c836823d4c51fb6af395c1cd875f
3,659,509
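The per-pixel mapping logCompression applies, shown directly in NumPy (no PIL helpers needed); the sample pixel values are arbitrary.

import numpy as np

pixels = np.array([0, 10, 100, 255], dtype=float)
c = 255 / np.log10(1 + pixels.max())       # scale so the brightest pixel maps to 255
print(np.round(c * np.log10(1 + pixels)))  # -> [  0. 110. 212. 255.]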
def quantile_turnover(quantile_factor, quantile, period=1): """ Computes the proportion of names in a factor quantile that were not in that quantile in the previous period. Parameters ---------- quantile_factor : pd.Series DataFrame with date, asset and factor quantile. quantile : int Quantile on which to perform turnover analysis. period: int, optional Number of days over which to calculate the turnover. Returns ------- quant_turnover : pd.Series Period by period turnover for that quantile. """ quant_names = quantile_factor[quantile_factor == quantile] quant_name_sets = quant_names.groupby(level=['date']).apply( lambda x: set(x.index.get_level_values('asset'))) name_shifted = quant_name_sets.shift(period) new_names = (quant_name_sets - name_shifted).dropna() quant_turnover = new_names.apply( lambda x: len(x)) / quant_name_sets.apply(lambda x: len(x)) quant_turnover.name = quantile return quant_turnover
6c7b2afdd4c4f0a2dbf38064d2d8664a25370ca2
3,659,510
def dmp_div(f, g, u, K): """ Polynomial division with remainder in ``K[X]``. Examples ======== >>> from sympy.polys import ring, ZZ, QQ >>> R, x,y = ring("x,y", ZZ) >>> R.dmp_div(x**2 + x*y, 2*x + 2) (0, x**2 + x*y) >>> R, x,y = ring("x,y", QQ) >>> R.dmp_div(x**2 + x*y, 2*x + 2) (1/2*x + 1/2*y - 1/2, -y + 1) """ if K.is_Field: return dmp_ff_div(f, g, u, K) else: return dmp_rr_div(f, g, u, K)
1b8f2b2b9d57899862234233a70e7e76100b86be
3,659,511
def is_designated_holiday(timestamp):
    """
    Returns True if the date is one of Piedmont’s "designated holidays":

    - New Years Day (January 1st)
    - Memorial Day (last Monday of May)
    - Independence Day (July 4th)
    - Labor Day (First Monday of September)
    - Thanksgiving Day (4th Thursday in November)
    - Christmas Day (December 25th)
    """
    dow = timestamp.weekday()
    day = timestamp.day
    month = timestamp.month
    if month == JANUARY and timestamp.day == 1:
        return True
    elif month == MAY and dow == MONDAY and day >= 25:
        # the last Monday of May always falls on the 25th-31st
        return True
    elif month == JULY and day == 4:
        return True
    elif month == SEPTEMBER and dow == MONDAY and day < 8:
        return True
    elif month == NOVEMBER and dow == THURSDAY and 21 < day < 29:
        return True
    elif month == DECEMBER and day == 25:
        return True
    else:
        return False
e6137ac2c3258a3e51294ff432971c04f56137ec
3,659,512
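Spot checks against 2021 dates, assuming the module-level month/weekday constants used above (JANUARY, MONDAY, ...) are defined as usual.

from datetime import date

print(is_designated_holiday(date(2021, 7, 4)))    # True  (Independence Day)
print(is_designated_holiday(date(2021, 5, 31)))   # True  (Memorial Day 2021)
print(is_designated_holiday(date(2021, 11, 25)))  # True  (Thanksgiving 2021)
print(is_designated_holiday(date(2021, 7, 5)))    # False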
def check(val, desc=None, as_warn=False) -> SimpleAssertions: """ function based assertion call :param val: val to check :param desc: optional, description of val :param as_warn: if set, convert assertion error to warning message :return: assertionClass """ return SimpleAssertions(as_warn=as_warn).check(val, desc)
2115a0b16387cea0fef483a26a6c27daaf72387e
3,659,513
def ChangeExtension(filename, newExtension):
    """ChangeExtension(filename, newExtension) -> str

    Replaces the extension of the filename with the given one.
    If the given filename has no extension, the new extension is
    simply appended.

    arguments:
      filename      string corresponding to the filename whose
                    extension to change.
      newExtension  string corresponding to the new extension to
                    append. Do not prepend with a period ('.').

    returns:
      string corresponding to the new filename.
    """
    try:
        # Isolate the filename
        slashIndex = filename.rfind('/')
        backslashIndex = filename.rfind('\\')
        if backslashIndex > slashIndex:
            slashIndex = backslashIndex

        # Look for an existing extension
        periodIndex = filename.rfind('.')
        if periodIndex > slashIndex:
            return filename[0:periodIndex] + "." + newExtension
        else:
            return filename + "." + newExtension
    except IndexError:
        return ""
0909060e01226520280aeabde906ab9a8f0dfc5d
3,659,514
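Quick usage checks for ChangeExtension; the paths are illustrative.

print(ChangeExtension('photos/holiday.jpeg', 'png'))  # photos/holiday.png
print(ChangeExtension('C:\\data\\report', 'txt'))     # C:\data\report.txt
print(ChangeExtension('archive.tar.gz', 'zip'))       # archive.tar.zip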
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_masks": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "sent_label_ids": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100, seed=np.random.randint(10000)) d = d.apply(tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn
733ddf8b7add0cf9c610537cab0c31172260f0de
3,659,515
def synchrotron_thin_spectrum(freqs, ne, te, bfield): """Optically thin (unobsorbed) synchrotron spectrum. Units of erg/cm^3/s/Hz NY95b Eq 3.9 """ const = 4.43e-30 # erg/cm^3/s/Hz theta_e = K_BLTZ * te / (MELC * SPLC * SPLC) v0 = QELC * bfield / (2*np.pi*MELC*SPLC) xm = 2*freqs/(3*v0*np.square(theta_e)) iprime = _synch_fit_func_iprime(xm) esyn = const * 4*np.pi*ne*freqs*iprime/sp.special.kn(2, 1/theta_e) return esyn
1334cf0382eecd298472b4717b220a7ac3e96d0e
3,659,516
import base64
from email.mime.text import MIMEText


def create_message(service, to, subject, message_text):
    """Create a message for an email.

    Args:
      service: Authorized Gmail API service instance, used to look up the
        sender's own email address.
      to: Email address of the receiver.
      subject: The subject of the email message.
      message_text: The text of the email message.

    Returns:
      An object containing a base64url encoded email object.
    """
    message = MIMEText(message_text)
    users = service.users()
    myProfile = users.getProfile(userId='me').execute()
    message['to'] = to
    message['from'] = myProfile['emailAddress']
    message['subject'] = subject
    return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
6c6df7b1825c13cb8016840527b4dd81ac26d266
3,659,518
def numpy_ndarray(nb_arr): """Return a copy of numba DeviceNDArray data as a numpy.ndarray. """ return nb_arr.copy_to_host()
d6ee1c62428783344fe6232ef229a6dabc8f2a2f
3,659,519
def convert_to_dict(my_keys, my_values): """Merge a given list of keys and a list of values into a dictionary. Args: my_keys (list): A list of keys my_values (list): A list corresponding values Returns: Dict: Dictionary of the list of keys mapped to the list of values """ return dict(zip(my_keys, my_values))
e00690d27770539e6b9d2166835f6bd1b9c11c5a
3,659,520
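A one-line usage check for convert_to_dict; the keys and values are arbitrary.

keys = ['id', 'name', 'active']
values = [42, 'sensor-a', True]
print(convert_to_dict(keys, values))
# -> {'id': 42, 'name': 'sensor-a', 'active': True}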
def add_vit(request): """ Add a new vit with API, currently image and video are not supported """ user = KeyBackend().authenticate(request) if request.method == "POST": if request.user.is_authenticated: form = VitForm(request.POST) if form.is_valid(): vit = form.save(commit=False) vit.user = request.user vit.save() return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201) else: return JsonResponse({'error': 'No vit body provided'}, status=400) else: return JsonResponse({'error': 'You must be logged in'}, status=401) else: return JsonResponse({'error': 'Invalid request'}, status=400)
726776e036678cb79051d6ac800d5d883b947320
3,659,521
def long_control_state_trans(active, long_control_state, v_ego, v_target, v_pid, output_gb, brake_pressed, cruise_standstill, min_speed_can): """Update longitudinal control state machine""" stopping_target_speed = min_speed_can + STOPPING_TARGET_SPEED_OFFSET stopping_condition = (v_ego < 2.0 and cruise_standstill) or \ (v_ego < STOPPING_EGO_SPEED and ((v_pid < stopping_target_speed and v_target < stopping_target_speed) or brake_pressed)) starting_condition = v_target > STARTING_TARGET_SPEED and not cruise_standstill if not active: long_control_state = LongCtrlState.off else: if long_control_state == LongCtrlState.off: if active: long_control_state = LongCtrlState.pid elif long_control_state == LongCtrlState.pid: if stopping_condition: long_control_state = LongCtrlState.stopping elif long_control_state == LongCtrlState.stopping: if starting_condition: long_control_state = LongCtrlState.starting elif long_control_state == LongCtrlState.starting: if stopping_condition: long_control_state = LongCtrlState.stopping elif output_gb >= -BRAKE_THRESHOLD_TO_PID: long_control_state = LongCtrlState.pid return long_control_state
f13a5db692ce92fe36204eade10ebb2d54b9caed
3,659,522
def load_group_to_namedtuple(group: h5py.Group): """Returns namedtuple with name of group and key: values of group attrs e.g. srs1 group which has gpib: 1... will be returned as an srs1 namedtuple with .gpib etc """ # Check it was stored as a namedTuple if group.attrs.get('description', None) != 'NamedTuple': raise ValueError( f'Trying to load_group_to_named_tuple which has description: {group.attrs.get("description", None)}') # Get the name of the NamedTuple either through the stored name or the group name name = group.attrs.get('NT_name', None) if name is None: logger.warning('Did not find "name" attribute for NamedTuple, using folder name instead') name = group.name.split('/')[-1] # d = {key: val for key, val in group.attrs.items()} d = {key: get_attr(group, key) for key in group.attrs.keys()} # Remove HDF only descriptors for k in ['description', 'NT_name']: if k in d.keys(): del d[k] # Make the NamedTuple ntuple = namedtuple(name, d.keys()) filled_tuple = ntuple(**d) # Put values into tuple return filled_tuple
e33c0e1b367ddd2ebb745397d473c00452ba853f
3,659,523
import json def export_json(blocks=None, subsections=False): """ Returns JSON representation of parsed config structure :param blocks: List of blocks to export :param subsections: Export all subblocks :return: JSON-dumped string """ if blocks is not None: blocks = [_canonicalize_blockid(b) for b in blocks] if subsections: blocks = get_subblocks(blocks) return json.dumps(_config_dict(blocks))
90ae2ee10d6d23f091d079bd87478fa10d3a4083
3,659,524
def get_dummy_message(text): """Get a dummy message with a custom text""" return botogram.objects.messages.Message({ "message_id": 1, "from": {"id": 123, "first_name": "Nobody"}, "chat": {"id": -123, "type": "chat", "title": "Something"}, "date": 1, "text": text, })
0f39712381157b46aed345ef6b46c6b3cfe32d95
3,659,525
import time def list_ga4_entities(admin_api): """Get a dictionary of GA4 entity settings based on type. Args: admin_api: The Admin API object. Returns: A dictionary of GA4 entity setting lists. """ entities = { 'ga4_account_summaries': [], 'ga4_accounts': [], 'ga4_properties': [], 'ga4_data_streams': [], 'ga4_measurement_protocol_secrets': [], 'ga4_conversion_events': [], 'ga4_custom_dimensions': [], 'ga4_custom_metrics': [], 'ga4_dv360_link_proposals': [], 'ga4_dv360_links': [], 'ga4_firebase_links': [], 'ga4_google_ads_links': [] } for account_summary in admin_api.list_account_summaries(): a_dict = { 'name': account_summary.name, 'display_name': account_summary.display_name, 'account': account_summary.account, 'property_summaries': [] } for property_summary in account_summary.property_summaries: p_dict = { 'property': property_summary.property, 'display_name': property_summary.display_name } a_dict['property_summaries'].append(p_dict) entities['ga4_account_summaries'].append(a_dict) time.sleep(REQUEST_DELAY) for account in admin_api.list_accounts(): account_dict = { 'name': account.name, 'display_name': account.display_name, 'create_time': account.create_time, 'update_time': account.update_time, 'region_code': account.region_code, 'deleted': account.deleted } entities['ga4_accounts'].append(account_dict) time.sleep(REQUEST_DELAY) for account_summary in entities['ga4_account_summaries']: prop_request = ListPropertiesRequest( filter=f"parent:{account_summary['account']}") for prop in admin_api.list_properties(prop_request): time.sleep(REQUEST_DELAY) data_retention_settings = admin_api.get_data_retention_settings( name=(prop.name + '/dataRetentionSettings')) time.sleep(REQUEST_DELAY) google_signals_settings = admin_api.get_google_signals_settings( name=(prop.name + '/googleSignalsSettings')) ic_enum = prop.industry_category sl_enum = prop.service_level gss_state_enum = google_signals_settings.state gss_consent_enum = google_signals_settings.consent edr_enum = data_retention_settings.event_data_retention prop_dict = { 'name': prop.name, 'create_time': prop.create_time, 'update_time': prop.update_time, 'parent': prop.parent, 'display_name': prop.display_name, 'industry_category': IndustryCategory(ic_enum).name, 'time_zone': prop.time_zone, 'currency_code': prop.currency_code, 'service_level': ServiceLevel(sl_enum).name, 'delete_time': prop.delete_time, 'expire_time': prop.expire_time, 'account': account_summary['account'], 'data_sharing_settings': { 'name': data_retention_settings.name, 'event_data_retention': (DataRetentionSettings .RetentionDuration(edr_enum).name), 'reset_user_data_on_new_activity': data_retention_settings.reset_user_data_on_new_activity }, 'google_signals_settings': { 'name': google_signals_settings.name, 'state': GoogleSignalsState(gss_state_enum).name, 'consent': GoogleSignalsConsent(gss_consent_enum).name } } entities['ga4_properties'].append(prop_dict) for property_summary in account_summary['property_summaries']: time.sleep(REQUEST_DELAY) for data_stream in admin_api.list_data_streams( parent=property_summary['property']): data_stream_dict = { 'name': data_stream.name, 'type': DataStream.DataStreamType(data_stream.type_).name, 'display_name': data_stream.display_name, 'create_time': data_stream.create_time, 'update_time': data_stream.update_time, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } if data_stream.web_stream_data != None: data_stream_dict['web_stream_data'] = { 'measurment_id': 
data_stream.web_stream_data.measurement_id, 'firebase_app_id': data_stream.web_stream_data.firebase_app_id, 'default_uri': data_stream.web_stream_data.default_uri } time.sleep(REQUEST_DELAY) for mps in admin_api.list_measurement_protocol_secrets( parent=data_stream.name): mps_dict = { 'name': mps.name, 'display_name': mps.display_name, 'secret_value': mps.secret_value, 'stream_name': data_stream.name, 'type': DataStream.DataStreamType(data_stream.type_).name, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } entities['ga4_measurement_protocol_secrets'].append(mps_dict) if data_stream.android_app_stream_data != None: data_stream_dict['android_app_stream_data'] = { 'firebase_app_id': (data_stream .android_app_stream_data.firebase_app_id), 'package_name': data_stream.android_app_stream_data.package_name } time.sleep(REQUEST_DELAY) for mps in admin_api.list_measurement_protocol_secrets( parent=data_stream.name): mps_dict = { 'name': mps.name, 'display_name': mps.display_name, 'secret_value': mps.secret_value, 'stream_name': data_stream.name, 'type': DataStream.DataStreamType(data_stream.type_).name, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } entities['ga4_measurement_protocol_secrets'].append(mps_dict) if data_stream.ios_app_stream_data != None: data_stream_dict['ios_app_stream_data'] = { 'firebase_app_id': data_stream.ios_app_stream_data.firebase_app_id, 'bundle_id': data_stream.ios_app_stream_data.bundle_id } time.sleep(REQUEST_DELAY) for mps in admin_api.list_measurement_protocol_secrets( parent=data_stream.name): mps_dict = { 'name': mps.name, 'display_name': mps.display_name, 'secret_value': mps.secret_value, 'stream_name': data_stream.name, 'type': DataStream.DataStreamType(data_stream.type_).name, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } entities['ga4_measurement_protocol_secrets'].append(mps_dict) entities['ga4_data_streams'].append(data_stream_dict) time.sleep(REQUEST_DELAY) for event in admin_api.list_conversion_events( parent=property_summary['property']): event_dict = { 'name': event.name, 'event_name': event.event_name, 'create_time': event.create_time, 'deletable': event.deletable, 'custom': event.custom, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } entities['ga4_conversion_events'].append(event_dict) time.sleep(REQUEST_DELAY) for cd in admin_api.list_custom_dimensions( parent=property_summary['property']): cd_dict = { 'name': cd.name, 'parameter_name': cd.parameter_name, 'display_name': cd.display_name, 'description': cd.description, 'scope': cd.scope, 'disallow_ads_personalization': cd.disallow_ads_personalization, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } entities['ga4_custom_dimensions'].append(cd_dict) time.sleep(REQUEST_DELAY) for cm in admin_api.list_custom_metrics( parent=property_summary['property']): cm_dict = { 'name': cm.name, 'parameter_name': cm.parameter_name, 'display_name': cm.display_name, 'description': cm.description, 'scope': cm.scope, 'measurement_unit': cm.measurement_unit, 'property': property_summary['property'], 'property_display_name': property_summary['display_name'] } entities['ga4_custom_metrics'].append(cm_dict) time.sleep(REQUEST_DELAY) for link in admin_api.list_google_ads_links( parent=property_summary['property']): link_dict = { 'name': link.name, 'customer_id': 
link.customer_id,
            'can_manage_clients': link.can_manage_clients,
            'ads_personalization_enabled': link.ads_personalization_enabled,
            'create_time': link.create_time,
            'update_time': link.update_time,
            'creator_email_address': link.creator_email_address,
            'property': property_summary['property'],
            'property_display_name': property_summary['display_name']
        }
        entities['ga4_google_ads_links'].append(link_dict)
      time.sleep(REQUEST_DELAY)
      for link in admin_api.list_firebase_links(
          parent=property_summary['property']):
        link_dict = {
            'name': link.name,
            'project': link.project,
            'create_time': link.create_time,
            'property': property_summary['property'],
            'property_display_name': property_summary['display_name']
        }
        entities['ga4_firebase_links'].append(link_dict)
      time.sleep(REQUEST_DELAY)
      for link in admin_api.list_display_video360_advertiser_links(
          parent=property_summary['property']):
        link_dict = {
            'name': link.name,
            'advertiser_id': link.advertiser_id,
            'advertiser_display_name': link.advertiser_display_name,
            'ads_personalization_enabled': link.ads_personalization_enabled,
            'campaign_data_sharing_enabled':
                link.campaign_data_sharing_enabled,
            'cost_data_sharing_enabled': link.cost_data_sharing_enabled,
            'property': property_summary['property'],
            'property_display_name': property_summary['display_name']
        }
        entities['ga4_dv360_links'].append(link_dict)
      time.sleep(REQUEST_DELAY)
      for proposal in (
          admin_api.list_display_video360_advertiser_link_proposals(
              parent=property_summary['property'])):
        lpip_enum = (proposal.link_proposal_status_details
                     .link_proposal_initiating_product)
        lps_enum = (proposal.link_proposal_status_details
                    .link_proposal_state)
        proposal_dict = {
            'name': proposal.name,
            'advertiser_id': proposal.advertiser_id,
            'link_proposal_status_details': {
                'link_proposal_initiating_product':
                    LinkProposalInitiatingProduct(lpip_enum).name,
                'requestor_email':
                    proposal.link_proposal_status_details.requestor_email,
                'link_proposal_state': LinkProposalState(lps_enum).name
            },
            'advertiser_display_name': proposal.advertiser_display_name,
            'validation_email': proposal.validation_email,
            'ads_personalization_enabled':
                proposal.ads_personalization_enabled,
            'campaign_data_sharing_enabled':
                proposal.campaign_data_sharing_enabled,
            'cost_data_sharing_enabled': proposal.cost_data_sharing_enabled,
            'property': property_summary['property'],
            'property_display_name': property_summary['display_name']
        }
        entities['ga4_dv360_link_proposals'].append(proposal_dict)
  return entities
83b11c1a001a593da07f6aeb4333bad623bb7ee4
3,659,526
def print_sig(expr): """ Arguments: - `expr`: """ return "{0!s} × {1!s}".format(expr.dom, expr.body)
be8d6fb1ad2256e2a825e383859f72db93318864
3,659,528
def is_grounded_concept(c: Concept) -> bool: """ Check if a concept is grounded """ return ( "UN" in c.db_refs and c.db_refs["UN"][0][0].split("/")[1] != "properties" )
2447b289cec20efc2aa359f37a795fd231004030
3,659,529
def _get_form(app, parent_form, factory_method, force_disable_csrf=False): """Create and fill a form.""" class AForm(parent_form): pass with app.test_request_context(): extra = _update_with_csrf_disabled() if force_disable_csrf else {} RF = factory_method(AForm) rf = RF(**extra) rf.profile.username.data = "my username" rf.profile.full_name.data = "My full name" rf.validate() return rf
b109d983dcf123812ede664719ab56f5462e84d4
3,659,530
def get_root_disk_size(): """ Get size of the root disk """ context = pyudev.Context() rootfs_node = get_rootfs_node() size_gib = 0 for device in context.list_devices(DEVTYPE='disk'): # /dev/nvmeXn1 259 are for NVME devices major = device['MAJOR'] if (major == '8' or major == '3' or major == '253' or major == '259'): devname = device['DEVNAME'] if devname == rootfs_node: try: size_gib = parse_fdisk(devname) except Exception as e: LOG.error("Could not retrieve disk size - %s " % e) # Do not break config script, just return size 0 break break return size_gib
4c01e189dfb4460d118fbd9b94c6a07e420c3bb1
3,659,531
import logging
from io import BytesIO

import numpy
from PIL import Image as Img

logger = logging.getLogger(__name__)


def convert_to_premultiplied_png(file):
    """
    http://stackoverflow.com/questions/6591361/method-for-converting-pngs-to-premultiplied-alpha
    """
    logger.info("converting to premultiplied alpha")
    im = Img.open(file).convert('RGBA')
    a = numpy.frombuffer(im.tobytes(), dtype=numpy.uint8)
    a = a.astype(numpy.float64)
    alpha_layer = a[3::4] / 255.0
    a[::4] *= alpha_layer
    a[1::4] *= alpha_layer
    a[2::4] *= alpha_layer
    im = Img.frombytes("RGBA", im.size, a.astype(numpy.uint8).tobytes())
    f = BytesIO()
    im.save(f, 'png')
    f.seek(0)
    return f
2a2ebf9e3d1152e2d143ba799aab0ff0927653a8
3,659,532
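The alpha-premultiply step the helper applies to every pixel, shown on a single flattened RGBA pixel in NumPy; this is the arithmetic only, not the PIL round-trip.

import numpy as np

pixel = np.array([200.0, 100.0, 50.0, 128.0])  # R, G, B, A
alpha = pixel[3] / 255.0
premultiplied = pixel.copy()
premultiplied[:3] *= alpha                     # scale RGB by ~0.502
print(premultiplied)                           # alpha channel is untouched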
def DSQuery(dstype, objectname, attribute=None): """DirectoryServices query. Args: dstype: The type of objects to query. user, group. objectname: the object to query. attribute: the optional attribute to query. Returns: If an attribute is specified, the value of the attribute. Otherwise, the entire plist. Raises: DSException: Cannot query DirectoryServices. """ ds_path = '/%ss/%s' % (dstype.capitalize(), objectname) cmd = [_DSCL, '-plist', '.', '-read', ds_path] if attribute: cmd.append(attribute) (stdout, stderr, returncode) = RunProcess(cmd) if returncode: raise DSException('Cannot query %s for %s: %s' % (ds_path, attribute, stderr)) plist = NSString.stringWithString_(stdout).propertyList() if attribute: value = None if 'dsAttrTypeStandard:%s' % attribute in plist: value = plist['dsAttrTypeStandard:%s' % attribute] elif attribute in plist: value = plist[attribute] try: # We're copying to a new list to convert from NSCFArray return value[:] except TypeError: # ... unless we can't return value else: return plist
2dea68b5897a46c90d2f8cf24e42519c272e70f1
3,659,533
def calculate_offset(lon, first_element_value): """ Calculate the number of elements to roll the dataset by in order to have longitude from within requested bounds. :param lon: longitude coordinate of xarray dataset. :param first_element_value: the value of the first element of the longitude array to roll to. """ # get resolution of data res = lon.values[1] - lon.values[0] # calculate how many degrees to move by to have lon[0] of rolled subset as lower bound of request diff = lon.values[0] - first_element_value # work out how many elements to roll by to roll data by 1 degree index = 1 / res # calculate the corresponding offset needed to change data by diff offset = int(round(diff * index)) return offset
a55eee1dd11b1b052d67ab1abadfc8087c1a2fe0
3,659,534
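A stand-in check for calculate_offset using a plain object with a .values attribute instead of a real xarray coordinate; the grid is a hypothetical 1-degree longitude axis.

import numpy as np
from types import SimpleNamespace

lon = SimpleNamespace(values=np.arange(0.0, 360.0, 1.0))  # 0..359 degrees
print(calculate_offset(lon, -180.0))  # -> 180 elements to roll by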
def min_mean_col(m: ma.MaskedArray) -> int: """Calculate the index of the column with the smallest mean. """ if ma.count_masked(m) == m.size: return -1 col_mean = np.nanmean(m, axis=0) return np.argmin(col_mean)
499b7d5db38edc222aac6517d87d9df30285cb37
3,659,535
def test_data_alignment(role_value, should_pass, check_model): """Test a custom model which returns a good and alignments from data(). qtmodeltest should capture this problem and fail when that happens. """ class MyModel(qt_api.QAbstractListModel): def rowCount(self, parent=qt_api.QtCore.QModelIndex()): return 1 if parent == qt_api.QtCore.QModelIndex() else 0 def data( self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.DisplayRole ): if role == qt_api.QtCore.Qt.TextAlignmentRole: return role_value elif role == qt_api.QtCore.Qt.DisplayRole: if index == self.index(0, 0): return "Hello" return None check_model(MyModel(), should_pass=should_pass)
4ebed3384cf5c694d72235e703b6f9594de5ff7b
3,659,539
def cov(a, b): """Return the sample covariance of vectors a and b""" a = flex.double(a) b = flex.double(b) n = len(a) assert n == len(b) resid_a = a - flex.mean(a) resid_b = b - flex.mean(b) return flex.sum(resid_a*resid_b) / (n - 1)
94505852671e4652f96daa7b8e61f759aeca1dda
3,659,540
from bs4 import BeautifulSoup import re def beautify(soup: BeautifulSoup, rich_terminal: bool = True) -> str: """ Cleans up the raw HTML so it's more presentable. Parse BeautifulSoup HTML and return prettified string """ beautifiedText = str() for i in soup: if rich_terminal: term = Terminal() span_sub = r"{t.italic}\1{t.normal}".format(t=term) strong_sub = r"{t.bold}\1{t.normal}".format(t=term) else: span_sub = r"\1" strong_sub = r"\1" i = re.sub(r'<span class="\w+">(.+)</span>', span_sub, str(i),) i = re.sub(r"<strong>(.+)</strong>", strong_sub, str(i)) beautifiedText += " " + i # Remove leading whitespace. beautifiedText = re.sub(r"^\s+", "", beautifiedText) # Compress all whitespace to a single space. beautifiedText = re.sub(r"\s{2,}", " ", beautifiedText) # Trim whitespace immediately preceding common punctuation. beautifiedText = re.sub(r"\s+([,\)\].;:])", r"\g<1>", beautifiedText) # Trim whitespace immediately following common punctuation. beautifiedText = re.sub(r"([\(])\s+", r"\g<1>", beautifiedText) return beautifiedText
df79666ad0ec9592e1a24325813b59e2d9711636
3,659,541
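A plain-terminal demo of beautify (rich_terminal=False, so no terminal-styling dependency is needed); the HTML snippet and its class name are made up, and the paragraph tag's children are what the cleaner iterates over.

from bs4 import BeautifulSoup

html = ('<p>An <strong>example</strong> entry , with '
        '<span class="note">odd</span> spacing ( inside parens ).</p>')
soup = BeautifulSoup(html, 'html.parser')
print(beautify(soup.p, rich_terminal=False))
# -> An example entry, with odd spacing (inside parens).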
def design_complexity(design: Design) -> int: """Returns an approximation of the design's complexity to create.""" diversity = 3 * len(design.required) abundance = 2 * sum(design.required.values()) return diversity + abundance + design.additional
b5be6336ce037d010bbb274dc6ce5538ac6ecae8
3,659,542
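A hedged sketch with a hypothetical stand-in for the Design type (the real class is not shown above): required maps material -> count, additional is a flat extra cost.

from dataclasses import dataclass, field

@dataclass
class Design:
    required: dict = field(default_factory=dict)
    additional: int = 0

d = Design(required={'wood': 2, 'glass': 1}, additional=4)
print(design_complexity(d))  # 3*2 + 2*(2 + 1) + 4 = 16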