Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M). Each row below lists the content cell (a Python code sample), its sha1, and its id.
def get_file_from_project(proj: Project, file_path):
    """
    Returns a file object (or None, if error) from the HEAD of the default
    branch in the repo. The default branch is usually 'main'.
    """
    try:
        file = proj.files.raw(file_path=file_path, ref=proj.default_branch)
        LintReport.trace(f'Accessing \'{file_path}\' from {proj.name}.')
        return file
    except gitlab.GitlabGetError as _:
        LintReport.trace(
            f'Problem accessing \'{file_path}\' from {proj.name}.')
        return None
796203fabf6f25403f24e6c3f50d93f5e20d1d80
905
def get_logger_by_name(name: str):
    """
    Gets the logger given the type of logger

    :param name: Name of the value function needed
    :type name: string
    :returns: Logger
    """
    if name not in logger_registry.keys():
        raise NotImplementedError
    else:
        return logger_registry[name]
b17b0ad215f25940b751f995c6f7cd441f6cd4e6
906
def gen_appr_():
    """
    16 consonants
    """
    appr_ = list(voiced_approximant)
    appr_.extend(unvoiced_approximant)
    appr_.extend(voiced_lateral_approximant)
    return appr_
948d52aa38ec03f0f3b21dcd6c2c5e60d30cdbb3
907
from typing import Union
from typing import Iterable


def convert_unit(
    to_convert: Union[float, int, Iterable[Union[float, int, Iterable]]],
    old_unit: Union[str, float, int],
    new_unit: Union[str, float, int],
) -> Union[float, tuple]:
    """
    Convert a number or sequence of numbers from one unit to another.

    If either unit is a number it will be treated as the number of points per unit.
    So 72 would mean 1 inch.

    Args:
        to_convert (float, int, Iterable): The number / list of numbers, or points, to convert
        old_unit (str, float, int): A unit accepted by fpdf.FPDF or a number
        new_unit (str, float, int): A unit accepted by fpdf.FPDF or a number
    Returns:
        (float, tuple): to_convert converted from old_unit to new_unit or a tuple of the same
    """
    unit_conversion_factor = get_scale_factor(new_unit) / get_scale_factor(old_unit)
    if isinstance(to_convert, Iterable):
        return tuple(
            map(lambda i: convert_unit(i, 1, unit_conversion_factor), to_convert)
        )
    return to_convert / unit_conversion_factor
e4ac4f5ba405151d45cbab0b04fcf55a9710a0bf
908
from typing import Union
from typing import Tuple


def image_preprocess(image, image_size: Union[int, Tuple[int, int]]):
    """Preprocess image for inference.

    Args:
        image: input image, can be a tensor or a numpy array.
        image_size: single integer of image size for square image or tuple of two
            integers, in the format of (image_height, image_width).

    Returns:
        (image, scale): a tuple of processed image and its scale.
    """
    input_processor = dataloader.DetectionInputProcessor(image, image_size)
    input_processor.normalize_image()
    input_processor.set_scale_factors_to_output_size()
    image = input_processor.resize_and_crop_image()
    image_scale = input_processor.image_scale_to_original
    return image, image_scale
091b2c3098bbf72a02a203486938c354719b3c83
909
def create_cluster_meta(cluster_groups):
    """Return a ClusterMeta instance with cluster group support."""
    meta = ClusterMeta()
    meta.add_field('group')
    cluster_groups = cluster_groups or {}
    data = {c: {'group': v} for c, v in cluster_groups.items()}
    meta.from_dict(data)
    return meta
01c96d966c0c581c6d72cf7a8fb67cec9fd41d6e
910
def dict_has_key_and_value_include_str(the_dict, key, str):
    """Check whether the dict contains the given key and the value for that
    key contains the given string fragment."""
    if the_dict.__contains__(key):
        if str in the_dict[key]:
            return True
    return False
56058581914233c9520986db7f80c4b879443e97
911
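A minimal usage sketch for dict_has_key_and_value_include_str from the row above; the dictionary and strings are made-up illustration values.

# Hypothetical example values; the function above must be in scope.
record = {'message': 'build failed on linux'}
print(dict_has_key_and_value_include_str(record, 'message', 'linux'))   # True
print(dict_has_key_and_value_include_str(record, 'message', 'darwin'))  # False
print(dict_has_key_and_value_include_str(record, 'status', 'ok'))       # False (key missing)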
def get_wrapper_depth(wrapper):
    """Return depth of wrapper function."""
    return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
2b6dbfc817416b8e5bce486ec12dad09281fb7b6
913
def get_formsets(what, extra=0, **kwargs):
    """Returns a list of formset instances"""
    try:
        related_fields = {}
        relation_config = get_form_config('Relations', **kwargs)
        operation = 'create' if 'Create' in what else 'update'
        for relation in relation_config:
            field_config = relation_config[relation]
            related_fields[relation] = get_form_fields(operation, field_config)

        def get_related_model(relation):
            """Returns related model"""
            args = get_app_model_as_params(**kwargs)
            args.pop()
            args.append(relation)
            return apps.get_model(*args)

        return [inlineformset_factory(
            get_model(**kwargs),
            get_related_model(relation),
            fields=related_fields[relation],
            extra=extra
        ) for relation in related_fields]
    except KeyError:
        return []
39b6ec430c245cf54cc1d28abaf89271237ef961
914
def round_even(number):
    """Takes a number and returns it rounded even"""
    # decimal.getcontext() -> ROUND_HALF_EVEN is default
    return Decimal(number).quantize(0)
2b19200a1a10597976fe29eaf6363cf59212241e
915
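A short usage sketch for round_even from the row above; it assumes `from decimal import Decimal` has been executed, since the function relies on that name and on the default ROUND_HALF_EVEN context.

from decimal import Decimal  # required by round_even above

# Halfway cases are rounded to the nearest even integer (banker's rounding).
print(round_even(2.5))  # Decimal('2')
print(round_even(3.5))  # Decimal('4')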
def _build_conditional_single(cond, vals, model_cls=None): """ Builds the single conditional portion of a where clause. Args: cond (()/[]): The tuple/list containing the elements for a single conditional statement. See Model.query_direct() docs for full details on the format. vals ({str:str/int/bool/datetime/enum/etc}): The mapping of variable names as they will be used within parameterized format (i.e. `%(<>)s` format) in the returned `clause`. This is expected to contain all variables already built into the where clause currently being processed and will be modified here if a value/variable is part of the conditional. model_cls (Class<Model<>> or None): The class itself of the model holding the valid column names. Can be None if skipping that check for increased performance, but this is ONLY recommended if the source of the column names in the structured `where` parameter is internally controlled and was not subject to external user input to avoid SQL injection attacks. Returns: (str): The portion of the clause that represents this single conditional. Any variables will be in parameterized format (i.e. `%(<>)s` format). Note that the `vals` provided will be modified by adding any new variables included in this portion of the clause. Raises: (NonexistentColumnError): Raised if the column provided in the `cond` does not exist in the official list of columns in the provided model (only possible if model_cls provided as non-None). (ValueError): Raised if the LogicOp provided as part of the `cond` is not a valid LogicOp option for this Orm. """ if model_cls is not None: _validate_cols([cond[0]], model_cls) if cond[1] is model_meta.LogicOp.NOT_NULL: return f'{cond[0]} NOT NULL' # The rest below have a value, so all would use same key val_key = f'wval{str(len(vals))}' if cond[1] is model_meta.LogicOp.EQ \ or cond[1] is model_meta.LogicOp.EQUAL \ or cond[1] is model_meta.LogicOp.EQUALS: vals[val_key] = cond[2] return f'{cond[0]} = %({val_key})s' if cond[1] is model_meta.LogicOp.LT \ or cond[1] is model_meta.LogicOp.LESS_THAN: vals[val_key] = cond[2] return f'{cond[0]} < %({val_key})s' if cond[1] is model_meta.LogicOp.LTE \ or cond[1] is model_meta.LogicOp.LESS_THAN_OR_EQUAL: vals[val_key] = cond[2] return f'{cond[0]} <= %({val_key})s' if cond[1] is model_meta.LogicOp.GT \ or cond[1] is model_meta.LogicOp.GREATER_THAN: vals[val_key] = cond[2] return f'{cond[0]} > %({val_key})s' if cond[1] is model_meta.LogicOp.GTE \ or cond[1] is model_meta.LogicOp.GREATER_THAN_OR_EQUAL: vals[val_key] = cond[2] return f'{cond[0]} >= %({val_key})s' err_msg = f'Invalid or Unsupported Logic Op: {cond[1]}' logger.error(err_msg) raise ValueError(err_msg)
bb61133f25901321df41f06fa5407cb98c596f88
916
def isNullOutpoint(tx):
    """
    isNullOutpoint determines whether or not a previous transaction output point
    is set.
    """
    nullInOP = tx.txIn[0].previousOutPoint
    if (
        nullInOP.index == wire.MaxUint32
        and nullInOP.hash == ByteArray(0, length=HASH_SIZE)
        and nullInOP.tree == wire.TxTreeRegular
    ):
        return True
    return False
ac68a81dfabd7415136b5bfe0c38b6b551048e88
917
def cmyk_to_rgb(c, m, y, k):
    """ """
    r = (1.0 - c) * (1.0 - k)
    g = (1.0 - m) * (1.0 - k)
    b = (1.0 - y) * (1.0 - k)
    return r, g, b
03ece22efe6f88ff6e9f2825c72bcb4b18a238ef
918
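A quick usage sketch for cmyk_to_rgb above; inputs and outputs are in the 0.0 to 1.0 range and the values here are illustrative.

# Pure cyan (c=1) with no black -> no red, full green and blue.
print(cmyk_to_rgb(1.0, 0.0, 0.0, 0.0))  # (0.0, 1.0, 1.0)
# 50% black only -> mid grey.
print(cmyk_to_rgb(0.0, 0.0, 0.0, 0.5))  # (0.5, 0.5, 0.5)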
def get_by_id(group_id: int, db: Session = Depends(get_db),
              member: MemberModel = Depends(get_active_member)):
    """Get group by id"""
    item = service.get_by_id(db, group_id)
    return item
035e5c0d74de017778f82052c5341f7c69b9dd8a
919
def inMandelSet(x: int, y: int, max_iteration: int) -> int:
    """inMandelSet determines if complex(x,y) is in the mandelbrot set."""
    z = 0
    for k in range(max_iteration):
        z = z ** 2 + complex(x, y)
        if abs(z) > 2:
            return k
    return k
404beb051d0982c081a6564793017282451fa44b
924
def isBinaryPalindrome(num):
    """assumes num is an integer
    returns True if num in binary form is a palindrome, else False"""
    return str(bin(num))[2::] == str(bin(num))[:1:-1]
0181811e57964cb056391618084d9473b6c845e3
925
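A small usage sketch for isBinaryPalindrome above (illustrative values):

print(isBinaryPalindrome(9))   # True  -> bin(9)  == '0b1001', '1001' reads the same reversed
print(isBinaryPalindrome(10))  # False -> bin(10) == '0b1010', '1010' reversed is '0101'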
def mprv_from_entropy(entropy: GenericEntropy, passphrase: str, lang: str,
                      xversion: bytes) -> bytes:
    """Return a BIP32 master private key from entropy."""
    mnemonic = mnemonic_from_entropy(entropy, lang)
    mprv = mprv_from_mnemonic(mnemonic, passphrase, xversion)
    return mprv
7dc9ce4c25f9b84f16731eb37be371de95187a8b
927
def analyze_audio(audio_filename, target_freq=TARGET_FREQS, win_size=5000, step=200, min_delay=BEEP_DURATION, sensitivity=250, verbose=True): """ Analyze the given audio file to find the tone markers, with the respective frequency and time position. :param str audio_filename: The Audio filename to analyze to find the markers. :param tuple target_freq: A tuple containing the int frequencies ( in Hertz ) that the function should recognize. :param int win_size: The size of the moving window for the analysys. Increasing the window increases the accuracy but takes longer. :param int step: the increment between each window. :param float min_delay: Minimum duration, in seconds, of the beep to be recognized. :param int sensitivity: Minimum value of relative amplitude of the beep to be recognized. :param bool verbose: If true, print some info on the screen. :return: a list of dict containing the markers positions and frequencies. """ print("Analyzing the Audio...") # Open the wav audio track # Get the sample rate (fs) and the sample data (data) fs, data = wavfile.read(audio_filename) # Calculate the duration, in seconds, of a sample sample_duration = 1.0 / fs # Get the total number of samples total_samples = data.shape[0] # Calculate the frequencies that the fourier transform can analyze frequencies = np.fft.fftfreq(win_size) # Convert them to Hertz hz_frequencies = frequencies * fs # Calculate the indexes of the frequencies that are compatible with the target_freq freq_indexes = [] for freq in target_freq: # Find the index of the nearest element index = (np.abs(hz_frequencies - freq)).argmin() freq_indexes.append(index) # This will hold the duration of each frequency pulse duration_count = {} # Initialize the dictionary for freq in target_freq: duration_count[freq] = 0 # Initialize the counter count = 0 # This list will hold the analysis result results = [] # Analyze the audio dividing the samples into windows, and analyzing each # one separately for window in mit.windowed(data, n=win_size, step=step, fillvalue=0): # Calculate the FFT of the current window fft_data = np.fft.fft(window) # Calculate the amplitude of the transform fft_abs = np.absolute(window) # Calculate the mean of the amplitude fft_mean = np.mean(fft_abs) # Calculate the current time of the window ctime = count * sample_duration # Check, for each target frequency, if present for i, freq in enumerate(target_freq): # Get the relative amplitude of the current frequency freq_amplitude = abs(fft_data[freq_indexes[i]]) / fft_mean # If the amplitude is greater than the sensitivity, # Increase the duration counter for the current frequency if freq_amplitude > sensitivity: duration_count[freq] += step * sample_duration else: # If the duration is greater than the minimum delay, add the result if duration_count[freq] > min_delay: results.append({'time': ctime, 'freq': freq}) # Print the result if verbose if verbose: print("--> found freq:", freq, "time:", ctime) duration_count[freq] = 0 count += step # Print the progress every 100000 samples if verbose and count % 100000 == 0: percent = round((count/total_samples) * 100) print("\rAnalyzing {}% ".format(percent), end="") print() # Reset the new line return results
63a5dfd65075b592662309082630011c234a3d52
928
import json


def read_usgs_file(file_name):
    """
    Reads a USGS JSON data file (from https://waterdata.usgs.gov/nwis)

    Parameters
    ----------
    file_name : str
        Name of USGS JSON data file

    Returns
    -------
    data : pandas DataFrame
        Data indexed by datetime with columns named according to the parameter's
        variable description
    """
    with open(file_name) as json_file:
        text = json.load(json_file)
    data = _read_usgs_json(text)
    return data
cfba1da7bb5f34a18292dc914f8128cab538850e
929
def get_cantus_firmus(notes):
    """
    Given a list of notes as integers, will return the lilypond notes for the
    cantus firmus.
    """
    result = ""
    # Ensure the notes are in range
    normalised = [note for note in notes if note > 0 and note < 18]
    if not normalised:
        return result
    # Set the duration against the first note.
    result = NOTES[normalised[0]] + " 1 "
    # Translate all the others.
    result += " ".join([NOTES[note] for note in normalised[1:]])
    # End with a double bar.
    result += ' \\bar "|."'
    # Tidy up double spaces.
    result = result.replace("  ", " ")
    return result
d193088a6665df363d032f69b6fd3db80c8bce4a
930
def get_wildcard_values(config):
    """Get user-supplied wildcard values."""
    return dict(wc.split("=") for wc in config.get("wildcards", []))
0ca15b82ebed47dec9d46991cb4db45ee72eb3af
931
def predict(model_filepath, config, input_data):
    """Return prediction from user input."""
    # Load model
    model = Model.load(model_filepath + config['predicting']['model_name'])
    # Predict
    prediction = int(np.round(model.predict(input_data), -3)[0])
    return prediction
afc61eaba1265efded59f182fa6639a3d2e534e2
932
def gauss3D_FIT(xyz, x0, y0, z0, sigma_x, sigma_y, sigma_z): """ gauss3D_FIT((x,y,z),x0,y0,z0,sigma_x,sigma_y,sigma_z) Returns the value of a gaussian at a 2D set of points for the given standard deviations with maximum normalized to 1. The Gaussian axes are assumed to be 90 degrees from each other. xyz - x0, y0, z0 = the x, y, z centers of the Gaussian sigma_x, sigma_y, sigma_z = The std. deviations of the Gaussian. Note ----- Be careful about the indexing used in meshgrid and the order in which you pass the x, y, z variables in. Parameters ---------- xyz: tuple of ndarrays A tuple containing the 3D arrays of points (from meshgrid) x0, y0, z0: float The x, y, z centers of the Gaussian sigma_x, sigma_y, sigma_z: float The standard deviations of the Gaussian. Returns ------- g3_norm: ndarray A flattened array for fitting. """ x0 = float(x0) y0 = float(y0) z0 = float(z0) x = xyz[0] y = xyz[1] z = xyz[2] g3 = np.exp( -( (x - x0) ** 2 / (2 * sigma_x ** 2) + (y - y0) ** 2 / (2 * sigma_y ** 2) + (z - z0) ** 2 / (2 * sigma_z ** 2) ) ) g3_norm = g3 / np.max(g3.flatten()) return g3_norm.ravel()
8e4337760c8064fb553361240f2cfa04ec379c76
934
async def tell(message: str) -> None:
    """Send a message to the user.

    Args:
        message: The message to send to the user.
    """
    return await interaction_context().tell(message)
8e82ceece1896b2b8cc805cf30cca79e64e0cf4e
935
def PGetDim(inFFT):
    """
    Get dimension of an FFT

    returns array of 7 elements
    * inFFT = Python Obit FFT
    """
    ################################################################
    # Checks
    if not PIsA(inFFT):
        raise TypeError("inFFT MUST be a Python Obit FFT")
    return Obit.FFTGetDim(inFFT.me)
    # end PGetDim
c95f80f465f0f69a2e144bf4b52a2e7965c8f87c
936
def score_detail(fpl_data): """ convert fpl_data into Series Index- multi-index of team, pos, player, opp, minutes """ l =[] basic_index = ["player", "opp", "minutes"] for i in range(len(fpl_data["elements"])): ts=achived_from(fpl_data, i, True) name = (fpl_data["elements"][i]["first_name"]+ fpl_data["elements"][i]["second_name"]) if len(ts)==0: continue ts=pd.concat([ts,], keys=[name], names=basic_index) ele = pos_map(fpl_data)[fpl_data["elements"][i]['element_type']] ts=pd.concat([ts,], keys=[ele], names=["pos"]+basic_index) team = team_map(fpl_data)[fpl_data["elements"][i]['team']] ts=pd.concat([ts,], keys=[team], names=["team", "pos"]+basic_index) l.append(ts) return pd.concat(l)
fd70f92efffb42e8d5849f4fa2eaf090e87daa57
937
def edition_view(measurement, workspace, exopy_qtbot):
    """Start plugins and add measurements before creating the execution view.
    """
    pl = measurement.plugin
    pl.edited_measurements.add(measurement)
    measurement.root_task.add_child_task(0, BreakTask(name='Test'))

    item = MeasurementEditorDockItem(workspace=workspace,
                                     measurement=measurement,
                                     name='test')

    return DockItemTestingWindow(widget=item)
f84ed466468b9732c9aef9c3fc9244a5e57583cd
938
def menu_items(): """ Add a menu item which allows users to specify their session directory """ def change_session_folder(): global session_dir path = str(QtGui.QFileDialog.getExistingDirectory(None, 'Browse to new session folder -')) session_dir = path utils.setrootdir(path) writetolog("*" * 79 + "\n" + "*" * 79) writetolog(" output directory: " + session_dir) writetolog("*" * 79 + "\n" + "*" * 79) lst = [] lst.append(("Change session folder", change_session_folder)) return(lst)
ec5177e53eaa1a2de38276ca95d41f944dd9d4a3
939
def calculate_pair_energy_np(coordinates, i_particle, box_length, cutoff): """ Calculates the interaction energy of one particle with all others in system. Parameters: ``````````` coordinates : np.ndarray 2D array of [x,y,z] coordinates for all particles in the system i_particle : int the particle row for which to calculate energy box_length : float the length of the simulation box cutoff : float the cutoff interaction length Returns: ```````` e_total : float the pairwise energy between the i-th particle and other particles in system """ particle = coordinates[i_particle][:] coordinates = np.delete(coordinates, i_particle, 0) e_array = np.zeros(coordinates.shape) dist = calculate_distance_np(particle, coordinates, box_length) e_array = dist[dist < cutoff] e_array = calculate_LJ_np(e_array) e_total = e_array.sum() return e_total
fecc44e54b4cbef12e6b197c34971fc54a91d3ce
940
def inside_loop(iter):
    """
    >>> inside_loop([1,2,3])
    3
    >>> inside_loop([])
    Traceback (most recent call last):
    ...
    UnboundLocalError: local variable 'i' referenced before assignment
    """
    for i in iter:
        pass
    return i
c94720cddec7d3d151c9aea8d8d360564fbffe66
941
def _pattern_data_from_form(form, point_set): """Handles the form in which the user determines which algorithms to run with the uploaded file, and computes the algorithm results. Args: form: The form data point_set: Point set representation of the uploaded file. Returns: Musical pattern discovery results of the algorithms chosen by the user. """ pattern_data = [] # SIATEC min_pattern_length = form.getlist('siatec-min-pattern-length') min_pattern_length = [int(x) for x in min_pattern_length] for i in range(len(min_pattern_length)): pattern_data.append( siatec.compute( point_set=point_set, min_pattern_length=min_pattern_length[i] ) ) # timewarp-invariant algorithm window = form.getlist('timewarp-window') window = [int(x) for x in window] min_pattern_length = form.getlist('timewarp-min-pattern-length') min_pattern_length = [int(x) for x in min_pattern_length] for i in range(len(window)): pattern_data.append( time_warp_invariant.compute( point_set=point_set, window=window[i], min_pattern_length=min_pattern_length[i] ) ) return pattern_data
ba69a058fd6a641166ebf4040dc7f780fc8b1a1e
942
def group(help_doc):
    """Creates group options instance in module options instance"""
    return __options.group(help_doc)
a715353bb86ecd511522283c941a66830926a1d3
943
from io import StringIO def convert_pdf_to_txt(path, pageid=None): """ This function scrambles the text. There may be values for LAParams that fix it but that seems difficult so see getMonters instead. This function is based on convert_pdf_to_txt(path) from RattleyCooper's Oct 21 '14 at 19:47 answer edited by Trenton McKinney Oct 4 '19 at 4:10 on <https://stackoverflow.com/a/26495057>. Keyword arguments: pageid -- Only process this page id. """ rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() try: device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) except TypeError as ex: if ("codec" in str(ex)) and ("unexpected keyword" in str(ex)): device = TextConverter(rsrcmgr, retstr, laparams=laparams) fp = open(path, 'rb') interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos = set() for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True): # print("page: {}".format(dir(page))) if (pageid is None) or (pageid == page.pageid): print("page.pageid: {}".format(page.pageid)) interpreter.process_page(page) if pageid is not None: break text = retstr.getvalue() print(text) fp.close() device.close() retstr.close() return text
9c215c539054bd88c5d7f2bf9d38e904fc53b0d6
944
def safe_str(val, default=None):
    """Safely cast value to str, Optional: Pass default value. Returned if casting fails.

    Args:
        val:
        default:

    Returns:

    """
    if val is None:
        return default if default is not None else ''
    return safe_cast(val, str, default)
d5abb2426de99aa8aac22660ce53fa4aec6424e3
946
def mod2():
    """
    Create a simple model for incorporation tests
    """
    class mod2(mod1):
        def __init__(self, name, description):
            super().__init__(name, "Model 1")
            self.a = self.createVariable("a", dimless, "a")
            self.b = self.createVariable("b", dimless, "b")
            self.c = self.createParameter("c", dimless, "c")
            self.c.setValue(2.)
            eq21 = self.a() + self.b() + self.c()
            eq22 = self.b() - self.f()
            self.createEquation("eq21", "Generic equation 2.1", eq21)
            self.createEquation("eq22", "Generic equation 2.2", eq22)

    mod = mod2("M2", "Model 2")
    mod()
    return mod
cef4ad517971a1eb00ece97b7d90be1895e1ab0f
947
def zero_order(freq, theta, lcandidat, NumTopic):
    """
    Calculate the Zero-Order Relevance

    Parameters:
    ----------
    freq : Array containing the frequency of occurrences of each word in the whole corpus
    theta : Array containing the frequency of occurrences of each word in each topic
    lcandidat: Array containing each label candidate
    NumTopic : The number of the topic

    Returns:
    -------
    topCandidate : Array containing the name of the top 10 score candidate for a given topic
    """
    # W is the matrix holding the score of each word for each topic
    W = np.log(theta / freq)
    # Scores of all the candidates for topic NumTopic
    score = np.array([])
    for indice in range(len(lcandidat)):
        candidat = lcandidat[indice].split(" ")
        i = id2word.doc2idx(candidat)
        # Drop the -1 entries (which mean the word was not found)
        i[:] = [v for v in i if v != -1]
        score = np.append(score, np.sum(W[NumTopic, i]))
    # topValue, topCandidate = top10Score(score, lcandidat)
    dicti = top10ScoreCandidat(score, lcandidat)
    return dicti
38cd3207b375db06302cb063270c180bc4b9617b
948
def compute_check_letter(dni_number: str) -> str:
    """
    Given a DNI number, obtain the correct check letter.

    :param dni_number: a valid dni number.
    :return: the check letter for the number as an uppercase, single character string.
    """
    return UPPERCASE_CHECK_LETTERS[int(dni_number) % 23]
58a7d54db2736351aef4957f17ed55ce13af7f0a
949
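A usage sketch for compute_check_letter above. UPPERCASE_CHECK_LETTERS is not defined in the row, so it is filled in here with the standard Spanish DNI letter table as an assumption.

# Assumed constant (standard DNI check-letter table); not part of the original row.
UPPERCASE_CHECK_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"

print(compute_check_letter("12345678"))  # 'Z' (12345678 % 23 == 14)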
import time


def uptime_check(delay=1):
    """Performs uptime checks to two URLs

    Args:
        delay: The number of seconds delay between two uptime checks, optional,
            defaults to 1 second.

    Returns:
        A dictionary, where the keys are the URL checked, the values are the
        corresponding status (1=UP, 0=DOWN)
    """
    urls = ["https://httpstat.us/503", "https://httpstat.us/200"]
    url_status = {}
    for url in urls:
        url_status[url] = check_url(url)[0]
        time.sleep(delay)
    return url_status
69c8f76a28ec0cb59f08252d8d2bcb04fc85782e
950
def entropy_column(input):
    """returns column entropy of entropy matrix. input is motifs"""
    nucleotides = {'A': 0, 'T': 0, 'C': 0, 'G': 0}
    for item in input:
        nucleotides[item] = nucleotides[item] + 1
    for key in nucleotides:
        temp_res = nucleotides[key] / len(input)
        if temp_res > 0:
            nucleotides[key] = temp_res * abs(log2(temp_res))
        else:
            continue
    sum = 0
    for key in nucleotides:
        sum = sum + nucleotides[key]
    # print(nucleotides)
    return sum
3079f7b5d40e02f00b7f36de6ad6df9ff6b6ec41
951
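A usage sketch for entropy_column above; it assumes `from math import log2` is in scope, since the function uses log2.

from math import log2  # required by entropy_column above

# A perfectly mixed column has 2 bits of entropy; a uniform column has 0.
print(entropy_column(['A', 'T', 'C', 'G']))  # 2.0
print(entropy_column(['A', 'A', 'A', 'A']))  # 0.0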
def sumVoteCount(instance):
    """ Returns the sum of the vote count of the instance.

    :param instance: The instance.
    :type instance: preflibtools.instance.preflibinstance.PreflibInstance

    :return: The sum of vote count of the instance.
    :rtype: int
    """
    return instance.sumVoteCount
6683e31a2e5ec9904c5f35e60622310b6688a635
953
def get_user_solutions(username): """Returns all solutions submitted by the specified user. Args: username: The username. Returns: A solution list. Raises: KeyError: If the specified user is not found. """ user = _db.users.find_one({'_id': username}) if not user: raise KeyError('User not found: %s' % username) solutions = _db.solutions.find( { 'owner': user['_id'] }, projection=('resemblance_int', 'solution_size', 'problem_id', '_id')) # manually select the best (and oldest) solution table = {} for solution in solutions: problem_id = solution['problem_id'] if problem_id in table: old_solution = table[problem_id] if solution['resemblance_int'] > old_solution['resemblance_int'] or \ (solution['resemblance_int'] == old_solution['resemblance_int'] and solution['_id'] < old_solution['_id']): table[problem_id] = solution else: table[problem_id] = solution # sort by problem_id solutions = table.values() solutions.sort(key=lambda solution: solution['problem_id']) return solutions
b1257962ee52707d39988ec1cc535c390df064e6
956
def add_standard_attention_hparams(hparams): """Adds the hparams used by get_standadized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # hparams used and which should have been defined outside (in # common_hparams): # Global flags # hparams.mode # hparams.hidden_size # Pre-post processing flags # hparams.layer_preprocess_sequence # hparams.layer_postprocess_sequence # hparams.layer_prepostprocess_dropout # hparams.norm_type # hparams.norm_epsilon # Mixture-of-Expert flags # hparams.moe_hidden_sizes # hparams.moe_num_experts # hparams.moe_k # hparams.moe_loss_coef # Attention layers flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) # Attention: Local (unmasked only): How much to look left. hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") hparams.add_hparam("attention_red_nonlinearity", "none") # Fully connected layers flags # To be more consistent, should use filter_size to also control the MOE # size if moe_hidden_sizes not set. hparams.add_hparam("filter_size", 2048) hparams.add_hparam("relu_dropout", 0.0) return hparams
de9f1a3b30a105a89d3400ca0b36e4c747f1ab46
958
def get_df1_df2(X: np.array, y: np.array) -> [DataFrame, DataFrame]:
    """
    Get DataFrames for points with labels 1 and -1
    :param X:
    :param y:
    :return:
    """
    x1 = np.array([X[:, i] for i in range(y.shape[0]) if y[i] == 1]).T
    x2 = np.array([X[:, i] for i in range(y.shape[0]) if y[i] == -1]).T
    df1 = DataFrame({'x': list(), 'y': list()})
    df2 = DataFrame({'x': list(), 'y': list()})
    if len(x1) > 0:
        df1 = DataFrame({'x': x1[0], 'y': x1[1]})
    if len(x2) > 0:
        df2 = DataFrame({'x': x2[0], 'y': x2[1]})
    return [df1, df2]
783a69a9be0e56ca3509fa38845df4f1533ef45e
960
import base64


def dnsip6encode(data):
    """
    encodes the data as a single IPv6 address
    :param data: data to encode
    :return: encoded form
    """
    if len(data) != 16:
        print_error("dnsip6encode: data is more or less than 16 bytes, cannot encode")
        return None
    res = b''
    reslen = 0
    for i in range(len(data)):
        res += base64.b16encode(data[i:i+1])
        reslen += 1
        if reslen % 2 == 0:
            res += b':'
    return res[:-1]
0055029150c1a125b88ac5f5700d8bf2fb70d9c2
961
def gcm_send_bulk_message(registration_ids, data, encoding='utf-8', **kwargs):
    """
    Standalone method to send bulk gcm notifications
    """
    messenger = GCMMessenger(registration_ids, data, encoding=encoding, **kwargs)
    return messenger.send_bulk()
cace5a07d0b903d0f4aa1694faf7366ea7b9c928
962
import torch def apply_net_video(net, arr, argmax_output=True, full_faces='auto'): """Apply a preloaded network to input array coming from a video of one eye. Note that there is (intentionally) no function that both loads the net and applies it; loading the net should ideally only be done once no matter how many times it is run on arrays. Arguments: net: Network loaded by load_net arr: numpy array of shape (h, w, 3) or (batch_size, h, w, 3) with colors in RGB order generally (h, w) = (4000, 6000) for full faces and (4000, 3000) for half-faces although inputs are all resized to (256, 256) argmax_output: if True, apply argmax to output values to get categorical mask full_faces: whether inputs are to be treated as full faces; note that the networks take half-faces By default, base decision on input size Returns: Segmentation mask and potentially regression output. Regression output present if a regression-generating network was used Segmentation mask a numpy array of shape (batch_size, h, w) if argmax_output else (batch_size, h, w, num_classes) Regression output a numpy array of shape (batch_size, 4) for half-faces or (batch_size, 8) for full faces; one iris's entry is in the format (x,y,r,p) with p the predicted probability of iris presence; for full faces, each entry is (*right_iris, *left_iris)""" if len(arr.shape)==3: arr = arr[np.newaxis] tens = torch.tensor(arr.transpose(0,3,1,2), dtype=torch.float) orig_tens_size = tens.size()[2:] input_tensor = F.interpolate(tens, size=(256,256), mode='bilinear', align_corners=False) input_tensor = input_tensor.cuda() with torch.no_grad(): output = net(input_tensor) if 'reg' in net.outtype: seg, reg = output reg = reg.detach().cpu().numpy() reg = np.concatenate([reg[:,:3], sigmoid(reg[:,3:])], 1) else: seg = output segmentation = seg.detach().cpu() segmentation = F.interpolate(segmentation, size=orig_tens_size, mode='bilinear', align_corners=False) seg_arr = segmentation.numpy().transpose(0,2,3,1) seg_arr = cleanupseg(seg_arr) if argmax_output: seg_arr = np.argmax(seg_arr, 3) if 'reg' in net.outtype: return seg_arr, reg else: return seg_arr
3d6acd156761c651572a8b6a27d8511b2e88cc20
963
def Storeligandnames(csv_file):
    """It identifies the names of the ligands in the csv file

    PARAMETERS
    ----------
    csv_file : filename of the csv file with the ligands

    RETURNS
    -------
    lig_list : list of ligand names (list of strings)
    """
    Lig = open(csv_file, "rt")
    lig_aux = []
    for ligand in Lig:
        lig_aux.append(ligand.replace(" ", "_").replace("\n", "").lower())
    return lig_aux
dc4510a4ea946eaf00152cb445acdc7535ce0379
964
def chunk_to_rose(station): """ Builds data suitable for Plotly's wind roses from a subset of data. Given a subset of data, group by direction and speed. Return accumulator of whatever the results of the incoming chunk are. """ # bin into three different petal count categories: 8pt, 16pt, and 26pt bin_list = [ list(range(5, 356, 10)), list(np.arange(11.25, 349, 22.5)), list(np.arange(22.5, 338, 45)), ] bname_list = [ list(range(1, 36)), list(np.arange(2.25, 34, 2.25)), list(np.arange(4.5, 32, 4.5)), ] # Accumulator dataframe. proc_cols = [ "sid", "direction_class", "speed_range", "count", "frequency", "decade", "pcount", ] accumulator = pd.DataFrame(columns=proc_cols) for bins, bin_names, pcount in zip(bin_list, bname_list, [36, 16, 8]): # Assign directions to bins. # We'll use the exceptional 'NaN' class to represent # 355º - 5º, which would otherwise be annoying. # Assign 0 to that direction class. ds = pd.cut(station["wd"], bins, labels=bin_names) station = station.assign(direction_class=ds.cat.add_categories("0").fillna("0")) # First compute yearly data. # For each direction class... directions = station.groupby(["direction_class"]) for direction, d_group in directions: # For each wind speed range bucket... for bucket, bucket_info in speed_ranges.items(): d = d_group.loc[ ( station["ws"].between( bucket_info["range"][0], bucket_info["range"][1], inclusive=True, ) == True ) ] count = len(d.index) full_count = len(station.index) frequency = 0 if full_count > 0: frequency = round(((count / full_count) * 100), 2) accumulator = accumulator.append( { "sid": station["sid"].values[0], "direction_class": direction, "speed_range": bucket, "count": count, "frequency": frequency, "decade": station["decade"].iloc[0], "month": station["month"].iloc[0], "pcount": pcount, }, ignore_index=True, ) accumulator = accumulator.astype( {"direction_class": np.float32, "count": np.int32, "frequency": np.float32,} ) return accumulator
70adc8fe1ec4649ac6f58131f7bb893760cf6b8c
965
def loadKiosk(eventid):
    """Renders kiosk for specified event."""
    event = Event.get_by_id(eventid)
    return render_template("/events/eventKiosk.html",
                           event=event,
                           eventid=eventid)
19acab2648c1d32c5214a42797347d8563996abd
966
def bson_encode(data: ENCODE_TYPES) -> bytes:
    """
    Encodes ``data`` to bytes. BSON records in list are delimited by '\u241E'.
    """
    if data is None:
        return b""
    elif isinstance(data, list):
        encoded = BSON_RECORD_DELIM.join(_bson_encode_single(r) for r in data)
        # We are going to put a delimiter right at the head as a signal that this is
        # a list of bson files, even if it is only one record
        encoded = BSON_RECORD_DELIM + encoded
        return encoded
    else:
        return _bson_encode_single(data)
1fe61cc9c38d34c42d20478671c179c8f76606b0
967
def _GetTailStartingTimestamp(filters, offset=None): """Returns the starting timestamp to start streaming logs from. Args: filters: [str], existing filters, should not contain timestamp constraints. offset: int, how many entries ago we should pick the starting timestamp. If not provided, unix time zero will be returned. Returns: str, A timestamp that can be used as lower bound or None if no lower bound is necessary. """ if not offset: return None entries = list(logging_common.FetchLogs(log_filter=' AND '.join(filters), order_by='DESC', limit=offset)) if len(entries) < offset: return None return list(entries)[-1].timestamp
0362df8948a1762e85cfaaa8c32565d9f1517132
968
def main(data_config_file, app_config_file): """Print delta table schemas.""" logger.info('data config: ' + data_config_file) logger.info('app config: ' + app_config_file) # load configs ConfigSet(name=DATA_CFG, config_file=data_config_file) cfg = ConfigSet(name=APP_CFG, config_file=app_config_file) # get list of delta tables to load tables = cfg.get_value(DATA_CFG + '::$.load_delta') for table in tables: path = table['path'] spark = SparkConfig().spark_session(config_name=APP_CFG, app_name="grapb_db") df = spark.read.format('delta').load(path) df.printSchema() return 0
de3247618664a38245a9ad60129dbe1881ee84c6
969
def porosity_to_n(porosity, GaN_n, air_n):
    """Convert a porosity to a refractive index.
    using the volume averaging theory"""
    porous_n = np.sqrt((1-porosity)*GaN_n*GaN_n + porosity*air_n*air_n)
    return porous_n
a4fa765b1870823731cefa5747a0078bbf4d4b4e
970
def _indexing_coordi(data, coordi_size, itm2idx):
    """
    function: fashion item numbering
    """
    print('indexing fashion coordi')
    vec = []
    for d in range(len(data)):
        vec_crd = []
        for itm in data[d]:
            ss = np.array([itm2idx[j][itm[j]] for j in range(coordi_size)])
            vec_crd.append(ss)
        vec_crd = np.array(vec_crd, dtype='int32')
        vec.append(vec_crd)
    return np.array(vec, dtype='int32')
b3ee0594c7090742ba2dcb65545a31cd73f7805b
971
def plot_precentile(arr_sim, arr_ref, num_bins=1000, show_top_percentile=1.0): """ Plot top percentile (as specified by show_top_percentile) of best restults in arr_sim and compare against reference values in arr_ref. Args: ------- arr_sim: numpy array Array of similarity values to evaluate. arr_ref: numpy array Array of reference values to evaluate the quality of arr_sim. num_bins: int Number of bins to divide data (default = 1000) show_top_percentile Choose which part to plot. Will plot the top 'show_top_percentile' part of all similarity values given in arr_sim. Default = 1.0 """ start = int(arr_sim.shape[0] * show_top_percentile / 100) idx = np.argpartition(arr_sim, -start) starting_point = arr_sim[idx[-start]] if starting_point == 0: print("not enough datapoints != 0 above given top-precentile") # Remove all data below show_top_percentile low_as = np.where(arr_sim < starting_point)[0] length_selected = arr_sim.shape[0] - low_as.shape[0] # start+1 data = np.zeros((2, length_selected)) data[0, :] = np.delete(arr_sim, low_as) data[1, :] = np.delete(arr_ref, low_as) data = data[:, np.lexsort((data[1, :], data[0, :]))] ref_score_cum = [] for i in range(num_bins): low = int(i * length_selected / num_bins) # high = int((i+1) * length_selected/num_bins) ref_score_cum.append(np.mean(data[1, low:])) ref_score_cum = np.array(ref_score_cum) fig, ax = plt.subplots(figsize=(6, 6)) plt.plot( (show_top_percentile / num_bins * (1 + np.arange(num_bins)))[::-1], ref_score_cum, color='black') plt.xlabel("Top percentile of spectral similarity score g(s,s')") plt.ylabel("Mean molecular similarity (f(t,t') within that percentile)") return ref_score_cum
f2c024350ccba4dca83bb38ab6742d0e18cb7d3e
972
def set_xfce4_shortcut_avail(act_args, key, progs):
    """Set the shortcut associated with the given key to the first available program"""
    for cmdline in progs:
        # Split the command line to find the used program
        cmd_split = cmdline.split(None, 1)
        cmd_split[0] = find_prog_in_path(cmd_split[0])
        if cmd_split[0] is not None:
            return set_xfce4_shortcut(act_args, key, ' '.join(cmd_split))
    logger.warning("no program found for shortcut %s", key)
    return True
1b67e66fc7dd5b8aa4ca86dd8d7028af824b1cf7
973
def accesscontrol(check_fn): """Decorator for access controlled callables. In the example scenario where access control is based solely on user names (user objects are `str`), the following is an example usage of this decorator:: @accesscontrol(lambda user: user == 'bob') def only_bob_can_call_this(): pass Class methods are decorated in the same way. :param check_fn: A callable, taking a user object argument, and returning a boolean value, indicating whether the user (user object argument) is allowed access to the decorated callable.""" if not callable(check_fn): raise TypeError(check_fn) def decorator(wrapped): @wraps(wrapped) def decorated(*args, **kwargs): if ACL.current_user is None: raise AccessDeniedError(decorated) if not ACL.managed_funcs[decorated](ACL.current_user): raise AccessDeniedError(decorated) return wrapped(*args, **kwargs) ACL.managed_funcs[decorated] = check_fn return decorated return decorator
ec0deb22e40d3a03e7c9fadbb6b7085b1c955925
974
def positionPctProfit():
    """
    Position Percent Profit

    The percentage profit/loss of each position.
    Returns a dictionary with market symbol keys and percent values.

    :return: dictionary
    """
    psnpct = dict()
    for position in portfolio:
        # Strings are returned from API; convert to floating point type
        current = float(position.current_price)
        entry = float(position.avg_entry_price)
        psnpct[position.symbol] = ((current - entry) / entry) * 100
    return psnpct
b0abb40edeb6ff79abe29f916c6996e851627ab4
976
def _parse_fields(vel_field, corr_vel_field):
    """ Parse and return the radar fields for dealiasing. """
    if vel_field is None:
        vel_field = get_field_name('velocity')
    if corr_vel_field is None:
        corr_vel_field = get_field_name('corrected_velocity')
    return vel_field, corr_vel_field
8a0d8a4148ddc3757bc437de3dc942fd6b4db1b3
977
def get_species_charge(species):
    """
    Returns the species charge (only electrons so far)
    """
    if species == "electron":
        return qe
    else:
        raise ValueError(f'get_species_charge: Species "{species}" is not supported.')
24b0f091973dc5165194fc3063256413f14cd372
978
from typing import Dict
from typing import Any
from typing import Callable


def orjson_dumps(
    obj: Dict[str, Any], *, default: Callable[..., Any] = pydantic_encoder
) -> str:
    """Default `json_dumps` for TIA.

    Args:
        obj (BaseModel): The object to 'dump'.
        default (Callable[..., Any], optional): The default encoder.
            Defaults to pydantic_encoder.

    Returns:
        str: The json formatted string of the object.
    """
    return orjson.dumps(obj, default=default).decode("utf-8")
b66cc4ea1ecd372711086cfeb831d690bcfa5ecd
979
def KNN_classification(dataset, filename): """ Classification of data with k-nearest neighbors, followed by plotting of ROC and PR curves. Parameters --- dataset: the input dataset, containing training and test split data, and the corresponding labels for binding- and non-binding sequences. filename: an identifier to distinguish different plots from each other. Returns --- stats: array containing classification accuracy, precision and recall """ # Import and one hot encode training/test set X_train, X_test, y_train, y_test = prepare_data(dataset) # Fitting classifier to the training set KNN_classifier = KNeighborsClassifier( n_neighbors=100, metric='minkowski', p=2) KNN_classifier.fit(X_train, y_train) # Predicting the test set results y_pred = KNN_classifier.predict(X_test) y_score = KNN_classifier.predict_proba(X_test) # ROC curve title = 'KNN ROC curve (Train={})'.format(filename) plot_ROC_curve( y_test, y_score[:, 1], plot_title=title, plot_dir='figures/KNN_ROC_Test_{}.png'.format(filename) ) # Precision-recall curve title = 'KNN Precision-Recall curve (Train={})'.format(filename) plot_PR_curve( y_test, y_score[:, 1], plot_title=title, plot_dir='figures/KNN_P-R_Test_{}.png'.format(filename) ) # Calculate statistics stats = calc_stat(y_test, y_pred) # Return statistics return stats
b559ada6ace9c685cd7863a177f3f7224a5b5a69
980
def projection_error(pts_3d: np.ndarray, camera_k: np.ndarray, pred_pose: np.ndarray, gt_pose: np.ndarray): """ Average distance of projections of object model vertices [px] :param pts_3d: model points, shape of (n, 3) :param camera_k: camera intrinsic matrix, shape of (3, 3) :param pred_pose: predicted rotation and translation, shape (3, 4), [R|t] :param gt_pose: ground truth rotation and translation, shape (3, 4), [R|t] :return: the returned error, unit is pixel """ # projection shape (n, 2) pred_projection: np.ndarray = project_3d_2d(pts_3d=pts_3d, camera_intrinsic=camera_k, transformation=pred_pose) gt_projection: np.ndarray = project_3d_2d(pts_3d=pts_3d, camera_intrinsic=camera_k, transformation=gt_pose) error = np.linalg.norm(gt_projection - pred_projection, axis=1).mean() return error
846f8b468f180fcc2cd48c4ff7dc9ca21338b7b3
981
def ph_update(dump, line, ax, high_contrast): """ :param dump: Believe this is needed as garbage data goes into first parameter :param line: The line to be updated :param ax: The plot the line is currently on :param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast Description: Updates the ph line plot after pulling new data. """ plt.cla() update_data() values = pd.Series(dataList[3]) if(high_contrast): line = ax.plot(values, linewidth=3.0) else: line = ax.plot(values) return line
22b1648a2c2d5fc479cb23f2aa6365b0a2d9669c
982
def get_percent_match(uri, ucTableName): """ Get percent match from USEARCH Args: uri: URI of part ucTableName: UClust table Returns: Percent match if available, else -1 """ with open(ucTableName, 'r') as read: uc_reader = read.read() lines = uc_reader.splitlines() for line in lines: line = line.split() if line[9] == uri: return line[3] return -1
259e9955b282baf74fa43bbea1aa7136e8b6e0f7
983
def get_rm_rf(earliest_date, symbol='000300'):
    """
    Rm-Rf (market return minus the risk-free return)

    Benchmark stock index return minus the 1-month treasury-bill return.
    Returns a tuple of pd.Series (indexed by date): 'Mkt-RF', 'RF'
    """
    start = '1990-1-1'
    end = pd.Timestamp('today')
    benchmark_returns = get_cn_benchmark_returns(symbol).loc[earliest_date:]
    treasury_returns = get_treasury_data(start, end)['1month'][earliest_date:]
    # Fill missing values
    treasury_returns = treasury_returns.reindex(
        benchmark_returns.index, method='ffill')
    return benchmark_returns, treasury_returns
4a9e03381ba8c0db40342b7848783d1610207270
984
async def detect_custom(model: str = Form(...), image: UploadFile = File(...)): """ Performs a prediction for a specified image using one of the available models. :param model: Model name or model hash :param image: Image file :return: Model's Bounding boxes """ draw_boxes = False try: output = await dl_service.run_model(model, image, draw_boxes) error_logging.info('request successful;' + str(output)) return output except ApplicationError as e: error_logging.warning(model + ';' + str(e)) return ApiResponse(success=False, error=e) except Exception as e: error_logging.error(model + ' ' + str(e)) return ApiResponse(success=False, error='unexpected server error')
9586682d04d71662c61b9c4c4cee248c7ff4998b
985
import torch


def _get_top_ranking_propoals(probs):
    """Get top ranking proposals by k-means"""
    dev = probs.device
    kmeans = KMeans(n_clusters=5).fit(probs.cpu().numpy())
    high_score_label = np.argmax(kmeans.cluster_centers_)
    index = np.where(kmeans.labels_ == high_score_label)[0]
    if len(index) == 0:
        index = np.array([np.argmax(probs)])
    return torch.from_numpy(index).to(dev)
f8b19f483b84b2ba1fa37811326a4f1b8c6be14b
986
import warnings def test_simulated_annealing_for_valid_solution_warning_raised(slots, events): """ Test that a warning is given if a lower bound is passed and not reached in given number of iterations. """ def objective_function(array): return len(list(array_violations(array, events, slots))) array = np.array([ [1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0] ]) assert objective_function(array) == 2 np.random.seed(0) with warnings.catch_warnings(record=True) as w: X = simulated_annealing(initial_array=array, objective_function=objective_function, lower_bound=0, max_iterations=1) assert objective_function(X) == 1 assert len(w) == 1
0236aa0795c976ba3c95d223ab558239dad0eefc
988
from typing import Optional def _add_exccess_het_filter( b: hb.Batch, input_vcf: hb.ResourceGroup, overwrite: bool, excess_het_threshold: float = 54.69, interval: Optional[hb.ResourceGroup] = None, output_vcf_path: Optional[str] = None, ) -> Job: """ Filter a large cohort callset on Excess Heterozygosity. The filter applies only to large callsets (`not is_small_callset`) Requires all samples to be unrelated. ExcessHet estimates the probability of the called samples exhibiting excess heterozygosity with respect to the null hypothesis that the samples are unrelated. The higher the score, the higher the chance that the variant is a technical artifact or that there is consanguinuity among the samples. In contrast to Inbreeding Coefficient, there is no minimal number of samples for this annotation. Returns: a Job object with a single output j.output_vcf of type ResourceGroup """ job_name = 'Joint genotyping: ExcessHet filter' if utils.can_reuse(output_vcf_path, overwrite): return b.new_job(job_name + ' [reuse]') j = b.new_job(job_name) j.image(utils.GATK_IMAGE) j.memory('8G') j.storage(f'32G') j.declare_resource_group( output_vcf={'vcf.gz': '{root}.vcf.gz', 'vcf.gz.tbi': '{root}.vcf.gz.tbi'} ) j.command( f"""set -euo pipefail # Captring stderr to avoid Batch pod from crashing with OOM from millions of # warning messages from VariantFiltration, e.g.: # > JexlEngine - ![0,9]: 'ExcessHet > 54.69;' undefined variable ExcessHet gatk --java-options -Xms3g \\ VariantFiltration \\ --filter-expression 'ExcessHet > {excess_het_threshold}' \\ --filter-name ExcessHet \\ {f'-L {interval} ' if interval else ''} \\ -O {j.output_vcf['vcf.gz']} \\ -V {input_vcf['vcf.gz']} \\ 2> {j.stderr} """ ) if output_vcf_path: b.write_output(j.output_vcf, output_vcf_path.replace('.vcf.gz', '')) return j
a3ae37c5a6c930f5046600bf02fa6d980fbe8017
992
from pathlib import Path from typing import Union from typing import Tuple from typing import Dict def _get_config_and_script_paths( parent_dir: Path, config_subdir: Union[str, Tuple[str, ...]], script_subdir: Union[str, Tuple[str, ...]], file_stem: str, ) -> Dict[str, Path]: """Returns the node config file and its corresponding script file.""" if isinstance(config_subdir, tuple): config_subpath = Path(*config_subdir) else: config_subpath = Path(config_subdir) if isinstance(script_subdir, tuple): script_subpath = Path(*script_subdir) else: script_subpath = Path(script_subdir) return { "config": parent_dir / config_subpath / f"{file_stem}.yml", "script": parent_dir / script_subpath / f"{file_stem}.py", }
4f9a86ed4cf821f57f737336595a9521675f6866
993
import requests def macro_china_hk_cpi_ratio() -> pd.DataFrame: """ 东方财富-经济数据一览-中国香港-消费者物价指数年率 https://data.eastmoney.com/cjsj/foreign_8_1.html :return: 消费者物价指数年率 :rtype: pandas.DataFrame """ url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx" params = { "type": "GJZB", "sty": "HKZB", "js": "({data:[(x)],pages:(pc)})", "p": "1", "ps": "2000", "mkt": "8", "stat": "1", "pageNo": "1", "pageNum": "1", "_": "1621332091873", } r = requests.get(url, params=params) data_text = r.text data_json = demjson.decode(data_text[1:-1]) temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]]) temp_df.columns = [ "时间", "前值", "现值", "发布日期", ] temp_df['前值'] = pd.to_numeric(temp_df['前值']) temp_df['现值'] = pd.to_numeric(temp_df['现值']) temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date return temp_df
1e117746a36b14ee3afe92057677bee2ca6f861f
995
import json


def structuringElement(path):
    """ """
    with open(path) as f:
        data = json.load(f)
    data['matrix'] = np.array(data['matrix'])
    data['center'] = tuple(data['center'])
    return data
99ce5d8321d037e591313aa6a7611479417e25c3
996
def ptsToDist(pt1, pt2):
    """Computes the distance between two points"""
    if None in pt1 or None in pt2:
        dist = None
    else:
        vx, vy = points_to_vec(pt1, pt2)
        dist = np.linalg.norm([(vx, vy)])
    return dist
6329407bf7b84ffc835e67ffcb74823de2b33175
997
import torch


def d6_to_RotMat(aa: torch.Tensor) -> torch.Tensor:  # take (...,6) --> (...,9)
    """Converts 6D to a rotation matrix, from:
    https://github.com/papagina/RotationContinuity/blob/master/Inverse_Kinematics/code/tools.py"""
    a1, a2 = torch.split(aa, (3, 3), dim=-1)
    a3 = torch.cross(a1, a2, dim=-1)
    return torch.cat((a1, a2, a3), dim=-1)
b0bf02737838a236bf55eb697a27d2cbc671b44c
999
def encrypt(key, pt, Nk=4):
    """Encrypt a plain text block."""
    assert Nk in {4, 6, 8}
    rkey = key_expand(key, Nk)
    ct = cipher(rkey, pt, Nk)
    return ct
41d94f1c050d89e85c6e9f3c74de1cb3cae7a899
1,000
import requests
import logging


def upload(filename, url, token=None):
    """
    Upload a file to a URL
    """
    headers = {}
    if token:
        headers['X-Auth-Token'] = token

    try:
        with open(filename, 'rb') as file_obj:
            response = requests.put(url, data=file_obj, timeout=120,
                                    headers=headers, verify=False)
    except requests.exceptions.RequestException as err:
        logging.warning('RequestException when trying to upload file %s: %s',
                        filename, err)
        return None
    except IOError as err:
        logging.warning('IOError when trying to upload file %s: %s',
                        filename, err)
        return None

    if response.status_code == 200 or response.status_code == 201:
        return True
    return None
eb8a8060294322bd9df187c8076d8f66b4dc775c
1,001
import torch


def cost(states, sigma=0.25):
    """Pendulum-v0: Same as OpenAI-Gym"""
    l = 0.6
    goal = Variable(torch.FloatTensor([0.0, l]))  # .cuda()

    # Cart position
    cart_x = states[:, 0]
    # Pole angle
    thetas = states[:, 2]
    # Pole position
    x = torch.sin(thetas) * l
    y = torch.cos(thetas) * l
    positions = torch.stack([cart_x + x, y], 1)

    squared_distance = torch.sum((goal - positions) ** 2, 1)
    squared_sigma = sigma ** 2
    cost = 1 - torch.exp(-0.5 * squared_distance / squared_sigma)
    return cost
fdbf3105ff04437b05b5914aac43c61706f87287
1,002
def flatmap(fn, seq):
    """
    Map the fn to each element of seq and append the results of the
    sublists to a resulting list.
    """
    result = []
    for lst in map(fn, seq):
        for elt in lst:
            result.append(elt)
    return result
c42d07f712a29ece76cd2d4cec4f91ec2562a1c0
1,003
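A quick usage sketch for flatmap above (illustrative values):

# Each element maps to a small list; the sublists are flattened in order.
print(flatmap(lambda x: [x, x * 10], [1, 2, 3]))  # [1, 10, 2, 20, 3, 30]
print(flatmap(str.split, ["a b", "c"]))           # ['a', 'b', 'c']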
def the_test_file():
    """the test file."""
    filename = 'tests/resources/grype.json'
    script = 'docker-grype/parse-grype-json.py'
    return {
        'command': f'{script} {filename}',
        'host_url': 'local://'
    }
d97d621d05f3844053b42c878dc8189fc8d264d0
1,004
import csv


def build_stations() -> tuple[dict, dict]:
    """Builds the station dict from source file"""
    stations, code_map = {}, {}
    data = csv.reader(_SOURCE["airports"].splitlines())
    next(data)  # Skip header
    for station in data:
        code = get_icao(station)
        if code and station[2] in ACCEPTED_STATION_TYPES:
            stations[code] = format_station(code, station)
            code_map[station[0]] = code
    return stations, code_map
773d34c7d33585611dfb79fc4beaf8702a2c57df
1,005
def process_row(row, fiscal_fields):
    """Add and remove appropriate columns.
    """
    surplus_keys = set(row) - set(fiscal_fields)
    missing_keys = set(fiscal_fields) - set(row)
    for key in missing_keys:
        row[key] = None
    for key in surplus_keys:
        del row[key]
    assert set(row) == set(fiscal_fields)
    return row
1c55fe628b53be72633d2fcae7cc1fbac91d04ae
1,009
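A usage sketch for process_row above (illustrative field names):

# 'b' is surplus and removed; 'c' is missing and added as None.
row = {'a': 1, 'b': 2}
print(process_row(row, ['a', 'c']))  # {'a': 1, 'c': None}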
def DefaultTo(default_value, msg=None):
    """Sets a value to default_value if none provided.

    >>> s = Schema(DefaultTo(42))
    >>> s(None)
    42
    """
    def f(v):
        if v is None:
            v = default_value
        return v
    return f
10401d7214d15c2b0bf28f52430ef71b5df0a116
1,010
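A usage sketch for DefaultTo above, used outside any schema library, since the returned callable works on its own:

fill_missing = DefaultTo(42)
print(fill_missing(None))  # 42
print(fill_missing(7))     # 7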
def load_files(file_list, inputpath):
    """
    function to load the data from potentially multiple files into one
    pandas DataFrame
    """
    df = None
    # loop through files and append
    for i, file in enumerate(file_list):
        path = f"{inputpath}/{file}"
        print(path)
        df_i = pd.read_csv(path)
        if i == 0:
            df = df_i
        else:
            df = pd.concat([df, df_i], axis=0, ignore_index=True)
    return df
2f1ec9519c4ff1cb9d8a2f492e80cc05ecb968db
1,011
def list_all():
    """
    List all systems

    List all transit systems that are installed in this Transiter instance.
    """
    return systemservice.list_all()
21efc81b1312f01d6b016fa10cdf675b0e22655f
1,012
def putText(image: np.ndarray, text: str, org=(0, 0),
            font=_cv2.FONT_HERSHEY_PLAIN, fontScale=1, color=(0, 0, 255),
            thickness=1, lineType=_cv2.LINE_AA,
            bottomLeftOrigin=False) -> np.ndarray:
    """Add text to `cv2` image, with default values.

    :param image: image array
    :param text: text to be added
    :param org: origin of text, from top left by default
    :param font: font choice
    :param fontScale: font size
    :param color: BGR color, red by default
    :param thickness: font thickness
    :param lineType: line type of text
    :param bottomLeftOrigin: True to start from bottom left, default False
    :return: image with text added
    """
    return _cv2.putText(image, text, org, font, fontScale, color, thickness,
                        lineType, bottomLeftOrigin)
37fd20c2afb70a59f78f35741c235e9793721dab
1,013
def gaussFilter(fx: int, fy: int, sigma: int):
    """
    Gaussian Filter
    """
    x = tf.range(-int(fx / 2), int(fx / 2) + 1, 1)
    Y, X = tf.meshgrid(x, x)
    sigma = -2 * (sigma**2)
    z = tf.cast(tf.add(tf.square(X), tf.square(Y)), tf.float32)
    k = 2 * tf.exp(tf.divide(z, sigma))
    k = tf.divide(k, tf.reduce_sum(k))
    return k
b83bcadba782f16f6932c081b9f20ad9bd71828b
1,014
def do_something(param=None):
    """
    Several routes for the same function

    FOO and BAR have different documentation
    ---
    """
    return "I did something with {}".format(request.url_rule), 200
7a50206c27b66d2b3ff588777ea95927b527a719
1,015
import re
from typing import Literal


def extract_text(
    pattern: re.Pattern[str] | str,
    source_text: str,
) -> str | Literal[False]:
    """Match the given pattern and extract the matched text as a string."""
    match = re.search(pattern, source_text)
    if not match:
        return False
    match_text = match.groups()[0] if match.groups() else match.group()
    return match_text
a6f762cfd26dd1231db4b6e88247e2566d186212
1,016
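A usage sketch for extract_text above (illustrative patterns); it returns the first capture group when one exists, otherwise the whole match, and False when nothing matches.

print(extract_text(r"name=(\w+)", "name=alice age=3"))  # 'alice' (first group)
print(extract_text(r"\d+", "version 42"))               # '42' (whole match)
print(extract_text(r"xyz", "abc"))                      # False (no match)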
def nodal_distribution_factors_v2(topo: ndarray, volumes: ndarray):
    """The j-th factor of the i-th row is the contribution of element i to the
    j-th node. Assumes a regular topology."""
    ndf = nodal_distribution_factors(topo, volumes)
    return ndf
b805b9fa2617bc9501910bc43cb623cd15d3aea5
1,019
def game_core_binary(number_to_guess):
    """Binary search approach.
    Set the first predict value as the middle of interval, i.e. 50.
    Then decrease or increase the predict number by step.
    The step is calculated using the check interval divided by 2, i.e. 25, 13 ... 1
    The minimum step is always 1.
    The function return count of guesses"""
    count_guesses = 1
    predict = step = round(MAX_NUMBER / 2)
    while number_to_guess != predict:
        count_guesses += 1
        step = round(step / 2) if step > 1 else 1
        if number_to_guess > predict:
            predict += step
        elif number_to_guess < predict:
            predict -= step
    return count_guesses
909322bda51c25175c372708896bc6aca5e9753b
1,020
def linear_trend(series, return_line=True):
    """
    USAGE
    -----
    line = linear_trend(series, return_line=True)

    OR

    b, a, x = linear_trend(series, return_line=False)

    Returns the linear fit (line = b*x + a) associated with the 'series' array.

    Adapted from pylab.detrend_linear.
    """
    series = np.asanyarray(series)
    x = np.arange(series.size, dtype=np.float_)

    C = np.cov(x, series, bias=1)   # Covariance matrix.
    b = C[0, 1]/C[0, 0]             # Angular coefficient.
    a = series.mean() - b*x.mean()  # Linear coefficient.
    line = b*x + a

    if return_line:
        return line
    else:
        return b, a, x
129b63dd9f194dd0a6506e2645e330fe92ea6a1c
1,021
import torch


def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
    """Gradcheck wrapper for masked operations.

    When mask is specified, replaces masked-out elements with zeros.

    Use for operations that produce non-finite masked-out elements,
    for instance, for minimum and maximum reductions.
    """
    output = op(input, *args, **kwargs)
    mask = kwargs.get('mask')
    if mask is not None:
        output_mask = torch._masked._output_mask(op, input, *args, **kwargs)
        output = torch.where(output_mask, output, output.new_zeros([]))
    return output
fa0d3433a8cf3d60c81c96dc154d8f0e82acd791
1,022
def classify(neural_net, image_file):
    """
    Using the given model and image file, returns the model's prediction
    for the image as an array.
    """
    img = Image.open(image_file)
    img.load()
    img_array = np.asarray(img)
    img_array.shape = (1, 100, 100, 3)
    # Use the supplied network for prediction (the original referenced an
    # undefined global `model` instead of the `neural_net` parameter).
    prediction = neural_net.predict(img_array)[0][0]
    return prediction
3d8b301b3f41b5cad04233228198424670f06506
1,023
def delete(job):
    """Delete a job."""
    # Initialise variables.
    jobid = job["jobid"]

    try:
        shellout = shellwrappers.sendtossh(job, ["qdel " + jobid])
    except exceptions.SSHError:
        raise exceptions.JobdeleteError("Unable to delete job.")

    return shellout[0]
c870e07210063136ac3651691d1e54dc292f0830
1,024
import itertools def optimum_simrank(x_p, x_n, alpha): """Intermediary function to the one below.""" pos_pair_1 = itertools.combinations(x_p, 2) pos_pair_2 = itertools.combinations(x_n, 2) neg_pair = itertools.product(x_p, x_n) def get_val_from_pair(x): # Transforms each pair into one minus the minimum of its l1 distance to (0,0) or (1,1). distance_to_lower_corner = max(abs(x[0]), abs(x[1])) distance_to_upper_corner = max(abs(1. - x[0]), abs(1. - x[1])) return 1 - min(distance_to_lower_corner, distance_to_upper_corner) x_p = (np.array(list(map(get_val_from_pair, pos_pair_1)) + list(map(get_val_from_pair, pos_pair_2)))) x_n = np.array(list(map(get_val_from_pair, neg_pair))) def opt_fun(i_p, i_n): if float(i_n) / x_n.shape[0] <= alpha: return i_p / x_p.shape[0] return - float("inf") X = np.hstack([x_p, x_n]) Y = np.array([+1]*len(x_p) + [-1]*len(x_n)) f_opt, crit_opt, _ = ut.bipart_partition(X, Y, opt_fun) return 1-f_opt, crit_opt
bc4f451dc2ae5f9fe653e9330241202b5f470e49
1,025
from enaml.core.import_hooks import imports from contextlib import contextmanager from enaml.core.operators import operator_context def imports(operators=None, union=True): """ Lazily imports and returns an enaml imports context. Parameters ---------- operators : dict, optional An optional dictionary of operators to push onto the operator stack for the duration of the import context. If this is not provided, the default Enaml operators will be used. Unless a custom model framework is being used (i.e. not Atom), custom operators will typically not be needed. union : bool, optional Whether to union the operators with the operators on the top of the operator stack. The default is True and is typically the correct choice to allow overriding a subset of the default Enaml operators. Returns ------- result : context manager A context manager which will install the Enaml import hook (and optional operators) for the duration of the context. """ if operators is None: return imports() @contextmanager def imports_context(): with imports(): with operator_context(operators, union): yield return imports_context()
c0068c39a4c9c39c8789fd79ed651ecf2e50c3b7
1,026
import io import tokenize from typing import cast def apply_job_security(code): """Treat input `code` like Python 2 (implicit strings are byte literals). The implementation is horribly inefficient but the goal is to be compatible with what Mercurial does at runtime. """ buf = io.BytesIO(code.encode("utf8")) tokens = tokenize.tokenize(buf.readline) # NOTE: by setting the fullname to `mercurial.pycompat` below, we're # ensuring that hg-specific pycompat imports aren't inserted to the code. data = tokenize.untokenize(replacetokens(list(tokens), "mercurial.pycompat")) return cast(str, data.decode("utf8"))
8dd7e0f6ad91f9c98ea50ac76fb30616d9d8f266
1,027
def fetch(gpname: str):
    """
    Gives gunpowder

    Parameters
    ----------
    gpname: str
        Gunpowder name

    Returns
    -------
    gpowder: dict
        Gunpowder in dictionary form
    """
    gpowders = _load_many()
    return gpowders[gpname]
e880a62c92937d564ff84af33c7c0e1dd2383d9d
1,028
def _kc_frequency_features(time_data, times, sfreq):
    """ Calculate absolute power of delta and alpha band before (on a 3 seconds
    windows) and after K-complexes"""
    exp = [('before', -2.5, -0.5), ('after', 1, 3)]
    res = {}
    for m in exp:
        kc_matrix_temp = time_data[:, np.bitwise_and(times > m[1], times < m[2])]
        absol_power = compute_absol_pow_freq_bands(
            sfreq, kc_matrix_temp, psd_method='multitaper',
            psd_params={'mt_adaptive': True, 'mt_bandwidth': 3,
                        'mt_low_bias': True},
            freq_bands=[0.5, 4, 8, 12])
        delta = absol_power[:, 0]
        alpha = absol_power[:, 2]
        res[m[0]] = (delta, alpha)
    delta_before, alpha_before, delta_after, alpha_after = \
        res['before'][0], res['before'][1], res['after'][0], res['after'][1]
    return delta_before, alpha_before, delta_after, alpha_after
0e0df2c3f2b0baa8e6fb8118fa01a89b62c2656c
1,029