Dataset schema:
  content : string (lengths 35 to 762k)
  sha1    : string (length 40)
  id      : int64 (0 to 3.66M)
def cov(x, y, w):
    """Calculates weighted covariance"""
    return np.sum(
        w * (x - np.average(x, axis=0, weights=w)) *
        (y - np.average(y, axis=0, weights=w))
    ) / np.sum(w)
b590c43c02321c3503271c56f6eca1b48a3169d8
700
from typing import List
from typing import Dict


def eval_metrics_all(
        y: List[np.ndarray], y_hat: List[np.ndarray]) -> Dict[str, float]:
    """Calculates combined accuracy, f1, precision, recall and AUC scores for
    multiple arrays. The arrays are truncated to the minimum length of their
    corresponding partner and stacked on top of each other to calculate the
    combined scores.

    Arguments:
        y (np.ndarray): Ground truth.
        y_hat (np.ndarray): Prediction.

    Returns:
        Dict[str, float]: Returns a dict with all scores.

    Example:
        >>> y = [np.ones((10, 1)), np.zeros((10, 1))]
        >>> y_hat = [np.ones((10, 1)), np.zeros((10, 1))]
        >>> eval_metrics_all(y, y_hat)
        {'accuracy': 1.0, 'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'roc_auc': 1.0}
    """
    if len(y) != len(y_hat):
        raise ValueError('y and y_hat must have the same number of elements.')

    # allow 1d or 2d arrays with the 2nd dimension of 1
    check_ndim(*y, *y_hat, ndim=2, strict=False)
    check_size(*y, *y_hat, size=1, axis=1, strict=False)
    y = list(map(lambda x: x.reshape(-1), y))
    y_hat = list(map(lambda x: x.reshape(-1), y_hat))

    # truncate corresponding arrays to the same length
    y_, y_hat_ = np.hstack(list(truncate(*zip(y, y_hat))))

    return eval_metrics(y_, y_hat_)
79374750a7bf648dc8b898b187fd4b19f470bc0d
701
# "from re import A" appears to be a mis-resolved import; A.Compose/A.Resize/
# A.CenterCrop/A.Normalize match the albumentations API.
import albumentations as A


def fake_dataset_no_label(path, range1, batch_size=32, shuffle=False):
    """
    Create fake dataset with no label
    Args:
        path (str) : provide the data settings
        range1 (tuple) : range of generated images
        batch_size (int): number of samples contained in each generated batch
        shuffle (bool) : shuffle the data
    Returns:
        data loader
    """
    list_ids = []
    labels = {}
    for i in range(range1[0], range1[1]):
        list_ids.append(path + 'gen_' + str(i) + '.jpg')
        labels[path + 'gen_' + str(i) + '.jpg'] = -1
    # as per the author's citation, we have transformed the input image
    # (resize to 64 * 64, 256 * 256, 224 * 224)
    pre_process = [(64, 64), (256, 256), (224, 224)]
    mean_normalize = (0.485, 0.456, 0.406)
    std_normalize = (0.229, 0.224, 0.225)
    transform = A.Compose([
        A.Resize(pre_process[0][0], pre_process[0][1]),
        A.Resize(pre_process[1][0], pre_process[1][1]),
        A.CenterCrop(width=pre_process[2][0], height=pre_process[2][1]),
        A.Normalize(mean=mean_normalize, std=std_normalize)
    ])
    loader = data_iterator_celeba(list_ids, labels, transform=transform,
                                  batch_size=batch_size, shuffle=shuffle)
    return loader
24db6921830cf775ee1c9a2a3797dcc521c202bb
702
def number_of_songs_match(folder, songs):
    """ Checks if the number of music files in folder matches the number of
        tracks listed in songs.
        Arguments:
        - folder: path to folder where music files are found
        - songs: list of track numbers
        Returns: True / False
    """
    files = [f for f in listdir(folder)
             if isfile(join(folder, f)) and f.endswith('.mp3')]
    if len(files) != len(songs):
        return False
    return True
0de44cfdce9add35fba61efd0b0351f450df0e9e
703
def spaces_to_pluses(q, city, state):
    """Split q, city and state into URL-ready tokens; fall back to a
    'Nationwide' search when city/state are missing."""
    if city and state:
        return split_text(q), split_text(city), split_text(state)
    else:
        return split_text(q), 'Nationwide', ' '
5ad007d7a307fc58812dc5b1fd55542411a7a9dc
704
from typing import Optional
from typing import List


def _check_str_input(var, input_name: str, valid_options: Optional[List[str]] = None) -> str:
    """
    _check_str_input

    Convenience function to check if an input is a string. If argument valid_options is
    given, this function will also check that var is a valid option from the
    valid_options specified.

    Parameters
    ----------
    var
        the input variable to check
    input_name : str
        the name of the variable to include if an error is raised
    valid_options: List[str], optional
        a list of valid options for var

    Returns
    -------
    str
        the input var after lowercasing and stripping the string
    """
    if not isinstance(var, str):
        raise ValueError("Invalid input {0} for {1}. Input {1} must be a string.".format(
            var, input_name))

    var = var.strip().lower()

    if valid_options is not None:
        valid_options = [option.strip().lower() for option in valid_options]
        if var not in valid_options:
            raise ValueError("Invalid input {0} for {1}. Input {1} must be one of the following "
                             "options: {2}.".format(var, input_name, valid_options))

    return var
357a8516fe65dddb35b7799ddc68b892da75ea02
705
import os
import subprocess
import gzip
import shutil
import tarfile
import zipfile


def os_compress(filename, ctype, remove_original=False):
    """ compress a file to any of the formats: ['.Z', '.gz', '.tar.gz', '.zip']
        If the instance is already compressed (to any format), no operation
        will be performed. If it is uncompressed:
        1) the file will be compressed
        2) if remove_original is set to True, then the original uncompressed
           file will be removed (only if the compression process is successful)
    """
    if not os.path.isfile(filename):
        raise RuntimeError(
            "[ERROR] compress::os_compress File {:} does not exist".format(
                filename))

    if ctype is None:
        return filename, filename

    if not ctype.startswith('.'):
        ctype = '.' + ctype
    compressed_file = '{:}{:}'.format(filename, ctype)

    status = 0
    if ctype == '.Z':
        try:
            subprocess.call(["compress", "-f", "{:}".format(filename)])
        except:
            status = 1
    elif ctype == '.gz':
        try:
            with open(filename, 'rb') as f_in, gzip.open(compressed_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        except:
            status = 2
    elif ctype == '.tar.gz':
        try:
            with tarfile.open(compressed_file, "w:gz") as tarout:
                tarout.add(filename, os.path.basename(filename))
        except:
            status = 3
    elif ctype == '.zip':
        try:
            with zipfile.ZipFile(compressed_file, "w") as zipout:
                zipout.write(filename, os.path.basename(filename))
        except:
            status = 4
    else:
        status = 5

    if status > 0 or not os.path.isfile(compressed_file):
        msg = "[ERROR] Failed to compress RINEX file {:} (code: {:})".format(
            filename, status)
        raise RuntimeError(msg)
    else:
        if remove_original and ctype != '.Z':
            os.remove(filename)

    return filename, compressed_file
9bbf33a5aa263398df1e77b99ca2440b921c9b7f
706
def run_U_fixed_dynamics(**kwargs):
    """
    Run simulation for a given set of parameter values and generate relevant plots
    """
    # Steady state checks
    #print('============================== U fixed, U='+str(kwargs['U']))
    a = mpde(**kwargs)
    #lib.disp_params(a)  # display non-array parameters

    #t0 = time.time()
    a.run()
    #t1 = time.time()
    #print('*\t Run time',t1-t0)

    initial_mass = np.sum(a.sol[0, :]) * a.dx
    mass_true = lib.mass_fn(a.t, initial_mass, **kwargs)

    #lib.disp_norms(a,ground_truth_values)

    fig = plt.figure(figsize=(10, 5))
    ax11 = fig.add_subplot(121)
    ax12 = fig.add_subplot(122)

    mass_pde = np.sum(a.sol, axis=1) * a.dx

    ax11.plot(a.t, mass_true, label='mass true')
    ax11.plot(a.t, mass_pde, label='mass pde')
    ax12.plot(a.t, np.abs(mass_pde - mass_true), label='|pde-(true)|')

    ax11.set_title('mass over time')
    ax12.set_title('mass diff')
    ax11.set_xlabel('t')
    ax12.set_xlabel('t')
    ax11.legend()
    ax12.legend()

    plt.tight_layout()

    # include dt
    kwargs = {**kwargs, **{'dt': a.dt}}
    fname = (DIR_TESTS + 'U_fixed_dynamics_' + lib.fname_suffix(**kwargs))

    plt.savefig(fname)
    plt.close()

    return np.amax(np.abs(mass_true - mass_pde))
735e70c9082ffd92d2e1f5c20b0ca222f5ca25be
707
def removeDuplicates(bookmarks, newBookmarks):
    """Creates and returns a new list of bookmarks without any duplicates"""
    nodup = []
    for bmNew in newBookmarks:
        foundDup = False
        for bm in bookmarks:
            if (bm.linkURL == bmNew.linkURL):
                foundDup = True
                break
        if (not foundDup):
            nodup.append(bmNew)
    return nodup
12280e827796b95be30f645c5ca0e495379d6a55
708
def TNaming_Naming_GetID(*args):
    """
    * following code from TDesignStd
    ==============================

    :rtype: Standard_GUID
    """
    return _TNaming.TNaming_Naming_GetID(*args)
4b9c6aa4b6b9029d5ac879853b85780e46984d50
709
def assigned_user_add(request, location_id, destination):
    """
    Assigned user add is a POST function where it will ADD a user to a
    project/task/opportunity/requirement.
    :param request:
    :param location_id:
    :param destination:
    :return:
    """
    # Load the template
    t = loader.get_template('NearBeach/blank.html')

    # context
    c = {}

    return HttpResponse(t.render(c, request))
92f85aad0eb99f867c8c680ed6e6d49be002ee8c
710
import re


def _parse_challenge(header):
    # type: (str) -> Dict[str, str]
    """Parse challenge header into service and scope"""
    ret = {}
    if header.startswith(BEARER):
        challenge_params = header[len(BEARER) + 1:]

        matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)
        _clean(matches)
        for i in range(0, len(matches), 2):
            ret[matches[i]] = matches[i + 1]

    return ret
dc9044cdfa585a9dfb2cb1de9349d945e7afc985
711
def test_get_batch(source):
    """ Creates an input/target pair for evaluation """
    seq_len = len(source) - 1
    data = source[:seq_len]
    target = source[1:1 + seq_len].view(-1)
    return data, target
0c26f9f957063bb136f9fe77ed1a8bbdedc38a15
712
def getReceptorResidues(filename=None, data=None):
    """Accepts a PDB(TQ) file and returns a nested dictionary of:
       chain:residue:atoms
    """
    if filename:
        lines = getLines(filename)
    else:
        lines = data
    structure = {}
    for l in lines:
        if l.startswith("ATOM") or l.startswith("HETATM"):
            res_t = l[17:20].strip()
            res_n = l[22:27].strip()
            res = res_t + res_n
            chain = l[21].strip()
            atom = l[12:17].strip()
            if not chain in structure:
                structure[chain] = {}
            if not res in structure[chain]:
                structure[chain][res] = []
            if not atom in structure[chain][res]:
                structure[chain][res].append(atom)
    return structure
e409a61bac880bd586a1a21865389b30c1c28838
713
def extract_first_compute_cell(text):
    """
    INPUT: a block of wiki-like marked up text
    OUTPUT:
    - ``meta`` - meta information about the cell (as a dictionary)
    - ``input`` - string, the input text
    - ``output`` - string, the output text
    - ``end`` - integer, first position after }}} in text.
    """
    # Find the input block
    i = text.find('{{{')
    if i == -1:
        raise EOFError
    j = text[i:].find('\n')
    if j == -1:
        raise EOFError
    k = text[i:].find('|')
    if k != -1 and k < j:
        try:
            meta = dictify(text[i+3:i+k])
        except TypeError:
            meta = {}
        i += k + 1
    else:
        meta = {}
        i += 3

    j = text[i:].find('\n}}}')
    if j == -1:
        j = len(text)
    else:
        j += i
    k = text[i:].find('\n///')
    if k == -1 or k + i > j:
        input = text[i:j]
        output = ''
    else:
        input = text[i:i+k].strip()
        output = text[i+k+4:j]

    return meta, input.strip(), output, j + 4
0dabdb5ad7b4b1d6f513d485782d25f134cf3f62
714
from typing import Union
from typing import IO
from typing import Dict


def check_schema(loader_impl: LoaderImpl) -> LoaderImpl:
    """Wrapper method to check column names and types."""

    @wraps(loader_impl)
    def wrapped_loader(fp: Union[str, IO], extra_fields: Dict[str, str] = None) -> DataFrame:
        name = fp if isinstance(fp, str) else fp.name
        data = loader_impl(fp, extra_fields)
        schema = FILE_SCHEMA if not extra_fields else {**FILE_SCHEMA, **extra_fields}
        for column in list(data.columns):
            if column not in schema:
                log.info(f'From file ({name}): ignoring column \'{column}\'')
                data.drop([column], axis=1, inplace=True)
        for column, dtype in schema.items():
            if column not in data.columns:
                raise RuntimeError(f'From file ({name}): missing column \'{column}\'')
            else:
                try:
                    data[column] = data[column].astype(dtype)
                except (TypeError, ValueError) as error:
                    raise RuntimeError(f'From file ({name}), column \'{column}\': {error}') from error
        return data

    return wrapped_loader
471153738204a4aabc7219c23f261a8761ff8e91
715
import logging
from datetime import datetime


def get_album_photos(album, offset, vk_session):
    """Retrieves list of photos within given album from VK.com

    :param album:
    :type album: str
    :param offset:
    :type offset: int or None
    :param vk_session: instance of :class:`vk_api.VkApi`
    :type vk_session: :class:`vk_api.VkApi`
    :return:
    """
    def normpath(filename):
        keepcharacters = [' ', '.', '_', ',']
        return "".join(c for c in filename
                       if c.isalnum() or c in keepcharacters).rstrip()

    items = []
    try:
        if USER_PHOTOS_ALBUM_ID == album['id']:
            response = vk_session.method(
                'photos.getUserPhotos',
                values={
                    'user_id': vk_session.token['user_id'],
                    'count': 1000,
                    'offset': offset or 0,
                    'photo_sizes': 1
                })
        else:
            response = vk_session.method(
                'photos.get',
                values={
                    'owner_id': vk_session.token['user_id'],
                    'album_id': album['id'],
                    'offset': offset or 0,
                    'photo_sizes': 1
                })
    except Exception as e:
        logging.error(e)
        return items

    image_types = {
        's': 0, 'm': 1, 'x': 2, 'o': 3, 'p': 4,
        'q': 5, 'r': 6, 'y': 7, 'z': 8, 'w': 9
    }

    if 'items' in response:
        for item in response['items']:
            sizes = item.get('sizes')
            if not sizes:
                logging.info('Item skipped!')
                continue

            newlist = sorted(
                sizes,
                key=lambda x: image_types.get(x.get('type')),
                reverse=True)

            image = {
                'id': item['id'],
                # datetime.fromtimestamp matches the "from datetime import datetime" import
                'date': datetime.fromtimestamp(item['date']),
                'url': newlist[0].get('url')
            }

            if item.get('text'):
                image['title'] = normpath(item['text'])

            items.append(image)

    return items
e6bc3fae5c0c132d10eb0af200e8e6a9872fa04b
716
def get_view_cursor(**kwargs) -> 'XTextViewCursor':
    """
    Gets current view cursor which is a XTextViewCursor

    Keyword Args:
        o_doc (object, optional): current document (xModel)

    Returns:
        object: View Cursor
    """
    o_doc = kwargs.get('o_doc', None)
    if o_doc is None:
        o_doc = get_xModel()
    # https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1text_1_1XTextViewCursor.html
    frame: object = o_doc.CurrentController.Frame
    current_controller: object = frame.getController()  # XController
    view_cursor = current_controller.getViewCursor()
    return view_cursor
6fa8ce40425684e2238337da59c52e3004b18787
717
def get_common_metrics(test_values, predicted):
    """
    Return some common classifier metrics
    :param test_values: values to test with
    :param predicted: predicted values
    :return: accuracy, precision and recall value
    """
    accuracy = metrics.accuracy_score(test_values, predicted)
    precision = metrics.precision_score(test_values, predicted)
    recall = metrics.recall_score(test_values, predicted)
    return accuracy, precision, recall
badbe3db4641352c1f36db1996c498f3b467d8f4
718
import os


def hash_bower_component(hash_obj, path):
    """Hash the contents of a bower component directory.

    This is a stable hash of a directory downloaded with `bower install`, minus
    the .bower.json file, which is autogenerated each time by bower. Used in
    lieu of hashing a zipfile of the contents, since zipfiles are difficult to
    hash in a stable manner.

    Args:
      hash_obj: an open hash object, e.g. hashlib.sha1().
      path: path to the directory to hash.

    Returns:
      The passed-in hash_obj.
    """
    if not os.path.isdir(path):
        raise ValueError('Not a directory: %s' % path)

    path = os.path.abspath(path)
    for root, dirs, files in os.walk(path):
        dirs.sort()
        for f in sorted(files):
            if f == '.bower.json':
                continue
            p = os.path.join(root, f)
            hash_obj.update(p[len(path)+1:].encode("utf-8"))
            hash_obj.update(open(p, "rb").read())

    return hash_obj
2c80251b43477df295469989575f8cf47d5c3397
719
def align_jp_and_en_boxes(pd_results) -> pd.DataFrame:
    """boxes are not ordered on the page, so heuristically must match them
    based on location on page
    """
    japanese_results = pd.DataFrame.copy(
        pd_results[pd_results.language == "jp"]).reset_index()
    english_results = pd.DataFrame.copy(
        pd_results[pd_results.language == "en"]).reset_index()
    japanese_vals = japanese_results[["left", "top"]].values
    english_vals = english_results[["left", "top"]].values
    n = NearestNeighbors(n_neighbors=1)
    n.fit((japanese_vals))
    dis, index = n.kneighbors(english_vals)
    english_results["boxID"] = index.reshape(-1)
    return japanese_results.append(english_results).reset_index()
8292cd2ac91c497ba3e8e737b0194b27ffee4455
720
def productivity_flag():
    """
    Real Name: b'Productivity Flag'
    Original Eqn: b'1'
    Units: b'Dmnl'
    Limits: (None, None)
    Type: constant

    b''
    """
    return 1
5de320366584f3e2803172c9e97c5b3b1fc79715
721
def create_cartpole_network(hidden_layers=2, neurons=56):
    """ Network that can solve gyms 'CartPole-v1' environment. """
    net = Sequential()
    net.add(Dense(
        neurons,
        input_shape=(4,),
        kernel_regularizer=l2(0.001),
        kernel_initializer=GlorotNormal(),
        activation='relu'),
    )
    net.add(Dropout(0.1))
    for n in range(hidden_layers):
        net.add(Dense(
            neurons,
            kernel_regularizer=l2(0.001),
            kernel_initializer=GlorotNormal(),
            activation='relu'),
        )
        net.add(Dropout(0.1))
    net.add(Dense(2, activation='relu'))
    return net
52bdf8352595dcd0cb73951c0b2ca575357c38d6
722
def format_as_rfc2822(*args, **kwargs):
    """Alias of ``format_as_rss()``."""
    return format_as_rss(*args, **kwargs)
3eb94d85241b8a96ee841233c201acd65fc683f3
723
def train_model_exponentially(train_images, train_labels, parts, exponent):
    """
    Trains a model incrementally, using training data partitions that increase
    exponentially, and exports it.
    :param train_images:
    :param train_labels:
    :param parts:
    :param exponent:
    :return: The final model
    """
    normal_model = model_handler.cnn_model()

    # prepare data
    train_images, train_labels = data_manipulator.prepare_visual_data(train_images, train_labels)

    # split training data to partitions
    partitioned_train_images = partition_data_exponentially(train_images, parts, exponent)
    partitioned_train_labels = partition_data_exponentially(train_labels, parts, exponent)

    # train model
    for part in range(parts):
        normal_model.fit(partitioned_train_images[part], partitioned_train_labels[part],
                         epochs=5, batch_size=64)
        model_handler.save_model(normal_model,
                                 'normal_model_exponential_part_' + str(part + 1) + '_of_' + str(parts))

    return normal_model
0e107b2e3fa69233679b9c84a7a3caf015ba53e4
724
def get_config_of(tests, test_name):
    """ Find generic values of test """
    for test in tests:
        if test.name == test_name:
            try:
                return test._test_case._run._config  # pylint: disable=protected-access
            except AttributeError:
                return test._run._config  # pylint: disable=protected-access
    raise KeyError(test_name)
821f0b180a1846b432fd55afc39b1b24b4a80de0
725
from typing import Union from typing import List def transmit_format(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None unformatted_columns = set(self.column_names) - set(self._format_columns or []) self_format = { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self._format_columns, "output_all_columns": self._output_all_columns, } # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] # re-apply format to the output for dataset in datasets: new_format = self_format.copy() if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns) # sort the columns to have a deterministic list of columns that we can compare with `out_format` new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns) out_format = { "type": dataset._format_type, "format_kwargs": dataset._format_kwargs, "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None, "output_all_columns": dataset._output_all_columns, } if out_format != new_format: # only apply if there's a change not to update the fingerprint for nothing dataset.set_format(**new_format) return out wrapper._decorator_name_ = "transmit_format" return wrapper
3a20f6ad1d5f7b826742f7c55694e07a5c530273
726
def parse_version(version: str) -> Version:
    """Parses version string to Version class."""
    parsed = version.split(".")
    try:
        return Version(int(parsed[0]), int(parsed[1]),
                       int(parsed[2] if len(parsed) > 2 else -1))
    except ValueError:
        return Version(0, 0, -1)
89a785e97fc40b6e4002f2d35e75777758d665d6
727
def rate_of_change(x, t_Δ=1):
    """
    :param x: a series
    :param t_Δ: the intervals between each observation (series or constant)
    :return: rate of change for x
    """
    diffs = np.diff(x) / t_Δ
    return diffs
a6e07fbc8c29a66a6904eb36011d0c093e028d58
728
def draw_cutout(data, title, lower_bound=0, upper_bound=1, is_mobile=False): """ Draw a cutout data """ # Update graph data for stamps data = np.nan_to_num(data) data = sigmoid_normalizer(data, lower_bound, upper_bound) data = data[::-1] data = convolve(data, smooth=1, kernel='gauss') if is_mobile: mask = create_circular_mask(len(data), len(data[0]), center=None, radius=None) data[~mask] = np.nan if is_mobile: zsmooth = 'fast' else: zsmooth = False fig = go.Figure( data=go.Heatmap( z=data, showscale=False, hoverinfo='skip', colorscale='Greys_r', zsmooth=zsmooth ) ) # Greys_r axis_template = dict( autorange=True, showgrid=False, zeroline=False, linecolor='black', showticklabels=False, ticks='') fig.update_layout( title='', margin=dict(t=0, r=0, b=0, l=0), xaxis=axis_template, yaxis=axis_template, showlegend=True, paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)' ) if not is_mobile: fig.update_layout(width=150, height=150) style = {'display': 'inline-block', 'height': '10pc', 'width': '10pc'} else: style = {'display': 'inline-block', 'height': '5pc', 'width': '5pc'} graph = dcc.Graph( id='{}-stamps'.format(title), figure=fig, style=style, config={'displayModeBar': False} ) return graph
016e624fab9aa3b170dafba509a2d61f5444e9bb
729
def _check_sample(sample_pair: dict):
    """
    Controls a sample.

    Parameters
    ----------
    sample_pair : dict
        Sample must contain image and mask: {'image': image, 'mask': mask}

    Returns
    -------
    sample : dict
        Sample must contain image and mask: {'image': image, 'mask': mask}
    """
    if isinstance(sample_pair, dict):
        if len(sample_pair) != 2:
            raise ValueError(
                "Sample must contain image and mask: "
                "{'image': image, 'mask': mask}"
            )
    else:
        raise TypeError("Sample must be a dict like: {'image': image, 'mask': mask}")
    return sample_pair
112e9e46a753f8754d25ddbfe2505535c2f9ac96
730
def _make_cls(cls, attrs):
    """Make the custom config class."""
    return type(f'Custom{cls.__name__}', (cls, ), attrs, )
bbe1f7694fbb30bdcb3e3c5df0207ae641d022b3
731
# plain "import datetime" so that datetime.datetime and datetime.timedelta resolve
import datetime


def get_date(delta):
    """Build a date object with given day offset"""
    date = datetime.datetime.now()
    if delta is not None:
        offset = datetime.timedelta(days=delta)
        date = date + offset
    date = date.strftime("%A %-m/%-d")
    return date
ef3762a088946e81a8a26165395dad356e103ec9
732
def mover_alfil(tablero, x_inicial, y_inicial, x_final, y_final):
    """
    (list of list, int, int, int, int) -> list of list
    :param tablero: list of lists representing the board
    :param x_inicial: int representing the initial X position
    :param y_inicial: int representing the initial Y position
    :param x_final: int representing the final X position
    :param y_final: int representing the final Y position
    :return: list of lists representing the resulting board
    """
    tab = tablero.copy()
    if ((x_inicial - y_inicial == x_final - y_final) or
            (x_inicial + y_inicial == x_final + y_final)) and tab[x_inicial][y_final].lower() == 'a':
        if (x_inicial != x_final) and (y_inicial != y_final):
            for x in range(x_inicial + 1, x_final):
                if tab[x][y_final] != ' ':
                    raise ValueError('The path is not valid')
            for y in range(y_inicial + 1, y_final):
                if tab[x_final][y] != ' ':
                    raise ValueError('The path is not valid')
            tab[x_final][y_final] = 'a'
            tab[x_inicial][y_inicial] = ' '
    return tab
4c31db653fd448878f3e629b1237173f3eb26a56
733
def write_decaytable_entry_calchep(grouped_decays, gambit_model_name, calchep_pdg_codes, gambit_pdg_codes, decaybit_dict, calchep_processes): """ Writes a DecayBit DecayTable::Entry module function for a given set of of particle decays. Here, grouped_decays is a list, where: 1. The first element is the decaying particle. 2. The remaining entries are pairs of decay products. e.g. grouped_decays = [h, [ [tau+, tau-], [b, bbar], [t, tbar] ]] """ # Find the name of the particle as in DecayBit_rollcall.hpp decayparticle = pdg_to_particle(grouped_decays[0], decaybit_dict) chep_name = pdg_to_particle(grouped_decays[0], calchep_pdg_codes) # If the particle does not decay, according to the particle database, # then there is no need to write a capability. if decayparticle != None: pass else: return "" # TODO: proper support for BSM contributions to Z and W decays if decayparticle == "Z": return "" elif decayparticle == "W_plus": return "" elif decayparticle == "W_minus": return "" function_name = "CH_{0}_{1}_decays".format(gambit_model_name, decayparticle).replace('~','bar') spectrum = gambit_model_name + "_spectrum" # Definitely a nicer way to do this, but, this will do for now. # Should make it a bit easier to add 3 body final states. # (Overloaded as a backend function?) products = np.array(grouped_decays[1]) c_name = [] g_name = [] for i in np.arange(len(products)): c_name.append(map(lambda x:pdg_to_particle(x, calchep_pdg_codes),products[i])) g_name.append(map(lambda x:pdg_to_particle(x, gambit_pdg_codes), products[i])) out1c = np.array([pdg_to_particle(x, calchep_pdg_codes) for x in products[:,0]]) out2c = np.array([pdg_to_particle(x, calchep_pdg_codes) for x in products[:,1]]) c_strings = [] g_strings = [] for i in np.arange(len(c_name)): c_strings.append("{{{}}}".format(', '.join("\"{0}\"".format(x) for x in c_name[i]))) g_strings.append("{{{}}}".format(', '.join("\"{0}\"".format(y) for y in g_name[i]))) calchep_processes['decays'][chep_name].append([list(i) for i in zip(out1c, out2c)]) towrite = ( "void {0}(DecayTable::Entry& result)\n" "{{\n" "using namespace Pipes::{0};\n" "// Clear previous decays\n" "result = DecayTable::Entry();\n" "\n" "const Spectrum& spec = *Dep::{1};\n" "\n" ).format(function_name, spectrum) if decayparticle == "Higgs": towrite += "result = *Dep::Reference_SM_Higgs_decay_rates;\n\n" towrite += ( "str model = \"{0}\";\n" "str in = \"{1}\";" " // In state: CalcHEP particle name\n" "std::vector<std::vector<str>> out_calchep = {{{2}}}; " "// Out states: CalcHEP particle names\n" "std::vector<std::vector<str>> out_gambit = {{{3}}}; " "// Out states: GAMBIT particle names\n\n" "for (unsigned int i=0; i<out_calchep.size(); i++)\n" "{{\n" "\n" "double gamma = BEreq::CH_Decay_Width(model, in, " "out_calchep[i]); // Partial width\n" "double newwidth = result.width_in_GeV + gamma; " "// Adjust total width\n" "double wscaling = ( gamma == 0. ) ? 1 : result.width_in_GeV" "/newwidth; // Scaling for BFs, avoid NaNs\n" "result.width_in_GeV = newwidth;\n" "\n" "for (auto it = result.channels.begin(); " "it != result.channels.end(); ++it)\n" "{{\n" "it->second.first *= wscaling; " "// rescale BF \n" "it->second.second *= wscaling; // rescale error on BF \n" "}}\n" "\n" "// Avoid NaNs!\n" "double BF = ( gamma == 0. ) ? 0. 
: gamma/result.width_in_GeV;\n" "\n" "result.set_BF(BF, 0.0, " "out_gambit[i][0], out_gambit[i][1]);\n" "\n" "}}\n" "\n" "check_width(LOCAL_INFO, result.width_in_GeV, " "runOptions->getValueOrDef<bool>(false, " "\"invalid_point_for_negative_width\"))" ";\n" "}}" "\n" "\n" ).format(gambit_model_name, chep_name, ", ".join(c_strings), ", ".join(g_strings)) return indent(towrite, 4)
214ba0823a779274665557193ad84fe40ccb8b2d
734
from scipy.interpolate import UnivariateSpline


def interpolate_atmosphere(data, Z, s=0.25):
    """ This module generates a 1d array for the model plasma pressure, plasma
    density, temperature and mean molecular weight.
    """
    hdata = np.array(u.Quantity(data['Z']).to(u.m))
    # interpolate total pressure, temperature and density profiles
    pdata_f = UnivariateSpline(hdata, np.array(np.log(data['p'])), k=1, s=s)
    Tdata_f = UnivariateSpline(hdata, np.array(np.log(data['T'])), k=1, s=s)
    rdata_f = UnivariateSpline(hdata, np.array(np.log(data['rho'])), k=1, s=s)
    # s=0.0 to ensure all points are strictly used for ionisation state
    muofT_f = UnivariateSpline(hdata, np.array(np.log(data['mu'])), k=1, s=0.0)

    outdata = Table()
    outdata['Z'] = Z
    outdata['p'] = np.exp(pdata_f(Z.to(u.m))) * data['p'].unit
    outdata['T'] = np.exp(Tdata_f(Z.to(u.m))) * data['T'].unit
    outdata['rho'] = np.exp(rdata_f(Z.to(u.m))) * data['rho'].unit
    outdata['mu'] = np.exp(muofT_f(Z.to(u.m))) * u.one

    return outdata
3bddc5972fe0e5d4c814a6311775e7fa9777ca79
735
def exponential_coulomb_uniform_correlation_density( density, amplitude=constants.EXPONENTIAL_COULOMB_AMPLITUDE, kappa=constants.EXPONENTIAL_COULOMB_KAPPA): """Exchange energy density for uniform gas with exponential coulomb. Equation 24 in the following paper provides the correlation energy per length for 1d uniform gas with exponential coulomb interaction. One-dimensional mimicking of electronic structure: The case for exponentials. Physical Review B 91.23 (2015): 235141. https://arxiv.org/pdf/1504.05620.pdf y = pi * density / kappa correlation energy per length = -amplitude * kappa * y ** 2 / (pi ** 2) / ( alpha + beta * sqrt(y) + gamma * y + delta * sqrt(y ** 3) + eta * y ** 2 + sigma * sqrt(y ** 5) + nu * pi * kappa ** 2 / amplitude * y ** 3) correlation energy density = correlation energy per length * pi / (kappa * y) = -amplitude * y / pi / ( alpha + beta * sqrt(y) + gamma * y + delta * sqrt(y ** 3) + eta * y ** 2 + sigma * sqrt(y ** 5) + nu * pi * kappa ** 2 / amplitude * y ** 3) Note the correlation energy density converge to zero at high density limit. Args: density: Float numpy array with shape (num_grids,). amplitude: Float, parameter of exponential Coulomb interaction. kappa: Float, parameter of exponential Coulomb interaction. Returns: Float numpy array with shape (num_grids,). """ y = jnp.pi * density / kappa alpha = 2. beta = -1.00077 gamma = 6.26099 delta = -11.9041 eta = 9.62614 sigma = -1.48334 nu = 1. # The derivative of sqrt is not defined at y=0, we use two jnp.where to avoid # nan at 0. finite_y = jnp.where(y == 0., 1., y) out = -amplitude * finite_y / jnp.pi / ( alpha + beta * jnp.sqrt(finite_y) + gamma * finite_y + delta * finite_y ** 1.5 + eta * finite_y ** 2 + sigma * finite_y ** 2.5 + nu * jnp.pi * kappa ** 2 / amplitude * finite_y ** 3 ) return jnp.where(y == 0., -amplitude * y / jnp.pi / alpha, out)
dc2227f10cc64a3aa857494322f29f2b82b68da3
736
import torch


def val(model, dataloader, use_gpu):
    """val. the CNN model.

    Args:
        model (nn.model): CNN model.
        dataloader (dataloader): val. dataset.

    Returns:
        tuple(int, int): average of image acc. and digit acc..
    """
    model.eval()  # turn model to eval. mode (disables dropout layers...)
    result_digit = []
    result_img = []
    for i, (data, label) in enumerate(dataloader):
        with torch.no_grad():  # disable autograd
            if use_gpu:
                input = data.cuda()
            score = model(input)
            pred = decode(score)
            tmp = pred == label.numpy()
            result_digit += tmp.tolist()
            result_img += np.all(tmp, axis=1).tolist()

    i = np.random.randint(0, len(dataloader) - 1)
    im_show = np.transpose(input[i].detach().cpu().numpy(), (1, 2, 0))
    im_show = np.repeat((im_show * 255).astype(np.uint8), 3, -1)

    # turn model back to training mode.
    model.train()
    return np.mean(result_img), np.mean(result_digit), [im_show, pred[i]]
8cecc0204855a5267a11edf6936b6415fafc8120
737
def ps(s):
    """Process String: convert a string into a list of lowercased words."""
    return s.lower().split()
9bf25b31d00544d96f96564ce67ff5def9a16348
738
from matplotlib import tight_layout from scipy.ndimage import rotate def show_table_matrix(cells, colors=None, cells2=None, colors2=None, link_matrix=None, table_font_size=32, rot_45=True, return_img=False): """Draw a view of two tables together with a row-row association matrix. """ fig, axs = plt.subplots(2, 2, figsize=(12,12)) plt.subplots_adjust(wspace=0, hspace=0) axs[1,0].axis('off') # lower-left off ax=axs[0,0] # top-left make_datatable(cells, colors, columnlabels=cells.columns, table_font_size=table_font_size, ax=ax) ax=axs[1,1] # lower right, table view rotated by 90 deg CW make_datatable(cells2, colors2, columnlabels=cells2.columns, rot_90=True, table_font_size=table_font_size, ax=ax) ax=axs[0,1] # top right make_grid(ax, ncols=cells2.shape[0], # table below grid (bottom right in rot_45 view) nrows=cells.shape[0], # table left of grid (bottom left in rot_45 view) nshrink_cr=(1, 1) # shrink axis bbox to remove one extra row and column ) try: assert link_matrix.shape == (cells.shape[0], cells2.shape[0]) links = matrix_to_df(link_matrix) for row, col in links[links.values].index: rowcol = np.array(link_matrix.shape) - (row, col) - 1 doty, dotx = 2*rowcol+1 ax.plot( [dotx, dotx, 0], [0, doty, doty], linewidth=2, color='black' ) except AttributeError: pass renderer = tight_layout.get_renderer(fig) if return_img or rot_45: img = plt.imread(savefig_to_buffer(fig, format='png', bbox_inches='tight'), format='png') if rot_45: plt.close() plt.figure(figsize=(12,12)) plt.imshow(np.clip(rotate(img, angle=45, cval=1 # white background instead of grey outline ), 0, 1)) plt.axis('off') if return_img: return img
aed640392c7a85fbb2cf5cb76c185da76936c6cf
739
def login(username, password):
    """
    Log in with the account (email) and password, with "remember me" selected.
    :param username:
    :param password:
    :return:
    """
    global a
    a.get("https://account.fangcloud.com/login")
    _token = a.b.find("input", {"name": "_token"})["value"]
    _fstate = a.b.find("input", {"name": "_fstate"})["value"]
    x = a.post("https://account.fangcloud.com/login?_fstate=" + _fstate,
               """{"login":"%s","password":"%s","remember_login":true,"login_type":"web","_fstate":"%s"}""" % (username, password, _fstate),
               headers={"X-CSRF-TOKEN": _token,
                        "X-Requested-With": "XMLHttpRequest",
                        "Content-Type": "application/json"})
    result = x.json()
    if "redirect" not in result:
        raise Exception("login failed! maybe password incorrect or need captcha")
    url = result["redirect"]
    x = a.get(url, result=False, o=True, allow_redirects=True)
    assert 'apps/files' in x.url
    return True
7003be533ccb3edaff42d3f47c6882b1646a22d2
740
def rules():
    """Displays a markdown doc describing the predictive modeling contest.

    Note ./content/contest/<url calling path>.md must be modified for contest.
    """
    file = open('./contest/content/rules.md', 'r')
    rawText = file.read()
    file.close()
    content = Markup(markdown(rawText,
                              extensions=['markdown.extensions.fenced_code',
                                          'markdown.extensions.tables']))
    return render_template('markdowntemplate.html',
                           title='Rules',
                           content=content)
9a5b44b87fbcee378a958586511851ef455d7988
741
def greedy_search(decoder, encoder_outputs, encoder_outputs_mask, debug=False):
    """ performs greedy search. returns hypotheses with scores."""
    batch_size = encoder_outputs.size(0)
    encoder_hidden_dim = encoder_outputs.size(2)
    assert encoder_hidden_dim == decoder._decoder_hidden_dim
    trg_h_t, trg_c_t = decoder._initalize_hidden_context_states(
        encoder_outputs, encoder_outputs_mask)
    max_trg_length = decoder._max_decoding_steps

    # Expand tensors for each beam.
    dec_states = (trg_h_t, trg_c_t)

    gen_indices = encoder_outputs.new_zeros(batch_size, max_trg_length + 1).fill_(
        decoder.vocab.get_token_index(START_SYMBOL, "targets"))

    for i in range(1, max_trg_length):
        decoder_input = decoder._prepare_decode_step_input(
            input_indices=gen_indices[:, i - 1],
            decoder_hidden_state=dec_states[0],
            encoder_outputs=encoder_outputs,
            encoder_outputs_mask=encoder_outputs_mask,
        )
        logits, dec_states = decoder._decoder_step(
            decoder_input, dec_states[0], dec_states[1],
        )
        transition_probs = F.softmax(logits, dim=1)

        # be careful if you want to change this - the orientation doesn't
        # work if you switch dims in view() and remove transpose()
        word_lk = transition_probs.view(
            batch_size, -1
        )
        scores, gen_indices[:, i] = word_lk.max(1)  # TODO calculate scores

    def _print_sentence(indices):
        sent = [_get_word(decoder.vocab, word_idx.item()) for word_idx in indices[1:]]
        print(' '.join(sent))

    if debug:
        for i in range(gen_indices.size(0)):
            _print_sentence(gen_indices[i, :])

    return gen_indices.cpu().numpy(), scores
fbf9f92231c44b764c36fdb8fdfd31b0850719e0
742
import requests


def get(server: t.Union[Server, str], view_or_url: str, view_data: Kwargs = None,
        session: requests.Session = None, params: Kwargs = None, **kwargs) -> Response:
    """Sends a GET request."""
    return request('get', server, view_or_url, view_data=view_data, session=session,
                   params=params, **kwargs)
e9cecdb76f6b340a258c5bb1ca3be8cb9e257764
743
import sys import os def parse_command_line(): """ Parses the command line options and prints the errors, if any occur. """ if "-h" in sys.argv or "--help" in sys.argv: terminate(HELP_TEXT, 0) if "-f" in sys.argv or "--file" in sys.argv: try: file_index = sys.argv.index("-f") + 1 except ValueError: file_index = sys.argv.index("--file") + 1 try: properties_file = sys.argv[file_index] if not os.path.isfile(properties_file): terminate(ERROR_MESSAGE.format("-f/--file"), 1) except IndexError: terminate(ERROR_MESSAGE.format("-f/--file"), 1) else: terminate("-f/--file option not specified", 1) if "-j" in sys.argv or "--json-report" in sys.argv: try: json_report_index = sys.argv.index("-j") + 1 except ValueError: json_report_index = sys.argv.index("--json-report") + 1 try: json_report_file = sys.argv[json_report_index] except IndexError: terminate(ERROR_MESSAGE.format("-j/--json-report"), 1) else: json_report_file = None generate_html_report = True if "-g" in sys.argv or "--generate-html-report" in sys.argv else False if generate_html_report and ("-r" in sys.argv or "--html-report" in sys.argv): try: html_report_index = sys.argv.index("-r") + 1 except ValueError: html_report_index = sys.argv.index("--html-report") + 1 try: html_report_file = sys.argv[html_report_index] except IndexError: terminate(ERROR_MESSAGE.format("-r/--html-report"), 1) else: html_report_file = None return properties_file, json_report_file, generate_html_report, html_report_file
3f1e76e55d2c28a05435e961201c7ce105c4d541
744
from typing import Sequence from typing import Union from pathlib import Path import os def upload_files( project: str, paths: Sequence[Union[Path, str]], target_dir: str, strip_prefix: str = "", progress_bar: bool = True, ) -> None: """Upload all provided files from the local filesystem into `target_dir` on GCS. `strip_prefix` is removed from each local filepath and the remainder is appended to `target_dir` to create the target path. Note: The bucket should be included in the target path! """ # Remove any gs:// prefix and split the bucket name off the target dir target_dir = Path(remove_prefix(target_dir, "gs://")) bucket_name = target_dir.parts[0] target_dir = str(target_dir.relative_to(bucket_name)) bucket = gcs.Client(project=project).get_bucket(str(bucket_name)) # Note: This will overwrite any blobs that already exist. def upload_file(file: Path) -> TransferEvent: blob = bucket.blob( os.path.join(target_dir, remove_prefix(str(file), strip_prefix).strip("/")) ) blob.upload_from_filename(str(file), checksum="md5") return TransferEvent(file.stat().st_size, str(file), blob.name) # Create a ThreadPool to upload multiple files in parallel with ThreadPoolExecutor() as e: futures = [e.submit(upload_file, path) for path in paths] if progress_bar: network_futures_progress_bar(futures, mode="upload", keep_order=False) else: wait(futures)
156be5f3f9124dd62873583b46546c2b6fc7cc62
745
import pandas


def to_float(dataframe, column):
    """General Function to return floats"""
    dataframe[column] = dataframe[column].dropna().astype(float)
    dataframe[column] = dataframe[column].where(pandas.notnull(dataframe[column]), None)
    return dataframe[column]
2fdae992ec88e40c1e8c67711373d28390569166
746
import os


def compute_kv_template(config):
    """Draw from a line template"""
    # copy to a new memory, avoid lost info
    _i = config['info'].copy()
    _d = config['data'].copy()

    # fill in the data
    if len(_d) < 1:
        raise ValueError('A template data is essential!')

    config['data'] = []
    for log_name in os.listdir(_i['dir']):
        if log_name.split('.')[-1] != 'log':
            continue
        config['data'].append(_d[0].copy())
        config['data'][-1]['path'] = os.path.join(_i['dir'], log_name)

    # output to jsonfile
    output_filename = os.path.basename(_i['dir']) + '.' + _i['task'] + '.json'
    output = os.path.join(_i['dir'], output_filename)

    # change type
    config['task'] = _i['task']
    config.pop('info')

    utils.save_json(config, output)
    print('Config file has been saved in %s' % output)

    if _i['run']:
        return compute_kv(config)
a885bbda39dee9e685cbd9d84e2aa2f76a85230d
747
import os import csv def load_qm7(featurizer=None, split='random'): """Load qm7 datasets.""" # Featurize qm7 dataset print("About to featurize qm7 dataset.") current_dir = os.path.dirname(os.path.realpath(__file__)) dataset_file = os.path.join(current_dir, "./gdb7.sdf") qm7_tasks = ["u0_atom"] if featurizer is None: featurizer = dc.feat.CoulombMatrixEig(23) loader = dc.data.SDFLoader( tasks=qm7_tasks, smiles_field="smiles", mol_field="mol", featurizer=featurizer) dataset = loader.featurize(dataset_file) split_file = os.path.join(current_dir, "./qm7_splits.csv") split_indices = [] with open(split_file, 'r') as f: reader = csv.reader(f) for row in reader: row_int = (np.asarray(list(map(int, row)))).tolist() split_indices.append(row_int) splitters = { 'index': dc.splits.IndexSplitter(), 'random': dc.splits.RandomSplitter(), 'indice': dc.splits.IndiceSplitter(valid_indices=split_indices[1]), 'stratified': dc.splits.SingletaskStratifiedSplitter(task_number=0) } splitter = splitters[split] train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( dataset) transformers = [ dc.trans.NormalizationTransformer( transform_y=True, dataset=train_dataset) ] for transformer in transformers: train_dataset = transformer.transform(train_dataset) valid_dataset = transformer.transform(valid_dataset) test_dataset = transformer.transform(test_dataset) return qm7_tasks, (train_dataset, valid_dataset, test_dataset), transformers
6651f7f8400c0491f7d707f3f87488fbb0456fac
748
import torch


def dadbt(a: torch.Tensor, diag_mat: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Batched computation of diagonal entries of (A * diag_mat * B^T) where A and B
    are batches of square matrices and diag_mat is a batch of diagonal matrices
    (represented as vectors containing diagonal entries)

    :param a: batch of square matrices
    :param diag_mat: batch of diagonal matrices (represented as vectors containing
        diagonal entries)
    :param b: batch of square matrices
    :returns: diagonal entries of A * diag_mat * B^T"""
    return bmv(a * b, diag_mat)
bb2f0c9858130c556ffc5c7dfd4e337e0437aadd
749
from operator import invert def local_minima(image, footprint=None, connectivity=None, indices=False, allow_borders=True): """Find local minima of n-dimensional array. The local minima are defined as connected sets of pixels with equal gray level (plateaus) strictly smaller than the gray levels of all pixels in the neighborhood. Parameters ---------- image : ndarray An n-dimensional array. footprint : ndarray, optional The footprint (structuring element) used to determine the neighborhood of each evaluated pixel (``True`` denotes a connected pixel). It must be a boolean array and have the same number of dimensions as `image`. If neither `footprint` nor `connectivity` are given, all adjacent pixels are considered as part of the neighborhood. connectivity : int, optional A number used to determine the neighborhood of each evaluated pixel. Adjacent pixels whose squared distance from the center is less than or equal to `connectivity` are considered neighbors. Ignored if `footprint` is not None. indices : bool, optional If True, the output will be a tuple of one-dimensional arrays representing the indices of local minima in each dimension. If False, the output will be a boolean array with the same shape as `image`. allow_borders : bool, optional If true, plateaus that touch the image border are valid minima. Returns ------- minima : ndarray or tuple[ndarray] If `indices` is false, a boolean array with the same shape as `image` is returned with ``True`` indicating the position of local minima (``False`` otherwise). If `indices` is true, a tuple of one-dimensional arrays containing the coordinates (indices) of all found minima. See Also -------- skimage.morphology.local_maxima skimage.morphology.h_maxima skimage.morphology.h_minima Notes ----- This function operates on the following ideas: 1. Make a first pass over the image's last dimension and flag candidates for local minima by comparing pixels in only one direction. If the pixels aren't connected in the last dimension all pixels are flagged as candidates instead. For each candidate: 2. Perform a flood-fill to find all connected pixels that have the same gray value and are part of the plateau. 3. Consider the connected neighborhood of a plateau: if no bordering sample has a smaller gray level, mark the plateau as a definite local minimum. 
Examples -------- >>> from skimage.morphology import local_minima >>> image = np.zeros((4, 7), dtype=int) >>> image[1:3, 1:3] = -1 >>> image[3, 0] = -1 >>> image[1:3, 4:6] = -2 >>> image[3, 6] = -3 >>> image array([[ 0, 0, 0, 0, 0, 0, 0], [ 0, -1, -1, 0, -2, -2, 0], [ 0, -1, -1, 0, -2, -2, 0], [-1, 0, 0, 0, 0, 0, -3]]) Find local minima by comparing to all neighboring pixels (maximal connectivity): >>> local_minima(image) array([[False, False, False, False, False, False, False], [False, True, True, False, False, False, False], [False, True, True, False, False, False, False], [ True, False, False, False, False, False, True]]) >>> local_minima(image, indices=True) (array([1, 1, 2, 2, 3, 3]), array([1, 2, 1, 2, 0, 6])) Find local minima without comparing to diagonal pixels (connectivity 1): >>> local_minima(image, connectivity=1) array([[False, False, False, False, False, False, False], [False, True, True, False, True, True, False], [False, True, True, False, True, True, False], [ True, False, False, False, False, False, True]]) and exclude minima that border the image edge: >>> local_minima(image, connectivity=1, allow_borders=False) array([[False, False, False, False, False, False, False], [False, True, True, False, True, True, False], [False, True, True, False, True, True, False], [False, False, False, False, False, False, False]]) """ return local_maxima( image=invert(image), footprint=footprint, connectivity=connectivity, indices=indices, allow_borders=allow_borders )
9adaee108130b760077ba0c3698d07f37454d474
750
import torch


def kron(a, b):
    """
    Kronecker product of matrices a and b with leading batch dimensions.
    Batch dimensions are broadcast; the number of them must be equal.
    :type a: torch.Tensor
    :type b: torch.Tensor
    :rtype: torch.Tensor
    """
    # torch.tensor is used explicitly so the snippet works with only "import torch"
    siz1 = torch.Size(torch.tensor(a.shape[-2:]) * torch.tensor(b.shape[-2:]))
    res = a.unsqueeze(-1).unsqueeze(-3) * b.unsqueeze(-2).unsqueeze(-4)
    siz0 = res.shape[:-4]
    return res.reshape(siz0 + siz1)
b108e123817692f70f0e501c7a515171a3b08270
751
from datetime import datetime

import pytz


def process_query(request):
    """the function is called upon "news/" URL. it processes the query and calls the
    api function to fetch news articles from third party news APIs. If a query is new,
    it makes a fresh request to third party APIs, returns the query results, and adds
    the query and query results into the database. Otherwise, if the query is repeated,
    it fetches the results from the database, if it has not passed the expiry time
    (set to 120s). If it has passed the expiry time, a new request is sent to the
    third party news APIs and the results are updated in the database.

    Args:
        request (GET)

    Returns:
        json: returns the list of query results in the form of json object.
    """
    if request.method == 'POST':
        return JsonResponse({'Response': 'Invalid Request type, please use "GET"'}, status=400)
    try:
        keyword = request.GET.get('query')
        # datetime.now matches the "from datetime import datetime" import
        request_time = datetime.now(pytz.UTC)
        obj, created = Query.objects.get_or_create(
            keyword=keyword
        )
        if created == True:
            add_to_db(obj, keyword)
        elif (request_time - obj.query_time).seconds > EXPIRY_TIME:
            obj.query_result.all().delete()
            Query.objects.filter(keyword=keyword).update(query_time=request_time)
            add_to_db(obj, keyword)
        response = []
        for item in obj.query_result.all():
            response.append(item.to_dict())
        return JsonResponse(response, safe=False, status=200)
    except Exception as e:
        print(e)
        return JsonResponse({'Response': 'Something went wrong'}, status=400)
000b6e4f47e06e29ff0e0ba1ba9d3311e9f40263
752
def fix_header(params, recipe, infile=None, header=None, raise_exception=False,
               **kwargs):
    """
    Instrument specific header fixes are defined in pseudo_const.py for an
    instrument and called here (the function in pseudo_const.py is HEADER_FIXES)

    :param params:
    :param infile:
    :return:
    """
    # deal with no header
    if header is None:
        header = infile.header
        hdict = infile.hdict
        filename = infile.filename
        has_infile = True
    else:
        has_infile = False
        hdict = Header()
        filename = None
    # load pseudo constants
    pconst = constants.pload(params['INSTRUMENT'])
    # use pseudo constant to apply any header fixes required (specific to
    #   a specific instrument) and update the header
    try:
        header, hdict = pconst.HEADER_FIXES(params=params, recipe=recipe,
                                            header=header, hdict=hdict,
                                            filename=filename, **kwargs)
    except lang.drs_exceptions.DrsHeaderError as e:
        if raise_exception:
            raise e
        else:
            eargs = [e.key, e.filename]
            WLOG(params, 'error', TextEntry('01-001-00027', args=eargs))
    # if the input was an infile return the infile back
    if has_infile:
        # return the updated infile
        infile.header = header
        infile.hdict = hdict
        return infile
    # else return the header (assuming input was a header only)
    else:
        return header, hdict
37550406cb76b77ccb1d64e85f5f192989ad4bcd
753
from typing import List


def encode(df: pd.DataFrame, cols: List[str], drop_first: bool = True) -> pd.DataFrame:
    """Do a dummy encoding for the columns specified

    Args:
        df: DataFrame
        cols: List of columns to perform dummy encoding on
        drop_first: parameter for dummy encoding
    """
    dfs = []
    for col in df.columns:
        ds = df[col]
        if col not in cols:
            dfs.append(ds.to_frame())
        else:
            dfs.append(pd.get_dummies(ds, prefix=col, drop_first=drop_first))
    return pd.concat(dfs, axis=1)
9299378d67c69ebd964a7187431c67c12556c43b
754
from typing import Optional


def binary_search(pool: list, target) -> Optional[int]:
    """Search for a target in a list, using binary search.

    Args:
        pool (list): a pool of all elements being searched.
        target: the target being searched.

    Returns:
        int: the index of the target in the sorted copy of the pool, or None
        if the target is not present.
    """
    sorted_pool = sorted(pool)
    low = 0
    high = len(sorted_pool) - 1
    # standard binary search over the closed interval [low, high]; the original
    # "while low + 1 != high" loop missed targets at the boundaries
    while low <= high:
        mid = (low + high) // 2
        if sorted_pool[mid] == target:
            return mid
        if sorted_pool[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return None
7e7ef70126e02b3dc706b3b88bd950aa6322904e
755
def load_array(filename):
    """
    Given a valid image, load the image and return the pixels as a numpy array

    :param filename: The filename as a string
    :returns: A numpy array which stores the pixel data from a snowmap

    Convention is as follows: pixels that read (0, 0, 0, 255) are read as
    snow-free and contain the value 0; pixels that read (0, 0, 0, 0) assume
    no data and return -1, and pixels that read (255, 255, 255, 255) are read
    as snow and get the value 1
    """
    image = Image.open(filename)
    image.load()
    height, width = image.size
    snowmap = np.zeros((height, width), dtype=int)
    for row in range(height):
        for col in range(width):
            a = image.getpixel((row, col))
            if a == (0, 0, 0, 255):
                # This is no snow
                snowmap[row, col] = 0
            elif a == (0, 0, 0, 0):
                # this is no data
                snowmap[row, col] = -1
            elif a == (255, 255, 255, 255):
                # that's for snow
                snowmap[row, col] = 1
            else:
                raise ValueError("Unknown Pixel value {}".format(a))
    return snowmap
829e97936fb63486bc1c373bdf283f02dbb833bd
756
def create_anchors_3d_stride(feature_size, anchor_strides, sizes=[1.6, 3.9, 1.56], anchor_offsets=[0, -20, -1], # [0.2, -39.8, -1.78], rotations=[0, 1.57], # np.pi / 2 dtype=np.float32): """ Args: feature_size: list [D, H, W](zyx) sizes: [N, 3] list of list or array, size of anchors, xyz Returns: anchors: [*feature_size, num_sizes, num_rots, 7] tensor. """ # almost 2x faster than v1 x_stride, y_stride, z_stride = anchor_strides x_offset, y_offset, z_offset = anchor_offsets z_centers = np.arange(feature_size[0], dtype=dtype) y_centers = np.arange(feature_size[1], dtype=dtype) x_centers = np.arange(feature_size[2], dtype=dtype) z_centers = z_centers * z_stride + z_offset y_centers = y_centers * y_stride + y_offset x_centers = x_centers * x_stride + x_offset sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3]) rotations = np.array(rotations, dtype=dtype) rets = np.meshgrid( x_centers, y_centers, z_centers, rotations, indexing='ij') tile_shape = [1] * 5 tile_shape[-2] = int(sizes.shape[0]) for i in range(len(rets)): rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape) rets[i] = rets[i][..., np.newaxis] # for concat sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3]) tile_size_shape = list(rets[0].shape) tile_size_shape[3] = 1 sizes = np.tile(sizes, tile_size_shape) rets.insert(3, sizes) ret = np.concatenate(rets, axis=-1) return np.transpose(ret, [2, 1, 0, 3, 4, 5])
2d6d31a45c5f2f0a9adfe39195ae37719d78fd73
757
def sample_unit(name='oz'):
    """Create and return a sample unit"""
    return Unit.objects.create(name=name)
affa250d46b5b50e69af013035f6b73b45b787b4
758
def CreateBlendCurve2(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1, multiple=False):
    """
    Makes a curve blend between 2 curves at the parameters specified with the directions and continuities specified

    Args:
        curve0 (Curve): First curve to blend from
        t0 (double): Parameter on first curve for blend endpoint
        reverse0 (bool): If false, the blend will go in the natural direction of the curve. If true, the blend will go in the opposite direction to the curve
        continuity0 (BlendContinuity): Continuity for the blend at the start
        curve1 (Curve): Second curve to blend from
        t1 (double): Parameter on second curve for blend endpoint
        reverse1 (bool): If false, the blend will go in the natural direction of the curve. If true, the blend will go in the opposite direction to the curve
        continuity1 (BlendContinuity): Continuity for the blend at the end

    Returns:
        Curve: The blend curve on success. None on failure
    """
    url = "rhino/geometry/curve/createblendcurve-curve_double_bool_blendcontinuity_curve_double_bool_blendcontinuity"
    if multiple:
        url += "?multiple=true"
    args = [curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1]
    if multiple:
        args = list(zip(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1))
    response = Util.ComputeFetch(url, args)
    response = Util.DecodeToCommonObject(response)
    return response
08553ffbc307e792f5f794812c6b54d6bc995766
759
def fetch_website(url, user_agent, results_location_dir):
    """function to use for website fetch

    :param url: url to fetch information from
    :param user_agent: user agent string that is used by the minion in making the fetch
    :param results_location_dir: the location to where the results are stored
    :return: results_data - a dictionary of metadata on the fetch

    This method uses a different library than the basic fetch method, Ghost.py
    (documentation at http://ghost-py.readthedocs.io/en/latest/#). After cleaning the
    url, a session is opened with the user agent string passed in. Then the specific
    web page is opened and all the resources of the web page are collected. After that,
    a screen-shot of the web page is captured. The page data is written to a file named
    from the session id, and each resource gathered during the fetch is written to a
    file in the same directory as the page data. Finally, miscellaneous metadata is
    written to the results_data dictionary.
    """
    log_debug("fetch_website", "Entering fetch_website")
    # clean the url
    url_clean = url.lstrip()
    log_debug("fetch_website", "Starting Fetch of: " + url_clean)
    # start a Ghost.py session
    session = Ghost().start(user_agent=user_agent)
    results_data = {'requested_url': url,
                    'actual_url': url_clean,
                    'remote_job_id': str(session.id)}
    try:
        # open the web page and gather all the page's resources
        page, resources = session.open(address=url_clean, user_agent=user_agent)
    # catch a timeout or generic Ghost error
    except (ghost.TimeoutError, ghost.Error):
        results_data['connection_success'] = False
        log_debug("fetch_website", "Connection Failed for Fetch: " + url_clean)
        return results_data
    except Exception as e:
        print(type(e))
        print(str(e))
        return results_data
    # if page is None and there are no resources, the connection to the page failed
    if page is None and len(resources) == 0:
        log_debug("fetch_website", "No page returned for Fetch: " + url_clean)
        results_data['connection_success'] = False
    else:
        netloc = urlparse(url_clean).netloc
        log_debug("fetch_website", "Attempting to capture screenshot of {}".format(netloc))
        try:
            # capture a screen-shot of the web page
            session.capture_to("{}/{}.png".format(results_location_dir, netloc))
            log_debug("fetch_website", "Successful capture of screenshot of {}".format(netloc))
        except Exception as e:
            log_debug("fetch_website", "Failed to capture screenshot of {}".format(netloc))
            print(type(e))
            print(str(e))
        try:
            log_debug("fetch_website", "Opening: {}/{} for: {}".format(results_location_dir, session.id, url_clean))
            fetch_file = open("{}/{}".format(results_location_dir, session.id), 'w')
            log_debug("fetch_website", "writing page content to file")
            # write page content to file
            fetch_file.write(page.content)
            log_debug("fetch_website", "closing {}".format(session.id))
            fetch_file.close()
            # write the data of each resource to a separate file
            for resource in resources:
                log_debug("fetch_website", "opening {}/resource{} for: {}".format(
                    results_location_dir, resources.index(resource), url_clean))
                data_file = open("{}/resource{}".format(results_location_dir, resources.index(resource)), "w")
                log_debug("fetch_website", "writing content to {}".format(resources.index(resource)))
                data_file.write(resource.content)
                log_debug("fetch_website", "closing {}".format(resources.index(resource)))
                data_file.close()
            results_data['fetch_object_success'] = True
        except Exception:
            results_data['fetch_object_success'] = False
        finally:
            # collect more metadata
            results_data['connection_success'] = True
            results_data['server_info'] = dict(page.headers)
            results_data['response_code'] = page.http_status
            if page.http_status in [400, 404, 403, 401]:
                results_data["fetch_success"] = False
    if len(session.cookies) > 0:
        results_data['cookies'] = [x.value().data() for x in session.cookies]
    return results_data
085ddee37e495c1e0318c6582ea061cbd2bed5a4
760
def make_raster_from_images(modeladmin, request, queryset): """Make a raster of the selected `ImageMeta`s. This is an action on `ImageMeta` """ imset = make_image_set_from_images(modeladmin, request, queryset) return _make_raster_from_image_set(imset)
1d5d855d986ee37875b85ec119dacd51f4af5e69
761
def is_rotation(first, second): """Given two strings, is one a rotation of the other.""" if len(first) != len(second): return False double_second = second + second return first in double_second
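A quick usage sketch for is_rotation above; the strings are made up.

# Usage sketch with made-up strings
assert is_rotation("waterbottle", "erbottlewat")
assert not is_rotation("abc", "acb")
assert not is_rotation("abc", "abcd")   # different lengths are rejected early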
f02576761014e1dc395f88f937dfdd0de15508d2
762
def bin_entities(uri_set, delimiter="/", splitpos=-1):
    """
    Takes iterable elements and splits them according to the position
    (splitpos) of the delimiter. The first part is used as a key, whereas
    the second is appended to a list connected to that key.
    return: dict {key1: [id11, id12, id13, ...], key2: [...]}
    """
    ent_dict = dict()
    for res in uri_set:
        # split entity up to splitpos using delimiter
        entity = delimiter.join(res.split(delimiter)[:splitpos])
        # id_ is the remainder
        id_ = delimiter.join(res.split(delimiter)[splitpos:])
        if entity in ent_dict:
            ent_dict[entity].append(id_)
        else:
            ent_dict[entity] = [id_]
    return ent_dict
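A usage sketch for bin_entities above; the URIs are made up.

# Usage sketch with made-up URIs
uris = [
    "http://example.org/person/1",
    "http://example.org/person/2",
    "http://example.org/place/7",
]
print(bin_entities(uris))
# {'http://example.org/person': ['1', '2'], 'http://example.org/place': ['7']}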
fcbcddbff909d74fe14fe7cb3a21560c8ca9549a
763
def frequency(state_1, state_2): """ The frequency interval between state_1 and state_2 in GHz. """ return 1e-9 * interval(state_1, state_2) / h
6276f946e08d9b2e115f004395b5cf420f048c68
764
from collections import OrderedDict

import dns.resolver

def dac(dns_val=None) -> OrderedDict:
    """ Domain Availability Checker (DNS lookup)

    :param dns_val: domain name string
    :return: OrderedDict with the domain, resolved IPs and availability [True, False]
    """
    ip_values = None
    avail = False
    if dns_val is None:
        raise ValueError("Sorry, DNS is needed")
    if isinstance(dns_val, str) is False:
        raise TypeError("Sorry, 'DNS' must be type 'str'")
    try:
        output = dns.resolver.resolve(dns_val, 'A')
        ip_values = [ipval.to_text() for ipval in output]
    except dns.resolver.NXDOMAIN:
        avail = True
    return OrderedDict([
        ("DNS", dns_val),
        ("IP", ip_values),
        ("AVAIL", avail),
    ])
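A minimal usage sketch for dac above, assuming the dnspython package is installed; the domain names are only illustrative.

# Usage sketch (assumes dnspython is installed; domains are examples only)
taken = dac("example.com")                                # registered domain -> AVAIL is False
print(taken["DNS"], taken["AVAIL"], taken["IP"])
free = dac("this-domain-should-not-exist-12345.com")     # NXDOMAIN -> AVAIL is True
print(free["DNS"], free["AVAIL"])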
d2c4097686f2edb17fbd674098592ec797ecac46
765
def display_timestamp(num_seconds):
    """get a string to conveniently display a timestamp as H:MM:SS"""
    seconds = int(num_seconds) % 60
    minutes = int(num_seconds // 60) % 60
    hrs = int(num_seconds // 3600)
    return "{}:{:02d}:{:02d}".format(hrs, minutes, seconds)
bdcc34ade38855df910d5005f6dac9b5e826f543
766
def get_bloglist(content_dict=None):
    """ If the incoming command is -m, return the list of blog posts.

    :param content_dict: parsed WeChat message fields (FromUserName / ToUserName)
    :return: a news reply built from the latest blog posts
    """
    if content_dict is None:
        content_dict = {}
    bloglist = crawlBlog.get_archives(5)
    tousername = content_dict["FromUserName"]
    fromusername = content_dict["ToUserName"]
    return WeixinUtils.make_news(bloglist, tousername, fromusername)
541fbf7f10f137b995fd0d9a91e8bc651b90b697
767
from bisect import bisect

import numpy as np

def get_closest(arr, value):
    """ Return the array values closest to the request value, or +/-inf if
    the request value is beyond the range of the array

    Parameters
    ----------
    arr : sequence
        array of values
    value : numeric

    Returns
    -------
    2-tuple: largest value in array less than value (or -inf) and
        smallest value in array larger than value (or +inf)
    """
    arr_sorted = sorted(arr)
    index = bisect(arr_sorted, value)
    lower_limit = -np.inf if index == 0 else arr_sorted[index - 1]
    upper_limit = np.inf if index == len(arr_sorted) else arr_sorted[index]
    return lower_limit, upper_limit
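A quick usage sketch for get_closest above with made-up values.

# Usage sketch with made-up values
grid = [0.0, 1.0, 2.5, 10.0]
print(get_closest(grid, 1.7))    # (1.0, 2.5)  - brackets the requested value
print(get_closest(grid, -3.0))   # (-inf, 0.0) - below the array range
print(get_closest(grid, 42.0))   # (10.0, inf) - above the array range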
e59216c7d0332ae91e75583b7dc42f956c785e4c
768
def filename(config, key, ext='.h5', set=''):
    """
    Get the real file name by looking up the key in the config and suffixing.

    :param key: key to use in the config
    :type key: str
    :param ext: extension to use
    :type ext: str
    :param set: set name
    :type set: str
    :return: filepath
    :rtype: str
    """
    name = config[key] + '_'
    if set:
        name += set + '_'
    name += str(config['multiplier']) + '_' + str(config['height']) + 'x' \
        + str(config['width']) + 'x' + str(config['depth'])
    if ext:
        name += ext
    return name
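A usage sketch for filename above; the config keys and values are hypothetical and only illustrate the naming scheme.

# Usage sketch; this config dict is hypothetical
config = {
    'training_file': 'train',
    'multiplier': 2,
    'height': 32, 'width': 32, 'depth': 16,
}
print(filename(config, 'training_file'))              # train_2_32x32x16.h5
print(filename(config, 'training_file', set='val'))   # train_val_2_32x32x16.h5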
f389a48e7e06a31722423857814149f474e46316
769
def XIRR( values: func_xltypes.XlArray, dates: func_xltypes.XlArray, guess: func_xltypes.XlNumber = 0.1 ) -> func_xltypes.XlNumber: """Returns the internal rate of return for a schedule of cash flows that is not necessarily periodic. https://support.microsoft.com/en-us/office/ xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d Algorithm found on stackoverflow: https://stackoverflow.com/questions/63797804/ python-irr-function-giving-different-result-than-excel-xirr From MS, Newton's method is used to optimize: https://docs.microsoft.com/en-us/office/troubleshoot/excel/ algorithm-of-xirr-funcation """ values = values.flatten(func_xltypes.Number, None) dates = dates.flatten(func_xltypes.DateTime, None) # need to cast dates and guess to Python types else optimizer complains dates = [float(date) for date in dates] guess = float(guess) # TODO: Ignore non numeric cells and boolean cells. if len(values) != len(dates): raise xlerrors.NumExcelError( f'`values` range must be the same length as `dates` range ' f'in XIRR, {len(values)} != {len(dates)}') series = pd.DataFrame({"dates": dates, "values": values}) # Filter all rows with 0 cashflows series = series[series['values'] != 0] # Sort dataframe by date series = series.sort_values('dates', ascending=True) series['values'] = series['values'].astype('float') # Create separate lists for values and dates series_values = list(series['values']) series_dates = list(series['dates']) # Calculate IRR return _xirr(series_values, series_dates, guess)
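The `_xirr` helper that does the actual root finding is not shown here; the sketch below only illustrates the discounted-sum (XNPV) quantity that Newton's method drives to zero, using made-up cash flows.

# Standalone sketch of the quantity XIRR drives to zero (not the module's _xirr itself):
# XNPV(rate) = sum(v_i / (1 + rate) ** ((d_i - d_0) / 365)) over all cash flows.
from datetime import date

def xnpv_sketch(rate, values, dates):
    d0 = min(dates)
    return sum(v / (1.0 + rate) ** ((d - d0).days / 365.0)
               for v, d in zip(values, dates))

# With these made-up cash flows, XIRR is the rate at which xnpv_sketch(...) == 0.
flows = [-1000.0, 300.0, 420.0, 680.0]
days = [date(2020, 1, 1), date(2020, 6, 1), date(2021, 1, 1), date(2021, 9, 1)]
print(xnpv_sketch(0.1, flows, days))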
16350cdeb4ddaff4b62d036d4be78deac2613323
770
def isUp(): """ Whether this docker container is up """ return 'True'
e99c32dee79c4df516193c1a9d3fb8d34f8b0abc
771
def rand_perm_(img, x, y, x_max, y_max, kernel, flatten): """ Applies INPLACE the random permutation defined in `kernel` to the image `img` on the zone defined by `x`, `y`, `x_max`, `y_max` :param img: Input image of dimension (B*C*W*H) :param x: offset on x axis :param y: offset on y axis :param x_max: end of the zone to permute on the x axis :param y_max: end of the zone to permute on the y axis :param kernel: LongTensor of dim 1 containing one value for each point in the zone to permute :return: the permuted image. """ assert img.dim() == 4 if img.size(1) != 1: raise NotImplementedError('Not Implemented for multi-channel images') zone = img[:, :, x:x_max, y:y_max].contiguous() img[:, :, x:x_max, y:y_max] = zone.view(zone.size(0), -1)\ .index_select(1, kernel).view(zone.size()) return img.view(img.size(0), -1) if flatten else img
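A usage sketch for rand_perm_ above, assuming PyTorch is available; the 8x8 single-channel image and the permuted 4x4 zone are made up.

# Usage sketch (assumes PyTorch); permutes a 4x4 zone of a single-channel 8x8 image in place
import torch

img = torch.arange(64, dtype=torch.float32).view(1, 1, 8, 8)
x, y, x_max, y_max = 2, 2, 6, 6                     # zone to shuffle
kernel = torch.randperm((x_max - x) * (y_max - y))  # one index per pixel in the zone
out = rand_perm_(img, x, y, x_max, y_max, kernel, flatten=False)
print(out.shape)  # torch.Size([1, 1, 8, 8])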
c838840c2428320825486c0cdacf23f5fb40a9a6
772
import torch def test(model, data_loader, use_cuda, loss_func): """ The function to evaluate the testing data for the trained classifiers :param model: :param data_loader: :param use_cuda: :return: """ softmax = torch.nn.Softmax(dim=1) columns = ['participant_id', 'session_id', 'slice_id', 'true_label', 'predicted_label', 'proba0', 'proba1'] results_df = pd.DataFrame(columns=columns) total_loss = 0 if use_cuda: model.cuda() model.eval() # set the model to evaluation mode torch.cuda.empty_cache() with torch.no_grad(): for i, data in enumerate(data_loader): if use_cuda: imgs, labels = data['image'].cuda(), data['label'].cuda() else: imgs, labels = data['image'], data['label'] output = model(imgs) normalized_output = softmax(output) loss = loss_func(output, labels) total_loss += loss.item() _, predicted = torch.max(output.data, 1) # Generate detailed DataFrame for idx, sub in enumerate(data['participant_id']): row = [sub, data['session_id'][idx], data['slice_id'][idx].item(), labels[idx].item(), predicted[idx].item(), normalized_output[idx, 0].item(), normalized_output[idx, 1].item()] row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns) results_df = pd.concat([results_df, row_df]) del imgs, labels, output torch.cuda.empty_cache() # calculate the balanced accuracy results = evaluate_prediction(results_df.true_label.values.astype(int), results_df.predicted_label.values.astype(int)) results_df.reset_index(inplace=True, drop=True) results['total_loss'] = total_loss torch.cuda.empty_cache() return results_df, results
37859b0f477326a8a606205c84ea0827d43925d8
773
def is_fundamental_error(path, error): """ Returns True if error is not field related. (So type related, for example.) """ return not is_any_field_error(path, error)
8eef548f9520cbd92ff3989f2f11b180e8099981
774
import os

def check_git_modified(clinfo):
    """Check whether the local git repositories have been modified.

    Warns if there is any uncommitted or unpushed work.

    Returns:
        bool: True if there are no changes, or if the user confirmed anyway.
    """
    nip = _get_ip(
        clinfo['instance']['notebook'],
        clinfo['profile'].get('private_command')
    )
    user = clinfo['template']['notebook']['ssh_user']
    private_key = clinfo['template']['notebook']['ssh_private_key']
    git_dirs = clinfo['git_cloned_dir']
    uncmts = []
    unpushs = []
    for git_dir in git_dirs:
        cmd = "cd {} && git status --porcelain | grep '^ M.*'".format(git_dir)
        _uncmts, _ = send_instance_cmd(user, private_key, nip, cmd)
        uncmts += [os.path.join(git_dir, u) for u in _uncmts if len(u) > 0]
        cmd = "cd {} && git cherry -v".format(git_dir)
        _unpushs, _ = send_instance_cmd(user, private_key, nip, cmd)
        unpushs += [os.path.join(git_dir, u) for u in _unpushs if len(u) > 0]
    uncmt_cnt = len(uncmts)
    unpush_cnt = len(unpushs)
    if uncmt_cnt > 0 or unpush_cnt > 0:
        print()
        print("There are {} uncommitted file(s) and {} unpushed commit(s)!".
              format(uncmt_cnt, unpush_cnt))
        if uncmt_cnt > 0:
            print()
            print("Uncommitted file(s)")
            print("-------------------")
            for f in uncmts:
                print(f.strip())
        if unpush_cnt > 0:
            print()
            print("Unpushed commit(s)")
            print("-------------------")
            for f in unpushs:
                print(f.strip())
        print()
        ans = ''
        while ans.lower() not in ('y', 'n'):
            ans = input("Are you sure to destroy this cluster? (y/n): ")
        return ans == 'y'
    return True
5aa223ef3811b7d4078c9c7f57ef5e2fb1afdb18
775
def migrate_to_latest(json_dict, info): """Migrates the STAC JSON to the latest version Args: json_dict (dict): The dict of STAC JSON to identify. info (STACJSONDescription): The info from :func:`~pystac.serialzation.identify.identify_stac_object` that describes the STAC object contained in the JSON dict. Returns: dict: A copy of the dict that is migrated to the latest version (the version that is pystac.STAC_VERSION) """ result = deepcopy(json_dict) version = info.version_range.latest_valid_version() if version != STAC_VERSION: _object_migrations[info.object_type](result, version, info) for ext in info.common_extensions: _extension_migrations[ext](result, version, info) result['stac_version'] = STAC_VERSION return result
0e159ea565038a4b8fa8b2525c8adc35cbd97dc6
776
def contact_infectivity_symptomatic_20x50(): """ Real Name: b'contact infectivity symptomatic 20x50' Original Eqn: b'contacts per person symptomatic 20x50*infectivity per contact' Units: b'1/Day' Limits: (None, None) Type: component b'' """ return contacts_per_person_symptomatic_20x50() * infectivity_per_contact()
b6472192451dcf484cbe7ac802c06750c3d63fff
777
def smart_wn_search(wn, query, pos=None, report_file=None, compact=True, lang='eng', with_eng=True): """ Search synset in WordNet Gloss Corpus by term""" if report_file is None: report_file = TextReport() # Default to stdout report_file.print("Search Wordnet: Query=%s | POS=%s" % (query, pos)) with wn.ctx() as ctx: synsets = search_wn_full_text(wn, query, pos=pos, lang=lang, ctx=ctx) if with_eng and lang != 'eng': synsets_eng = SynsetCollection() for synset in synsets: synset_eng = wn.get_synset(synset.ID, lang='eng', ctx=ctx) synsets_eng.add(synset_eng) dump_synsets(synsets, synsets_eng, report_file=report_file, compact=compact) else: dump_synsets(synsets, report_file=report_file, compact=compact) return synsets
4d600ca77c6e4012225dfc4b1212739542817c83
778
def _parse_integrator(int_method): """parse the integrator method to pass to C""" #Pick integrator if int_method.lower() == 'rk4_c': int_method_c= 1 elif int_method.lower() == 'rk6_c': int_method_c= 2 elif int_method.lower() == 'symplec4_c': int_method_c= 3 elif int_method.lower() == 'symplec6_c': int_method_c= 4 elif int_method.lower() == 'dopr54_c': int_method_c= 5 elif int_method.lower() == 'dop853_c': int_method_c= 6 else: int_method_c= 0 return int_method_c
20a44b596860fdaa72b5aa37c7853bbcf47c3c91
779
import time

def get_diffusion_features(repo_path, branch):
    """
    Function that extracts the first commit's diffusion features. It then
    starts a number of processes (equal to the number of cores on the
    computer) and distributes the remaining commits to them.
    """
    repo = Repository(repo_path)
    head = repo.references.get(branch)
    commits = list(
        repo.walk(head.target, GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE))
    initial = commits[0]
    init_tree = initial.tree

    # Count initial total lines of code
    init_total_additions = 0
    init_file_additions = []
    init_subdirectories = 0
    init_modules = 0
    for entry in init_tree:
        if entry.type == "tree":
            added, file_additions, subdirectories = parse_tree(entry, repo)
            init_modules += 1
            init_file_additions.extend(file_additions)
            init_total_additions += added
            init_subdirectories += subdirectories
        else:
            try:
                additions = len(str(repo[entry.id]).split('\n'))
                init_total_additions += additions
                init_file_additions.append(additions)
            except Exception:
                continue

    diffusion_features = []
    diffusion_features.append(initial.hex)
    diffusion_features.append(init_subdirectories)
    diffusion_features.append(init_modules)
    diffusion_features.append(
        count_entropy(init_file_additions, init_total_additions))

    # Check how many processes could be spawned
    cpus = cpu_count()
    print("Using {} cpus...".format(cpus))

    # Divide the commits equally between the processes.
    quote, remainder = divmod(len(commits), cpus)
    processes = [
        Process(
            target=parse_diffusion_features,
            args=(i, repo_path, branch, i * quote + min(i, remainder),
                  (i + 1) * quote + min(i + 1, remainder))) for i in range(cpus)
    ]
    for process in processes:
        process.start()
    start_time = time.time()
    for process in processes:
        process.join()
    end_time = time.time()

    print("Done")
    print("Overall processing time {}".format(end_time - start_time))

    # Assemble the results
    features = []
    for _, feat in RES.items():
        features.extend(feat)
    features = list(reversed(features))
    features.append(diffusion_features)
    return features
cd94b722c0f98d55206a5c5cad32e6f855ae304d
780
def convert_example(example, tokenizer, label_list, max_seq_length=512, is_test=False):
    """
    Builds model inputs from a sequence or a pair of sequences for sequence
    classification tasks by concatenating and adding special tokens. It also
    creates a mask from the two sequences passed, to be used in a
    sequence-pair classification task.

    A BERT sequence has the following format:

    - single sequence: ``[CLS] X [SEP]``
    - pair of sequences: ``[CLS] A [SEP] B [SEP]``

    A BERT sequence pair mask has the following format:
    ::
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |

    If only one sequence, only returns the first portion of the mask (0's).

    Args:
        example(obj:`list[str]`): List of input data, containing the text and the
            label if it has a label.
        tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from
            :class:`~paddlenlp.transformers.PretrainedTokenizer` which contains most
            of the methods. Users should refer to the superclass for more
            information regarding methods.
        label_list(obj:`list[str]`): All the labels that the data has.
        max_seq_length(obj:`int`): The maximum total input sequence length after
            tokenization. Sequences longer than this will be truncated, sequences
            shorter will be padded.
        is_test(obj:`bool`, defaults to `False`): Whether the example contains a
            label or not.

    Returns:
        input_ids(obj:`list[int]`): The list of token ids.
        token_type_ids(obj: `list[int]`): List of sequence pair mask.
        label(obj:`numpy.array`, data type of int64, optional): The input label if
            not is_test.
    """
    # per the docstring, example is [text] or [text, label]
    text = example[0]
    encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length)
    input_ids = encoded_inputs["input_ids"]
    token_type_ids = encoded_inputs["token_type_ids"]

    if not is_test:
        # create label maps
        label_map = {}
        for (i, l) in enumerate(label_list):
            label_map[l] = i
        label = label_map[example[1]]
        label = np.array([label], dtype="int64")
        return input_ids, token_type_ids, label
    else:
        return input_ids, token_type_ids
986503a34f055b890f5979ca146708c1b45a45fe
781
import yaml import json def load_config_file(filepath): """ Load a configuration as an options dict. Format of the file is given with filepath extension. :param filepath: :type filepath: :return: :rtype: """ if filepath.endswith('.json'): with open(filepath) as config_file_data: return json.load(config_file_data) if filepath.endswith('.yaml') or filepath.endswith('.yml'): try: with open(filepath) as config_file_data: return yaml.load(config_file_data) except ImportError: # pragma: no cover raise ConfigurationException('Configuration file extension is not supported. ' 'PyYAML should be installed to support "%s" file' % ( filepath,)) try: # Try to load input as JSON return json.loads(filepath) except: # pylint: disable=bare-except pass raise ConfigurationException('Configuration file extension is not supported for "%s" file.' % (filepath,))
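A usage sketch for load_config_file above; the file paths are hypothetical.

# Usage sketch; paths are hypothetical
opts = load_config_file("settings.json")                    # parsed from a JSON file
opts = load_config_file("settings.yaml")                    # parsed from a YAML file
opts = load_config_file('{"workers": 4, "debug": false}')   # raw JSON string fallback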
60b5ea592f8f101be279cfe5897e70fbef11f9b0
782
def coordinator_setup(start_heart=True): """ Sets up the client for the coordination service. URL examples for connection: zake:// file:///tmp redis://username:password@host:port mysql://username:password@host:port/dbname """ url = cfg.CONF.coordination.url lock_timeout = cfg.CONF.coordination.lock_timeout member_id = get_member_id() if url: coordinator = coordination.get_coordinator(url, member_id, lock_timeout=lock_timeout) else: # Use a no-op backend # Note: We don't use tooz to obtain a reference since for this to work we would need to # register a plugin inside setup.py entry_point and use python setup.py develop for tests # to work coordinator = NoOpDriver(member_id) coordinator.start(start_heart=start_heart) return coordinator
b39b736e39b6c98badd148ac111b01dae85eea2f
783
def _to_jraph(example): """Converts an example graph to jraph.GraphsTuple.""" example = jax.tree_map(lambda x: x._numpy(), example) # pylint: disable=protected-access edge_feat = example['edge_feat'] node_feat = example['node_feat'] edge_index = example['edge_index'] labels = example['labels'] num_nodes = example['num_nodes'] senders = edge_index[:, 0] receivers = edge_index[:, 1] return jraph.GraphsTuple( n_node=num_nodes, n_edge=np.array([len(edge_index) * 2]), nodes=node_feat, edges=np.concatenate([edge_feat, edge_feat]), # Make the edges bidirectional senders=np.concatenate([senders, receivers]), receivers=np.concatenate([receivers, senders]), # Keep the labels with the graph for batching. They will be removed # in the processed batch. globals=np.expand_dims(labels, axis=0))
e3bc4bb468ae4e6dfbb4387c0a913a87ba76ac26
784
def get_urls(page_links): """Insert page links, return list of url addresses of the json""" urls = [] for link in page_links: link1 = link.replace('v3', 'VV') game_id = ''.join([char for char in link1 if char in list(map(str, list(range(10))))]) json_url = f'http://www.afa.com.ar/deposito/html/v3/htmlCenter/data/deportes/futbol/primeraa/events/{game_id}.json' urls.append(json_url) return urls
68c6796ad5a77676674252a0060776eabc4fb8e0
785
def KK_RC79_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
    """
    # Rs in series with 79 parallel RC elements: Z = Rs + sum_i R_i / (1 + j*w*tau_i)
    Rs = params["Rs"]
    R = [params["R{}".format(i)] for i in range(1, 80)]
    return Rs + sum(R[i] / (1 + w * 1j * t_values[i]) for i in range(79))
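An evaluation sketch for KK_RC79_fit above on a made-up frequency grid; a plain dict stands in for whatever lmfit-style params object the calling fit routine actually supplies.

# Evaluation sketch with made-up values; params is a plain dict standing in for the
# parameter object used by the fitting code.
import numpy as np

w = 2 * np.pi * np.logspace(-2, 5, 50)      # angular frequencies
taus = np.logspace(-6, 1, 79)               # one time constant per RC element
params = {"Rs": 10.0}
params.update({"R{}".format(i): 0.5 for i in range(1, 80)})
Z = KK_RC79_fit(params, w, taus)
print(Z.shape, Z.dtype)                     # (50,) complex128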
386f84adf3dd4a1b122ef1ef9572f1d3733fb94c
786
def _resampling_from_str(resampling: str) -> Resampling: """ Match a rio.warp.Resampling enum from a string representation. :param resampling: A case-sensitive string matching the resampling enum (e.g. 'cubic_spline') :raises ValueError: If no matching Resampling enum was found. :returns: A rio.warp.Resampling enum that matches the given string. """ # Try to match the string version of the resampling method with a rio Resampling enum name for method in rio.warp.Resampling: if str(method).replace("Resampling.", "") == resampling: resampling_method = method break # If no match was found, raise an error. else: raise ValueError( f"'{resampling}' is not a valid rasterio.warp.Resampling method. " f"Valid methods: {[str(method).replace('Resampling.', '') for method in rio.warp.Resampling]}" ) return resampling_method
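A quick usage sketch for _resampling_from_str above, assuming rasterio is installed and imported as rio in the function's module.

# Usage sketch (assumes rasterio)
method = _resampling_from_str("cubic_spline")
print(method)          # Resampling.cubic_spline
try:
    _resampling_from_str("not-a-method")
except ValueError as err:
    print(err)         # lists the valid method names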
e0e4020361313205fd0afc90e19bb02ebe0d5abb
787
def _call_twitter_api(query): """helper function to call twitter api Args: query (str): query string made by _preprocess_query function Returns: generator: response object in generator """ return sntwitter.TwitterSearchScraper(query=query).get_items()
3b75150e7a83e7dfdbc5bd836745af13d3b5a90f
788
from typing import List import re def parse_superfamilies(filepath: str) -> List[Method]: """ Parse the CathNames.txt file distributed with CATH-Gene3D releases :param filepath: :return: """ signatures = [] reg = re.compile(r"^(\d\.\d+\.\d+\.\d+)\s+([a-zA-Z0-9]+)\s+:(.*)$") with open(filepath, "rt") as fh: for line in fh: if line[0] == '#': continue m = reg.match(line) if m is None: continue supfam, model, name = m.groups() accession = f"{_PREFIX}{supfam}" m = Method(accession, _TYPE_SUPFAM, description=name) signatures.append(m) return signatures
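A small demo of the line format the regex above expects, using a made-up CathNames.txt line; Method and _PREFIX come from the surrounding module, so only the regex is exercised here.

# Regex demo on a made-up CathNames.txt-style line
import re

reg = re.compile(r"^(\d\.\d+\.\d+\.\d+)\s+([a-zA-Z0-9]+)\s+:(.*)$")
line = "1.10.8.10     1oaiA00     :Helix Hairpins"
supfam, model, name = reg.match(line).groups()
print(supfam, model, name)   # 1.10.8.10 1oaiA00 Helix Hairpins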
db91d288133ed64b27ffdc1852ca5d62390792eb
789
def Weekday(datetime): """Returns a weekday for display e.g. Mon.""" return datetime.strftime('%a')
bae413f0fa86f9e27bd6d7f6ee4480a6ddd564e7
790
def host_list(request): """List all code hosts :rtype: json """ hosts = Host.objects.all() serializer = host_serializer(hosts, many=True) return JsonResponse(serializer.data, safe=False)
0f41acd02beef4e656e13da9bf7401b5c24c138c
791
def columns_not_changed(df, col_to_keep): """ insert the clean columns as features without changing the columns :param df: dataframe :param col_to_keep: columns that are clean and should not be changed :return unchanged columns plus SK_ID_CURR """ df = df.loc[:, ['SK_ID_CURR'] + col_to_keep] df.loc[df.DAYS_ID_PUBLISH > 0, :] = np.nan col_to_turn_positive = ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH'] df[col_to_turn_positive] = df[col_to_turn_positive].abs() return df
138a21dc6b84dc03678d68c9ce279accac3b853a
792
def removeRestaurantFromList(update: Update, context: CallbackContext) -> int: """Removes the current restaurant from the current preferred list.""" query = update.callback_query query.answer() # Removing the restaurant from the list in the database removeRestaurantFromListDb( context.chat_data.get("current_list_restaurants").restaurants.current.id, context.chat_data.get("current_list_restaurants").id, ) # Removing the restaurant even from the local list context.chat_data.get("current_list_restaurants").restaurants.remove() # Sending a message of success and displaying back the updated list. context.bot.edit_message_text( chat_id=update.effective_chat.id, message_id=context.chat_data.get("fav_list_message_id"), text=getString( "GENERAL_RestaurantRemovedFromFavList", context.chat_data.get("lang") ), ) # Sending a new empty message which will be overwritten immediately by going back to the RESTAURANT_INFOS_DISPLAY state. newId = context.bot.send_message( chat_id=update.effective_chat.id, text="_" ).message_id # Storing the new id which will be used from now on to modify the message. context.chat_data.update({"fav_list_message_id": newId}) return showCurrentFavRestaurant(update, context)
7150744f16c53d43c8ba789df90afd98bba7c16d
793
def _process_null(_): """ Placeholder for an efficient replacement for when no columns of a `WaveformReducer` are activated. """ return dict()
377b355104a01d93916e8a5e91934f0be79d1b13
794
def GenerateRst(proto_file): """Generate a RST representation from a FileDescriptor proto.""" source_code_info = SourceCodeInfo(proto_file.name, proto_file.source_code_info) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', source_code_info.file_level_comment, proto_file.name) package_prefix = NormalizeFQN('.' + proto_file.package + '.')[:-1] package_type_context = TypeContext(source_code_info, package_prefix) msgs = '\n'.join( FormatMessage(package_type_context.ExtendMessage(index, msg.name), msg) for index, msg in enumerate(proto_file.message_type)) enums = '\n'.join( FormatEnum(package_type_context.ExtendEnum(index, enum.name), enum) for index, enum in enumerate(proto_file.enum_type)) debug_proto = FormatProtoAsBlockComment(proto_file) return header + comment + msgs + enums # + debug_proto
433deed0df185b2b4bbd655e2d7f1d466cd6af6b
795
import torch def make_padded_batch(items): """ Pads sequences in a batch, so they are all the same length as the longest. """ max_len = max(len(d["input_ids"]) for d in items) if max_len == 0: return {k: torch.zeros((0, 0), dtype=torch.long) for k in items[0]} return { k: pad_sequence([d[k] for d in items if len(d["input_ids"])], batch_first=True) for k, v in items[0].items() }
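A usage sketch for make_padded_batch above, assuming PyTorch and that pad_sequence is imported in the function's module; the token ids are made up.

# Usage sketch (assumes PyTorch); token ids are made up
import torch

items = [
    {"input_ids": torch.tensor([101, 7592, 102]), "attention_mask": torch.tensor([1, 1, 1])},
    {"input_ids": torch.tensor([101, 102]), "attention_mask": torch.tensor([1, 1])},
]
batch = make_padded_batch(items)
print(batch["input_ids"])
# tensor([[ 101, 7592,  102],
#         [ 101,  102,    0]])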
8f11f83a4070845259c9549307f6d8eccee92545
796
import ctypes

def hlmlEventSetCreate() -> hlml_t.HLML_EVENT_SET.TYPE:
    """ Create an empty set of events

    Parameters:
        None.

    Returns:
        st (HLML_EVENT_SET) - An empty set of events.
    """
    global _hlmlOBJ
    # instantiate the ctypes handle so it can be passed by reference
    st = hlml_t.HLML_EVENT_SET.TYPE()
    fn = _hlmlOBJ.get_func_ptr("hlml_event_set_create")
    ret = fn(ctypes.byref(st))
    check_return(ret)
    return st
b5e05c9a0acdb580db2e80e46978857cbd869feb
797
def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', '1.2.3. 4') Traceback (most recent call last): VdtValueError: the value "1.2.3. 4" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, StringTypes): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value
ba4a8ead84ee43ea76d14b24838ef83dca651691
798
def batchEuclid (features, patterns, knowledge): """ Classifies whole dataset via euclidean distance. Returns score. """ dists = euclidean_distances (knowledge, features) preds = np.array (dists).argmin (axis = 0) truthVector = (preds.T.astype (float) == patterns) pos = truthVector.sum () score = pos / features.shape[0] return score
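A usage sketch for batchEuclid above with tiny made-up data; it assumes scikit-learn's euclidean_distances and numpy are imported in the function's module.

# Usage sketch with tiny made-up data
import numpy as np

knowledge = np.array([[0.0, 0.0],    # prototype for class 0
                      [1.0, 1.0]])   # prototype for class 1
features = np.array([[0.1, 0.0], [0.9, 1.1], [0.2, 0.1]])
patterns = np.array([0, 1, 0])
print(batchEuclid(features, patterns, knowledge))   # 1.0 - every sample hits its prototype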
76ebcda00b28b4456454d5d1e1b5fe7aeb4e8314
799