content: string (lengths 35 – 416k)
sha1: string (length 40)
id: int64 (0 – 710k)
import numpy

def add_param_starts(this_starts, params_req, global_conf,
                     run_period_len, start_values_min, start_values_max):
    """Process the param starts information taken from the generator,
    and add it to the array being constructed.

    Inputs:
        this_starts: a tuple with (starts_min, starts_max), the output from
            a generator's get_param_starts() function.
        params_req: integer, the number of parameters this generator requires
        global_conf: a dict including 'min_param_val' and 'max_param_val'
        run_period_len: the number of periods to run for
        start_values_min: the array to append the min start values to
        start_values_max: the array to append the max start values to

    Outputs:
        start_values_min, start_values_max, updated versions
        (not necessarily in-place)
    """
    (starts_min, starts_max) = this_starts
    starts_min = numpy.array(starts_min)
    starts_max = numpy.array(starts_max)
    if starts_min.size == 0:
        # No generator-supplied minima: fall back to the global minimum.
        start_values_min = numpy.hstack((start_values_min, (
            (numpy.ones((run_period_len, params_req)) *
             global_conf['min_param_val']).tolist())))
    else:
        start_values_min = numpy.hstack((start_values_min, starts_min))
    if starts_max.size == 0:
        # No generator-supplied maxima: fall back to the global maximum.
        start_values_max = numpy.hstack((start_values_max, (
            (numpy.ones((run_period_len, params_req)) *
             global_conf['max_param_val']).tolist())))
    else:
        start_values_max = numpy.hstack((start_values_max, starts_max))
    return start_values_min, start_values_max
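A minimal usage sketch of the function above, assuming its definition is in scope; the config values and array shapes below are hypothetical stand-ins, not taken from the source.

import numpy

global_conf = {'min_param_val': 0.0, 'max_param_val': 1.0}
start_min = numpy.zeros((4, 0))  # 4 run periods, no parameters appended yet
start_max = numpy.zeros((4, 0))

# An empty starts tuple forces the min/max defaults from global_conf.
start_min, start_max = add_param_starts(([], []), 2, global_conf, 4,
                                        start_min, start_max)
print(start_min.shape)  # (4, 2)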
b50f538b9d5096fe6061b4b990ccb9ad6ba05ef6
1,489
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_ansible_tower package"""
    reload_params = {
        "package": u"fn_ansible_tower",
        "incident_fields": [],
        "action_fields": [u"ansible_tower_arguments", u"ansible_tower_credential",
                          u"ansible_tower_hosts", u"ansible_tower_inventory",
                          u"ansible_tower_job_name", u"ansible_tower_module",
                          u"ansible_tower_module_arguments", u"ansible_tower_run_tags",
                          u"ansible_tower_skip_tags", u"job_status", u"last_updated",
                          u"tower_project", u"tower_save_as", u"tower_template_pattern"],
        "function_params": [u"incident_id", u"tower_arguments", u"tower_credential",
                            u"tower_hosts", u"tower_inventory", u"tower_job_id",
                            u"tower_job_status", u"tower_last_updated", u"tower_module",
                            u"tower_project", u"tower_run_tags", u"tower_save_as",
                            u"tower_skip_tags", u"tower_template_id",
                            u"tower_template_name", u"tower_template_pattern"],
        "datatables": [u"ansible_tower_job_templates", u"ansible_tower_launched_jobs"],
        "message_destinations": [u"fn_ansible_tower"],
        "functions": [u"ansible_tower_get_ad_hoc_command_results",
                      u"ansible_tower_get_job_results",
                      u"ansible_tower_launch_job_template",
                      u"ansible_tower_list_job_templates",
                      u"ansible_tower_list_jobs",
                      u"ansible_tower_run_an_ad_hoc_command"],
        "phases": [],
        "automatic_tasks": [],
        "scripts": [],
        "workflows": [u"ansible_tower_get_ad_hoc_command_results",
                      u"ansible_tower_get_job_results",
                      u"ansible_tower_launch_job_template",
                      u"ansible_tower_list_job_templates",
                      u"ansible_tower_list_jobs",
                      u"ansible_tower_run_an_ad_hoc_command",
                      u"ansible_tower_run_job__artifact",
                      u"ansible_tower_run_job__incident"],
        "actions": [u"Ansible Tower Get Ad Hoc Command Results",
                    u"Ansible Tower Get Job Results",
                    u"Ansible Tower List Job Templates",
                    u"Ansible Tower List Jobs",
                    u"Ansible Tower Run an Ad Hoc Command",
                    u"Ansible Tower Run Job",
                    u"Ansible Tower Run Job - Artifact",
                    u"Ansible Tower Run Job - Incident"],
        "incident_artifact_types": []
    }
    return reload_params
49dee7d9a1dc297ff31f51e4583740c353831cd9
1,490
import os

def gen_dir(download_dir, main_keyword):
    """Helper function | generates a directory where pics will be downloaded"""
    if not download_dir:
        download_dir = './data/'
    img_dir = download_dir + main_keyword + '/'
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    return img_dir
2a8fe841ac4c2afdf64cd91f6dae1842e2c3c51d
1,491
def is_valid_page_to_edit(prev_pg_to_edit, pg_to_edit):
    """Check if the page is valid to edit or not

    Args:
        prev_pg_to_edit (obj): page to edit object of previous page
        pg_to_edit (obj): page to edit object of current page

    Returns:
        boolean: true if valid else false
    """
    try:
        prev_pg_ref_end = int(prev_pg_to_edit.ref_end_page_no)
        cur_pg_ref_start = int(pg_to_edit.ref_start_page_no)
        cur_pg_ref_end = int(pg_to_edit.ref_end_page_no)
    except Exception:
        return False
    if prev_pg_to_edit == pg_to_edit:
        return cur_pg_ref_end >= cur_pg_ref_start
    elif prev_pg_to_edit.vol != pg_to_edit.vol and cur_pg_ref_start <= cur_pg_ref_end:
        return True
    elif cur_pg_ref_start <= cur_pg_ref_end and prev_pg_ref_end <= cur_pg_ref_start:
        return True
    else:
        return False
ce594804f105b749062f79d63fc3021296631c1b
1,492
import shutil
import os

def if_binary_exists(binary_name, cc):
    """
    Returns the path of the requested binary if it exists and clang is being used, None if not

    :param binary_name: Name of the binary
    :param cc: Path to CC binary
    :return: A path to binary if it exists and clang is being used, None if either condition is false
    """
    binary = None
    if "clang" in cc:
        binary = shutil.which(binary_name,
                              path=os.path.dirname(cc) + ":" + os.environ['PATH'])
    return binary
9f17748ba111a7ece33b8a0a7315c8832d15b014
1,493
import glob

def mean_z_available():
    """Return True if an annual_mean_z.nc file exists in the working directory."""
    if glob.glob("annual_mean_z.nc"):
        return True
    return False
d53f8dc6fe540e8f74fd00760d1c810e510e53b8
1,495
import requests
import io
import tarfile

def sources_from_arxiv(eprint):
    """
    Download sources on arXiv for a given preprint.

    :param eprint: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``).
    :returns: A ``TarFile`` object of the sources of the arXiv preprint.
    """
    r = requests.get("http://arxiv.org/e-print/%s" % (eprint,))
    file_object = io.BytesIO(r.content)
    return tarfile.open(fileobj=file_object)
b26c46009b23c5a107d6303b567ab97492f91ad9
1,496
from typing import List

def game_over(remaining_words: List[str]) -> bool:
    """Return True iff remaining_words is empty.

    >>> game_over(['dan', 'paul'])
    False
    >>> game_over([])
    True
    """
    return remaining_words == []
8d29ef06bd5d60082646cef00f77bbabfbac32eb
1,497
import csv

def read_manifest(instream):
    """Read manifest file into a dictionary

    Parameters
    ----------
    instream : readable file like object
    """
    reader = csv.reader(instream, delimiter="\t")
    header = None
    metadata = {}
    for row in reader:
        if header is None:
            header = row
        else:
            metadata[row[0]] = row[1]
    return metadata
afa6c2bb0a9d81267b1d930026a229be924a1994
1,498
def query_urlhaus(session, provided_ioc, ioc_type):
    """Query the URLhaus API for the given IOC and return a list of result dicts."""
    uri_dir = ioc_type
    if ioc_type in ["md5_hash", "sha256_hash"]:
        uri_dir = "payload"
    api = "https://urlhaus-api.abuse.ch/v1/{}/"
    resp = session.post(api.format(uri_dir), timeout=180,
                        data={ioc_type: provided_ioc})

    ioc_dicts = []
    if resp.status_code == 200 and resp.text != "":
        resp_content = resp.json()
        if ioc_type == "host":
            if "urls" not in resp_content.keys() or len(resp_content["urls"]) == 0:
                ioc_dicts.append({"no data": provided_ioc})
                return ioc_dicts
            for url in resp_content["urls"]:
                ioc_dict = {
                    "provided_ioc": provided_ioc,
                    "host": resp_content.get("host", None),
                    "firstseen (host)": resp_content.get("firstseen", None),
                    "urlhaus_reference (host)": resp_content.get("urlhaus_reference", None),
                    "url": url.get("url", None),
                    "url_status": url.get("url_status", None),
                    "date_added (url)": url.get("date_added", None),
                    "urlhaus_reference (url)": url.get("urlhaus_reference", None)
                }
                if url["tags"] is not None:
                    ioc_dict.update({"tags (url)": ",".join(url.get("tags", None))})
                ioc_dicts.append(ioc_dict)
        elif ioc_type == "url":
            if "payloads" not in resp_content.keys() or len(resp_content["payloads"]) == 0:
                ioc_dicts.append({"invalid": provided_ioc})
                return ioc_dicts
            for payload in resp_content["payloads"]:
                ioc_dict = {
                    "provided_ioc": provided_ioc,
                    "host": resp_content.get("host", None),
                    "url": resp_content.get("url", None),
                    "url_status": resp_content.get("url_status", None),
                    "date_added (url)": resp_content.get("date_added", None),
                    "urlhaus_reference (url)": resp_content.get("urlhaus_reference", None),
                    "filename (payload)": payload.get("filename", None),
                    "content_type (payload)": payload.get("content_type", None),
                    "response_size (payload)": payload.get("response_size", None),
                    "md5_hash (payload)": payload.get("response_md5", None),
                    "sha256_hash (payload)": payload.get("response_sha256", None),
                    "firstseen (payload)": payload.get("firstseen", None),
                    "signature (payload)": payload.get("signature", None)
                }
                if resp_content["tags"] is not None:
                    ioc_dict.update({"tags (url)": ",".join(resp_content.get("tags", None))})
                if payload["virustotal"] is not None:
                    ioc_dict.update({
                        "vt_result (payload)": payload["virustotal"].get("result", None),
                        "vt_link (payload)": payload["virustotal"].get("link", None)
                    })
                ioc_dicts.append(ioc_dict)
        elif ioc_type in ["md5_hash", "sha256_hash"]:
            if len(resp_content["urls"]) == 0:
                ioc_dicts.append({"invalid": provided_ioc})
                return ioc_dicts
            for url in resp_content["urls"]:
                ioc_dict = {
                    "provided_ioc": provided_ioc,
                    "content_type (payload)": resp_content.get("content_type", None),
                    "file_size (payload)": resp_content.get("file_size", None),
                    "md5_hash (payload)": resp_content.get("md5_hash", None),
                    "sha256_hash (payload)": resp_content.get("sha256_hash", None),
                    "firstseen (payload)": resp_content.get("firstseen", None),
                    "lastseen (payload)": resp_content.get("lastseen", None),
                    "signature (payload)": resp_content.get("signature", None),
                    "url": url.get("url", None),
                    "url_status": url.get("url_status", None),
                    "filename (url)": url.get("filename", None),
                    "firstseen (url)": url.get("firstseen", None),
                    "lastseen (url)": url.get("lastseen", None),
                    "urlhaus_reference (url)": url.get("urlhaus_reference", None)
                }
                if resp_content["virustotal"] is not None:
                    ioc_dict.update({
                        "vt_result (payload)": resp_content["virustotal"].get("result", None),
                        "vt_link (payload)": resp_content["virustotal"].get("link", None)
                    })
                ioc_dicts.append(ioc_dict)
        return ioc_dicts
    return [{"invalid": provided_ioc}]
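A hedged usage sketch for the function above, assuming its definition is in scope; the IOC value is a placeholder, and a real call performs a live request to the URLhaus API.

import requests

session = requests.Session()
results = query_urlhaus(session, "example.com", "host")
for row in results:
    print(row)  # dicts keyed 'provided_ioc', 'url', 'url_status', ... or {'no data': ...}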
171bff1e9b1bfdf8ac6b91a4bbbd7226f80c8c4c
1,499
import webbrowser

def open_in_browser(path):
    """
    Open directory in web browser.
    """
    return webbrowser.open(path)
41328b2b478f0bd69695da1868c412188e494d08
1,503
def encode_letter(letter):
    """
    This will encode a tetromino letter as a small integer
    """
    value = None
    if letter == 'i':
        value = 0
    elif letter == 'j':
        value = 1
    elif letter == 'l':
        value = 2
    elif letter == 'o':
        value = 3
    elif letter == 's':
        value = 4
    elif letter == 't':
        value = 5
    elif letter == 'z':
        value = 6
    return value
6c72c4c9e44c93d045296ab1f49c7783f2b4fc59
1,504
def electron_mass_MeVc2():
    """The rest mass of the electron in MeV/c**2

    https://en.wikipedia.org/wiki/Electron
    """
    return 0.5109989461
4496ddcc35a0aa6528cc19e47233f5a81626fefe
1,505
def clean_features(vgsales):
    """
    This function cleans up some of the dataset's features. The dataset is
    quite messy as many values are missing from both categorical and numerical
    features. Many of these features are difficult to impute in a reasonable
    manner.

    <class 'pandas.core.frame.DataFrame'>
    Index: 16719 entries, Wii Sports to Winning Post 8 2016
    Data columns (total 9 columns):
    Platform        16719 non-null category
    Release         16450 non-null Int64
    Genre           16717 non-null category
    Publisher       16665 non-null category
    Sales           16719 non-null float64
    Metacritic      8137 non-null float64
    Metacritic_N    8137 non-null Int64
    Developer       10096 non-null category
    ESRB            9950 non-null category
    dtypes: Int64(2), category(5), float64(2)
    memory usage: 1.5+ MB

    Some of the hardest features to impute (genre or platform, for example)
    don't have many nulls. Others, like the review averages, can be imputed.

    :param vgsales: A Video_Games_Sales_as_at_22_Dec_2016.csv compatible dataset.
    """
    # A few of the release years are set to 2020 or other years past 2016.
    # Just setting them to 2016 here; there aren't a lot of them anyway.
    # A single .loc assignment avoids chained-assignment pitfalls.
    vgsales.loc[vgsales.Release > 2016, 'Release'] = 2016

    # =========================================================================
    # https://en.wikipedia.org/wiki/Entertainment_Software_Rating_Board
    #
    # The ESRB feature will be converted to an ordinal variable for machine
    # learning during preprocessing later. Thus, we organize them here and
    # add an NA for missing values.
    # =========================================================================
    esrb_ordinal = ["NA", "RP", "EC", "E", "E10+", "T", "M", "AO"]
    vgsales.ESRB.cat.set_categories(esrb_ordinal, ordered=True, rename=False,
                                    inplace=True)
    return vgsales
ffcae20af436d4012381c4933c841c3689fbbca0
1,506
def UpdateDatabase(asset, images, status):
    """Update the database entries of the given asset with the given data."""
    return {'asset': asset}
1d7d42355410be7481e706e47d7810755974dadc
1,509
import hashlib
import json

def get_config_tag(config):
    """Get configuration tag.

    Whenever configuration changes making the intermediate representation
    incompatible the tag value will change as well.
    """
    # Configuration attributes that affect representation value
    config_attributes = dict(frame_sampling=config.proc.frame_sampling)
    sha256 = hashlib.sha256()
    sha256.update(json.dumps(config_attributes).encode("utf-8"))
    return sha256.hexdigest()[:40]
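A minimal sketch of calling get_config_tag, assuming the definition above is in scope; SimpleNamespace stands in for the real config object, whose exact class is not shown in the source.

from types import SimpleNamespace

config = SimpleNamespace(proc=SimpleNamespace(frame_sampling=1))
print(get_config_tag(config))  # 40 hex chars; changes when frame_sampling changes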
2cab6e9473822d0176e878114ceb3fda94d1e0f7
1,510
def list_lines(lines):
    """Returns the list of trimmed lines.

    @param lines Multi-line string
    """
    return list(filter(None, (x.strip() for x in lines.splitlines())))
293610d17e1fe8a27ab6bb5c35a349059e0179f3
1,511
def replace_newlines(s, replacement=' / ', newlines=(u"\n", u"\r")):
    """
    Used by the status message display on the buddy list to replace
    newline characters.
    """
    # turn all carriage returns to newlines
    for newline in newlines[1:]:
        s = s.replace(newline, newlines[0])

    # while there are pairs of newlines, turn them into one
    while s.find(newlines[0] * 2) != -1:
        s = s.replace(newlines[0] * 2, newlines[0])

    # replace newlines with the newline_replacement above
    return s.strip().replace(newlines[0], replacement)
d7b42ad67a3732c1ecac5bbfd7b9920b0215aa13
1,512
import requests
import re

def api_wowlight_version_check(version: str) -> bool:
    """
    Checks incoming wow-lite wallet version, returns False when the version
    is too old and needs to be upgraded.

    :param version:
    :return: bool
    """
    url = "https://raw.githubusercontent.com/wownero/wow-lite-wallet/master/src/renderer/components/Landing/LandingPage.vue"
    try:
        resp = requests.get(url, headers={"User-Agent": "Mozilla 5.0"})
        resp.raise_for_status()
        content = resp.content.decode()
    except Exception:
        return True  # default to true

    # parse latest version (dots escaped so they match literally)
    current = next(re.finditer(r"wowlite\?version=(\d+\.\d+\.\d+)", content), None)
    if not current:
        return False
    return version == current.group(1)
470f8580df357c206b595c1145e04e33fd897058
1,515
import requests
from bs4 import BeautifulSoup

def soup_from_name(username):
    """
    Grabs bs4 object from html page
    """
    # html_source = urlopen('https://www.instagram.com/'+ str(username) + '/')
    url = 'https://www.instagram.com/' + str(username) + '/'
    # Note: the original fragments joined without a space between ")" and
    # "AppleWebKit"; a separating space is added here.
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) "
                             "AppleWebKit/537.36 (KHTML, like Gecko) "
                             "Chrome/71.0.3578.98 Safari/537.36"}
    html_source = requests.get(url, headers=headers).text
    return BeautifulSoup(html_source, 'html.parser')
    # react-root > section > main > div > div.Nd_Rl._2z6nI > article > div._4Kbb_ > div > h2
    # print(soup.body.span.section.main.div.div.article.div.div.h2)
442c6e9fa036fef59b82246462bf0e992384fd15
1,516
def construct_SN_default_rows(timestamps, ants, nif, gain=1.0):
    """Construct list of ants dicts for each timestamp with REAL, IMAG, WEIGHT = gains"""
    default_nif = [gain] * nif
    rows = []
    for ts in timestamps:
        rows += [{'TIME': [ts],
                  'TIME INTERVAL': [0.1],
                  'ANTENNA NO.': [antn],
                  'REAL1': default_nif,
                  'REAL2': default_nif,
                  'IMAG1': default_nif,
                  'IMAG2': default_nif,
                  'WEIGHT 1': default_nif,
                  'WEIGHT 2': default_nif}
                 for antn in ants]
    return rows
b81e45d2d5299042b3332a2386a0fd4d2d6d59d7
1,517
def kron_diag(*lts):
    """Compute diagonal of a KroneckerProductLazyTensor from the diagonals
    of the constituting tensors"""
    lead_diag = lts[0].diag()
    if len(lts) == 1:  # base case:
        return lead_diag
    trail_diag = kron_diag(*lts[1:])
    diag = lead_diag.unsqueeze(-2) * trail_diag.unsqueeze(-1)
    return diag.transpose(-1, -2).reshape(*diag.shape[:-2], -1)
d57bb679dede93ababb2d164cfc85132acef60db
1,518
import os

def _parse_env(name, default=None, dtype=None):
    """Parse input variable from `os.environ`.

    Parameters
    ----------
    name : str
        Name of the variable to parse from env.
    default : any, optional
        Set default value of variable. If None (default), parameter is
        considered required and so must be defined in environment.
        Otherwise, RuntimeError will be raised.
    dtype : type or None, optional
        Expected dtype of the variable. If None (default), variable will be
        parsed as a string. Other accepted values are: float, int, bool, str.
    """
    try:
        val = os.environ[name]
    except KeyError:
        if default is not None:  # Let's use the default value if var not in env
            return default
        raise RuntimeError("variable {:} not specified".format(name))

    # Parse var from env using the specified dtype
    if dtype is None or dtype == str:
        return str(val)
    if dtype == bool:
        # bool("False") is True, so compare against common truthy strings
        # instead of casting the raw string directly.
        return val.lower() in ("1", "true", "yes")
    if dtype == int or dtype == float:
        return dtype(val)
    raise TypeError("accepted dtypes are int, float, bool, str (or None)")
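A hedged example exercising _parse_env, assuming the definition above is in scope; the variable name is made up for illustration.

import os

os.environ["EXAMPLE_TIMEOUT"] = "30"
print(_parse_env("EXAMPLE_TIMEOUT", dtype=int))       # 30
print(_parse_env("MISSING_VAR", default="fallback"))  # 'fallback'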
ab947506899ffefafc37197877bcb51ea9bc78da
1,522
def add_init_or_construct(template, variable_slot, new_data, scope, add_location=-1):
    """Add init or construct statement."""
    if isinstance(new_data, list):
        template[variable_slot][scope].extend(new_data)
        return template
    if add_location < 0:
        template[variable_slot][scope].append(new_data)
    else:
        template[variable_slot][scope].insert(add_location, new_data)
    return template
125bc4e34dff837372dbbdc70c69a08a1e83e176
1,523
import os

def get_subdirs(dir):
    """Get the sub-directories of a given directory."""
    return [os.path.join(dir, entry) for entry in os.listdir(dir)
            if os.path.isdir(os.path.join(dir, entry))]
64b204b1c2878e454910b3d27a326d16a585477a
1,524
def no_conjugate_member(magic_flag):
    """should not raise E1101 on something.conjugate"""
    if magic_flag:
        something = 1.0
    else:
        something = 1.0j
    if isinstance(something, float):
        return something
    return something.conjugate()
5e32d31aa907ac9de2bd153bbe61354207262409
1,526
def isUniqueSeq(objlist):
    """Check that list contains items only once"""
    return len(set(objlist)) == len(objlist)
4522c43967615dd54e261a229b05c742676c7f99
1,528
def tag_to_dict(node):
    """Assume tag has one layer of children, each of which is text, e.g.

    <medalline>
      <rank>1</rank>
      <organization>USA</organization>
      <gold>13</gold>
      <silver>10</silver>
      <bronze>9</bronze>
      <total>32</total>
    </medalline>
    """
    d = {}
    for child in node:
        d[child.tag] = child.text
    return d
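A sketch showing tag_to_dict on the <medalline> example from its docstring, parsed with the standard-library ElementTree; assumes the definition above is in scope.

import xml.etree.ElementTree as ET

xml = "<medalline><rank>1</rank><organization>USA</organization><gold>13</gold></medalline>"
print(tag_to_dict(ET.fromstring(xml)))
# {'rank': '1', 'organization': 'USA', 'gold': '13'}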
e2131e070dce8620630e994cc25578a9a8438c64
1,531
def _vj_stat(v=None, j=None, freq_type='vj_occur_freq', ts=None):
    """
    Return estimate of a single v-gene, j-gene, or vj-gene-pairing frequency
    specified by the < v > and < j > arguments, given a tcrsampler instance < ts >

    Parameters
    ----------
    v : str
    j : str
    freq_type : str
        'vj_occur_freq', 'vj_freq', 'v_occur_freq', 'v_freq',
        'j_occur_freq', 'j_freq'
    ts : tcrsampler.sampler.TCRsampler
        sampler instance

    Example
    -------
    >>> import pandas as pd
    >>> import os
    >>> from tcrsampler.sampler import TCRsampler
    >>> from tcrregex.vj_diff import *
    >>> t = TCRsampler()
    >>> fn = os.path.join("tcrregex", "test_files", 'britanova_chord_blood_sample_5000.csv')
    >>> t.ref_df = pd.read_csv(fn)
    >>> t.build_background()
    >>> _vj_stat(v='TRBV20-1*01', j='TRBJ2-1*01', ts=t, freq_type='vj_occur_freq')
    0.014802960592118424
    >>> _vj_stat(v='TRBV20-1*01', ts=t, freq_type='v_occur_freq')
    0.060012002400480095
    >>> _vj_stat(j='TRBJ2-1*01', ts=t, freq_type='j_occur_freq')
    0.272254450890178
    """
    if ts is None:
        raise ValueError("._vj_stat requires < ts > be a TCRsampler instance")
    if v is None and j is None:
        raise ValueError("Neither a v- nor j-gene was supplied to ._vj_stat; at least one must be provided")
    if v is None:
        tp = j
        assert freq_type in ['j_freq', 'j_occur_freq']
    elif j is None:
        tp = v
        assert freq_type in ['v_freq', 'v_occur_freq']
    else:
        tp = (v, j)
        assert freq_type in ['vj_freq', 'vj_occur_freq']
    return ts.__dict__[freq_type][tp]
99228d17714c5ba403071ad0251f8642bc3148e6
1,532
from typing import Dict
from typing import Any
import yaml

def yaml_dump(dict_to_dump: Dict[str, Any]) -> str:
    """Dump the dictionary as a YAML document."""
    return yaml.safe_dump(dict_to_dump, default_flow_style=False)
4635514ba8ff901656b8a4b5869a6ae101528fa8
1,533
def dict_check_defaults(dd, **defaults):
    """Check that a dictionary has some default values

    Parameters
    ----------
    dd: dict
        Dictionary to check
    **defaults: dict
        Dictionary of default values

    Example
    -------
    .. ipython:: python

        @suppress
        from xoa.misc import dict_check_defaults
        dd = dict(color='blue')
        dict_check_defaults(dd, color='red', size=10)
    """
    if defaults is None:
        defaults = {}
    for item in defaults.items():
        dd.setdefault(*item)
    return dd
8edc3fdb351f7ec2d4ec3b1e788e6aa5cc0f8787
1,534
def make_album(singer, name, number=''):
    """Return a dict describing a singer's album."""
    album = {'singer': singer, 'name': name}
    if number:
        album['number'] = number
    return album
1f1bfaaeb501be0aa6fefd358177922246488b31
1,535
import hashlib

def cache_key(path):
    """Return cache key for `path`."""
    return 'folder-{}'.format(hashlib.md5(path.encode('utf-8')).hexdigest())
6b9afe1267e0cc0c7168bf3b0d5c7536e2b3c768
1,537
import torch

def quaternion2rotationPT(q):
    """Convert unit quaternion to rotation matrix

    Args:
        q(torch.tensor): unit quaternion (N,4)

    Returns:
        torch.tensor: rotation matrix (N,3,3)
    """
    r11 = (q[:, 0]**2 + q[:, 1]**2 - q[:, 2]**2 - q[:, 3]**2).unsqueeze(0).T
    r12 = (2.0 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3])).unsqueeze(0).T
    r13 = (2.0 * (q[:, 1] * q[:, 3] + q[:, 0] * q[:, 2])).unsqueeze(0).T
    r21 = (2.0 * (q[:, 1] * q[:, 2] + q[:, 0] * q[:, 3])).unsqueeze(0).T
    r22 = (q[:, 0]**2 + q[:, 2]**2 - q[:, 1]**2 - q[:, 3]**2).unsqueeze(0).T
    r23 = (2.0 * (q[:, 2] * q[:, 3] - q[:, 0] * q[:, 1])).unsqueeze(0).T
    r31 = (2.0 * (q[:, 1] * q[:, 3] - q[:, 0] * q[:, 2])).unsqueeze(0).T
    r32 = (2.0 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1])).unsqueeze(0).T
    r33 = (q[:, 0]**2 + q[:, 3]**2 - q[:, 1]**2 - q[:, 2]**2).unsqueeze(0).T

    r = torch.cat((r11, r12, r13,
                   r21, r22, r23,
                   r31, r32, r33), 1)
    r = torch.reshape(r, (q.shape[0], 3, 3))
    return r
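A sanity-check sketch, assuming the definition above is in scope: the identity quaternion (1, 0, 0, 0) should map to the 3x3 identity rotation matrix.

import torch

q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
print(quaternion2rotationPT(q))  # identity matrix, shape (1, 3, 3)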
feeed764ee179b31674790f9d2afc7b606a02aef
1,538
def get_total_trainsets(df_anual_data, segments):
    """Fill the training_sets dict.

    :param df_anual_data:
    :param segments:
    :return:
    """
    rows_per_day = int(((60 / 15) * 24))
    training_sets = {'ID_SEGMENT': [], 'MES': [], 'COD_LABORALIDAD': [], 'TRAINING_SET': []}

    for seg_id in segments:
        # 1) Partition the annual data by segment
        df_seg = df_anual_data.loc[df_anual_data.ID_SEGMENT == seg_id]
        for month_i in df_seg.FECHA.dt.month.unique():
            # 2) Split the monthly data into 12 datasets
            df_month_seg = df_seg.loc[df_seg.FECHA.dt.month == month_i]
            for code_i in df_month_seg.COD_LABORALIDAD.unique():
                # 3) Partition by days sharing the same day-type code
                df_month_seg_code = df_month_seg.loc[df_month_seg.COD_LABORALIDAD == code_i]

                # Fill training_sets dictionary
                training_sets['ID_SEGMENT'].append(seg_id)
                training_sets['MES'].append(month_i)
                training_sets['COD_LABORALIDAD'].append(code_i)
                training_sets['TRAINING_SET'].append(df_month_seg_code)
    return training_sets
968c3af1fdba5eb759eb93618ed48e3ca3ce5223
1,539
def find_most_similar(top_k, probs, cache_dict, num=10):
    """Return the filenames of the `num` most similar images. If matches are
    found, return a list of (filename, score) tuples; otherwise return an
    empty list.

    top_k      : list of indices of the best classes
    probs      : probabilities corresponding to the best class indices
    cache_dict : cached indices and probabilities
    num        : number of closest matches to return
    """
    similar = []
    for filename in cache_dict:
        score = 0
        count = 0
        other_top_k, other_probs = cache_dict[filename]
        for i, t in enumerate(top_k):
            if t in other_top_k:
                prob = probs[i]
                other_prob = other_probs[other_top_k.tolist().index(t)]
                score += abs(prob - other_prob)
                count += 1
        if count > 0:
            score = score / count
            similar.append((filename, score))
    if similar:
        similar.sort(key=lambda item: item[1])  # sort ascending by score
        return similar[:num]
    return similar
471083e1ed2b0fadb98cafad64d314ba779aa9e6
1,540
from operator import mul
from operator import inv

def berlekamp_massey(s):
    """Given a sequence of LFSR outputs, find the coefficients of the LFSR."""
    # Note: operator.inv(x) calls x.__invert__(); for plain ints that is
    # bitwise NOT, so the elements of `s` are presumably finite-field values
    # whose __invert__ implements the multiplicative inverse.
    C, B, L, m, b = [1], [1], 0, 1, 1
    for n in range(len(s)):
        d = s[n]
        for i in range(1, L + 1):
            d ^= mul(C[i], s[n - i])
        if d == 0:
            m += 1
        else:
            T = list(C)
            while len(C) <= len(B) + m:
                C += [0]
            t = mul(d, inv(b))
            for i in range(len(B)):
                C[i + m] ^= mul(t, B[i])
            if 2 * L <= n:
                L, B, b, m = n + 1 - L, T, d, 1
            else:
                m += 1
    return C[0:L + 1]
351f52dce7e4a95b986cc169f380347f317f851a
1,541
def normalize_parameter(kv):
    """Translate a parameter into standard form."""
    (k, v) = kv
    if k[0] == 'requiressl' and v in ('1', True):
        k[0] = 'sslmode'
        v = 'require'
    elif k[0] == 'dbname':
        k[0] = 'database'
    elif k[0] == 'sslmode':
        v = v.lower()
    return (tuple(k), v)
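A quick demo, assuming the definition above is in scope: keys arrive as lists so they can be rewritten in place before being frozen into tuples.

print(normalize_parameter((['requiressl'], '1')))  # (('sslmode',), 'require')
print(normalize_parameter((['dbname'], 'mydb')))   # (('database',), 'mydb')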
933ea71f452a16c1d4ae2630d6b58a92da1cbec0
1,542
def __iadd__(self, other):
    """Pythonic use of concat

    Example:
        xs += ys

    Returns self.concat(self, other)"""
    return self.concat(self, other)
713980aed9713c2882a19ae9837315a431611bbc
1,543
def calculate_reliability(data):
    """Calculates the reliability rating of the smartcab during testing."""
    success_ratio = data['success'].sum() * 1.0 / len(data)
    if success_ratio == 1:  # Always meets deadline
        return ("A+", "green")
    else:
        if success_ratio >= 0.90:
            return ("A", "green")
        elif success_ratio >= 0.80:
            return ("B", "green")
        elif success_ratio >= 0.70:
            return ("C", "#EEC700")
        elif success_ratio >= 0.60:
            return ("D", "#EEC700")
        else:
            return ("F", "red")
d1c9ad7bba220beeae06c568cfd269aaaebfb994
1,545
def explode(screen):
    """Convert a string representing a screen display into a list of lists."""
    return [list(row) for row in screen.split('\n')]
a43a9d8c830c4a784bb9c3505c62aaf2077bb732
1,548
import csv
import glob
import os

def merge_csv_files(directory, out):
    """\
    Merges the CSV files in the provided `directory` into one CSV file.

    :param str directory: Path where to find the CSV files
    :param str out: Resulting file name.
    """
    f = open(out, 'w', encoding='utf-8')
    writer = csv.writer(f)
    writerow = writer.writerow
    writerow(['URL', 'Draft Date', 'Document Number', 'Film Number',
              'From', 'Subject', 'TAGS', 'To'])
    cnt = 0
    for fn in sorted(glob.glob(directory + '*.csv'),
                     key=lambda fn: int(os.path.basename(fn).split('.')[0])):
        with open(fn, 'r', encoding='utf-8') as inputfile:
            reader = csv.reader(inputfile)
            for row in reader:
                cnt += 1
                writerow(row)
    f.close()
    return cnt
4d5066b27b9977161b92061a39a6207040982b41
1,549
def fpath_to_pgn(fpath):
    """Slices the pgn string from file path."""
    return fpath.split('/')[-1].split('.jpeg')[0]
1cc6cad60c5356b6c731947a59998117bf15035a
1,552
def data_zip(data):
    """
    Take input data and return a list with the sub-items zipped together, e.g.
    ([1,2,3], [4,5,6])     -> [[1,4], [2,5], [3,6]]
    {"a":[1,2], "b":[3,4]} -> [{"a":1,"b":3}, {"a":2,"b":4}]

    :param data: tuple (x, y, ...) or dict {"a": data1, "b": data2, ...}
    :return: list, or the input unchanged for other types
    """
    if isinstance(data, tuple):
        return [list(d) for d in zip(*data)]
    if isinstance(data, dict):
        data_list = []
        keys = data.keys()
        for i in range(len(data[list(keys)[0]])):  # iterate over the dict values' items
            data_dict = {}
            for key in keys:
                data_dict[key] = data[key][i]
            data_list.append(data_dict)
        return data_list
    return data
31dcaa3905a7d062cfe994543df31f293fdc962a
1,553
from pathlib import Path
from typing import List
import re

def tags_in_file(path: Path) -> List[str]:
    """Return all tags in a file."""
    matches = re.findall(r'@([a-zA-Z1-9\-]+)', path.read_text())
    return matches
1071c22ac79f51697b2ed18896aa1d17568ecb2c
1,554
def check_ip_in_lists(ip, db_connection, penalties):
    """
    Does an optimized ip lookup with the db_connection. Applies only the
    maximum penalty.

    Args:
        ip (str): ip string
        db_connection (DBconnector obj)
        penalties (dict): Contains tor_penalty, vpn_penalty,
            ip_blacklist_penalty keys with integer values

    Returns:
        :int: penalty_added
    """
    penalties = {'tor': int(penalties['tor_penalty']),
                 'vpn': int(penalties['vpn_penalty']),
                 'blacklist': int(penalties['ip_blacklist_penalty'])}
    # sort by penalty value to check in that order and perform early stopping
    penalties = sorted(penalties.items(), key=lambda x: x[1])
    penalty_added = 0

    for penalty_type, penalty_value in penalties:
        if penalty_value == 0:
            continue

        if penalty_type == 'tor':
            if db_connection.set_exists('tor_ips', ip):
                penalty_added = penalty_value
        elif penalty_type == 'blacklist':
            if db_connection.set_exists('blacklist_ips', ip):
                penalty_added = penalty_value
            elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:3])):
                penalty_added = penalty_value
            elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:2])):
                penalty_added = penalty_value
        elif penalty_type == 'vpn':
            if db_connection.set_exists('vpn_ips', ip):
                penalty_added = penalty_value
            elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:3])):
                penalty_added = penalty_value
            elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:2])):
                penalty_added = penalty_value

        if penalty_added > 0:
            break
    return penalty_added
2d6e3615d4b0d9b0fb05e7a0d03708856ffcbfef
1,555
def aiohttp_unused_port(loop, aiohttp_unused_port, socket_enabled):
    """Return aiohttp_unused_port and allow opening sockets."""
    return aiohttp_unused_port
9c5d0c1125a7758be2e07a8f8aca6676429a841a
1,556
import os

def os_specific_command_line(command_line):
    """Gets the operating system specific command string.

    :param command_line: command line to execute.
    :type command_line: str
    """
    current_os = os.environ["TEMPLATE_OS"]
    command = "/bin/bash -c '{}'" if current_os.lower() == "linux" else "cmd.exe /c \"{}\""
    return command.format(command_line)
898b97a57841af3c671bf530c6a31460bd1882a7
1,557
def keypoint_angle(kp1, kp2):
    """Compute the angle between two keypoints."""
    k = [
        (kp1.angle - 180) if kp1.angle >= 180 else kp1.angle,
        (kp2.angle - 180) if kp2.angle >= 180 else kp2.angle
    ]
    if k[0] == k[1]:
        return 0
    else:
        return abs(k[0] - k[1])
3feee667bcf767656da6334727b8d502be41d909
1,559
def is_validated(user):
    """Is this user record validated?"""
    # An account is "validated" if it has the `validated` field set to True, or
    # no `validated` field at all (for accounts created before the "account
    # validation option" was enabled).
    return user.get("validated", True)
c1ddfc52a62e71a68798dc07e7576a4ae42aa17f
1,562
import pickle

def load_config(path):
    """Loads the config dict from a file at path; returns dict."""
    with open(path, "rb") as f:
        config = pickle.load(f)
    return config
eb12aed2ebdeebacf3041f3e4880c714f99c052c
1,563
def lower_strings(string_list):
    """Helper function to return lowercase version of a list of strings."""
    return [str(x).lower() for x in string_list]
58dcaccbc0f4ce8f22d80922a3ac5da26d7f42b1
1,564
import os

def normalize_group_path(group, suffix=None):
    """
    :param group:
    :param suffix:
    :return:
    """
    group = os.path.join('/', group)
    if suffix is not None:
        if not group.endswith(suffix):
            group = os.path.join(group, suffix.rstrip('/'))
    return group
31b0ef7eb808dce8ea51a0f4edbaec61e5c5cc2c
1,565
import os
import subprocess

def mock_data(rootdir, data_dir):
    """Build mock functional data from available atlases"""
    mock_dir = os.path.join(data_dir, 'mock')
    if not os.path.exists(mock_dir):
        subprocess.run("python setup_mock_data.py".split(), cwd=rootdir)
    return mock_dir
2eed6ba8da9849e099841f61af56f1a982151c66
1,566
def eiffel_artifact_created_event():
    """Eiffel artifact created event."""
    return {
        "meta": {
            "id": "7c2b6c13-8dea-4c99-a337-0490269c374d",
            "time": 1575981274307,
            "type": "EiffelArtifactCreatedEvent",
            "version": "3.0.0",
        },
        "links": [],
        "data": {"identity": "pkg:artifact/created/test@1.0.0"},
    }
0ef2e5adadb58b92c94bac42c9880728573b159e
1,567
def _select_index_code(code):
    """
    1 - sh
    0 - sz
    """
    code = str(code)
    if code[0] == '3':
        return 0
    return 1
697d8e5ca1744c897b7eebbb7b9b0a3b45faec3d
1,568
import hashlib
import hmac

def _HMAC(K, C, Mode=hashlib.sha1):
    """
    Generate an HMAC value.

    The default mode is to generate an HMAC-SHA-1 value w/ the SHA-1 algorithm.

    :param K: shared secret between client and server.
        Each HOTP generator has a different and unique secret K.
    :type K: bytes
    :param C: 8-byte counter value, the moving factor.
        This counter MUST be synchronized between the HOTP generator
        (client) and the HOTP validator (server).
    :type C: bytes
    :param Mode: The algorithm to use when generating the HMAC value
    :type Mode: hashlib.sha1, hashlib.sha256, hashlib.sha512, or hashlib.md5
    :return: HMAC result. If HMAC-SHA-1, result is 160-bits (20-bytes) long.
    :rtype: bytes
    """
    return hmac.new(K, C, Mode).digest()
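A sketch of feeding _HMAC an 8-byte big-endian counter, as RFC 4226 HOTP does; assumes the definition above is in scope, and the secret is a placeholder.

import struct

K = b"12345678901234567890"
C = struct.pack(">Q", 0)  # counter 0 as an 8-byte moving factor
digest = _HMAC(K, C)
print(len(digest))  # 20 bytes for HMAC-SHA-1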
db9bf26c52427acc259f3cb1590c7c13b0d0dd9e
1,569
def extends_dict(target, source):
    """Will copy every key and value of source in target if key is not present in target"""
    for key, value in source.items():
        if key not in target:
            target[key] = value
        elif type(target[key]) is dict:
            extends_dict(target[key], value)
        elif type(target[key]) is list:
            target[key] += value
    return target
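A quick demo of the recursive merge, assuming the definition above is in scope: existing scalar keys win, nested dicts recurse, lists are concatenated.

target = {'a': 1, 'nested': {'x': 1}, 'tags': ['t1']}
source = {'a': 99, 'b': 2, 'nested': {'y': 2}, 'tags': ['t2']}
print(extends_dict(target, source))
# {'a': 1, 'nested': {'x': 1, 'y': 2}, 'tags': ['t1', 't2'], 'b': 2}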
5a68dde5e3bb7dbb81ad61c3698614f56dd5efd7
1,570
def nth_even(n):
    """Function I wrote that returns the nth even number."""
    return (n * 2) - 2
26e1465a039352917647ae650d653ed9842db7f6
1,571
import typing

def remove_fields_with_value_none(fields: typing.Dict) -> typing.Dict:
    """
    Remove keys whose value is none

    :param fields: the fields to clean
    :return: a copy of fields, without the none values
    """
    # Strip out none values
    fields = dict((key, value) for key, value in fields.items() if value is not None)
    return fields
22d7ac2a77248809c691bdb98f5f6ebaaf6d4f2b
1,572
import hashlib

def calculate_file_hash(f, alg, buf_size):
    """BUF_SIZE - 64 kb needed for large files"""
    h = hashlib.new(alg)
    for chunk in iter(lambda: f.read(buf_size), b""):
        h.update(chunk)
    return h.hexdigest()
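A hedged usage sketch, assuming the definition above is in scope: hashing a file in 64 KiB chunks; the path is illustrative, not from the source.

with open("example.bin", "rb") as f:
    print(calculate_file_hash(f, "sha256", 64 * 1024))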
6361ef8f18f5ae66e1d51503426c77f7505e10be
1,574
import typing
import hashlib

def sha512(data: typing.Optional[bytes] = None):
    """Returns a sha512 hash object; optionally initialized with a string."""
    if data is None:
        return hashlib.sha512()
    return hashlib.sha512(data)
067fffc4c006d9c46e5037b07b86149ac15bb573
1,575
def compute_state(observations, configuration):
    """
    :param observations:
    :param configuration:
    :return StateTensor:
    """
    StateTensorType = configuration.STATE_TYPE
    return StateTensorType([observations])
44a08caa02137438359c4cd764fff1700b6252b2
1,577
def nt(node, tag):
    """returns text of the tag or None if the tag does not exist"""
    if node.find(tag) is not None and node.find(tag).text is not None:
        return node.find(tag).text
    else:
        return None
7ca5f83cf18f918f594374fa2aa875415238eef6
1,578
def _explored_parameters_in_group(traj, group_node):
    """Checks if one of the parameters in `group_node` is explored.

    :param traj: Trajectory container
    :param group_node: Group node
    :return: `True` or `False`
    """
    explored = False
    for param in traj.f_get_explored_parameters():
        if param in group_node:
            explored = True
            break
    return explored
71cbafbad0dcc3fa9294c0bede5f6a09941d452b
1,581
from pathlib import Path

def _construct_out_filename(fname, group_name):
    """
    Construct a specifically formatted output filename.
    The vrt will be placed adjacent to the HDF5 file, as such
    write access is required.
    """
    basedir = fname.absolute().parent
    basename = fname.with_suffix('.vrt').name.replace(
        'wagl',
        group_name
    )
    out_fname = basedir.joinpath(Path(basename))
    return out_fname
117bb8470ab65f0b9fb11bb3151ae653e5e28d23
1,582
def _format_breed_name(name):
    """
    Format breed name for displaying

    INPUT
        name: raw breed name, str

    OUTPUT
        name: cleaned breed name, str
    """
    return name.split('.')[1].replace('_', ' ')
0c2680de9bd19e61d717fb84c1ce01e5095ddf35
1,584
from datetime import datetime

def now():
    """Get current timestamp

    Returns:
        str: timestamp string
    """
    current_time = datetime.now()
    str_date = current_time.strftime("%d %B %Y, %I:%M:%S %p")
    return str_date
4c487416fa119cae0c5310678dfd96e0f737b937
1,585
import ast

def _get_import(name, module: ast.Module):
    """get from import by name"""
    for stm in ast.walk(module):
        if isinstance(stm, ast.ImportFrom):
            for iname in stm.names:
                if isinstance(iname, ast.alias):
                    if iname.name == name:
                        return 'from ' + str(stm.module) + ' import ' + name
        if isinstance(stm, ast.Import):
            pass
    return None
bc33a882c65f7fe44d446376db3a71631629ff04
1,586
def get_dict_from_args(args):
    """Extracts a dict from task argument string."""
    d = {}
    if args:
        for k, v in [p.strip().split('=') for p in args.split(',')]:
            d[k] = v
    return d
8fb05329f6119393f94215808c6ab9b3116ec759
1,587
def change_app_header(uri, headers, body):
    """Add Accept header for preview features of Github apps API"""
    headers["Accept"] = "application/vnd.github.machine-man-preview+json"
    return uri, headers, body
3610d1d482e057ba73a1901aed8430ff35d98f3b
1,588
def fib_fail(n: int) -> int:
    """doesn't work because it's missing the base case"""
    return fib_fail(n - 1) + fib_fail(n - 2)
6e8138b7ce330c9ab191367e3911fe8146240c25
1,589
import itertools

def largets_prime_factor(num):
    """Returns the largest prime factor of num."""
    prime_factors = []
    for n in itertools.count(2):
        if n > num:
            break
        if num % n == 0:
            prime_factors.append(n)
            while num % n == 0:
                num = num // n  # integer division keeps num an int
    return max(prime_factors)
12100b6cdc2e0553295c1803e699544aa930bbfb
1,590
def format_decimal(amount):
    """jinja2 filter function for decimal number treatment"""
    amt_whole = int(amount)
    amt_whole_len = len(str(amt_whole))

    if amount < 1:
        amt_str = '{:0.15f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 4:
        amt_str = '{:0.3f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 6:
        amt_str = '{:0.2f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 9:
        amt_str = '{:0.1f}'.format(amount).rstrip("0").rstrip(".")
    else:
        amt_str = '{}'.format(amt_whole)
    return amt_str
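A demo of the tiered precision, assuming the definition above is in scope: more decimal places for small amounts, fewer as the whole part grows.

print(format_decimal(0.000123))      # '0.000123'
print(format_decimal(123.456))       # '123.456'
print(format_decimal(12345.678))     # '12345.68'
print(format_decimal(1234567890.5))  # '1234567890'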
55ee4b6134abd409ade396233fa07061d0a30764
1,591
def remove_special_char(df, col):
    """Removes special characters such as % and $ from numeric variables
    and converts them into float"""
    df[col] = df[col].replace(regex=True, to_replace=r'[^0-9.\-]', value=r'')
    df[col] = df[col].astype("float")
    return df[col]
c6c4c86eb480d2f045e40b3eb831d0b8d5381d33
1,592
def swap(lst, idx1, idx2):
    """
    >>> swap([0, 1, 2], 0, 1)
    [1, 0, 2]
    >>> swap([0, 1, 2], 0, 0)
    [0, 1, 2]
    """
    # print("Swapping [{}, {}] from {}".format(idx1, idx2, lst))
    lst[idx1], lst[idx2] = lst[idx2], lst[idx1]
    # print("resulting to {}".format(lst))
    return lst
81dee804db05eedaa1a9b5611e836a4c1da89b4b
1,593
def round_grade(grade: int) -> int:
    """
    Round the grade according to policy.

    Parameters
    ----------
    grade: int
        Raw grade.

    Returns
    -------
    rounded_grade: int
        Rounded grade.
    """
    if grade < 38:
        rounded_grade = grade
    else:
        closest_multiple_5 = (grade // 5 + 1) * 5
        if (closest_multiple_5 - grade) >= 3:
            rounded_grade = grade
        else:
            rounded_grade = closest_multiple_5
    return rounded_grade
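A demo of the policy, assuming the definition above is in scope: grades below 38 never round, and a grade rounds up only when it is within 2 of the next multiple of 5.

print(round_grade(29))  # 29 (below 38, left alone)
print(round_grade(38))  # 40 (2 away from 40)
print(round_grade(84))  # 85 (1 away from 85)
print(round_grade(82))  # 82 (3 away from 85, kept)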
8f1be9575d98b4ed24ff1e5904a5345d7ebc3e48
1,595
def _urpc_test_func_2(buf):
    """!
    @brief u-RPC variable length data test function.
    @param buf A byte string buffer
    @return The same byte string repeated three times
    """
    return buf * 3
f13f7dcf45eaa0706b69eb09c63d29ba2bbd3d60
1,596
def versionString(version):
    """Create version string."""
    ver = [str(v) for v in version]
    numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]
    return '.'.join(numbers) + '-'.join(rest)
2feec3f8ac5a1f2b848d0805dfa0c3ff53a44ead
1,597
def pymongo_formatter(credentials):
    """Returns a DSN for a pymongo-MongoDB connection.

    Note that the username and password will still be needed separately in the
    constructor.

    Args:
        credentials (dict): The credentials dictionary from the relationships.

    Returns:
        (string) A formatted pymongo DSN.
    """
    return '{0}:{1}/{2}'.format(
        credentials['host'],
        credentials['port'],
        credentials['path']
    )
69216575258f297c368ec3015c1c14569bb82cd2
1,601
def get_scanner(fs_id):
    """get scanner 3T or 1.5T"""
    sc = fs_id.split("_")[2]
    if sc in ("15T", "1.5T", "15t", "1.5t"):
        scanner = "15T"
    elif sc in ("3T", "3t"):
        scanner = "3T"
    else:
        print("scanner for subject " + fs_id + " cannot be identified as either 1.5T or 3T...")
        print("Please double check the IDs in the list of subjects")
        scanner = "false"
    return scanner
f905bd16f3103b0c6c02193d30fb945646afb54c
1,602
def compute_alphabet(sequences):
    """Returns the alphabet used in a set of sequences."""
    alphabet = set()
    for s in sequences:
        alphabet = alphabet.union(set(s))
    return alphabet
cf8f7dc1e31a28fe0910d806d18189aae7d7a85b
1,603
def Diff(a, b):
    """Returns the number of different elements between 2 iterables.

    Args:
        a(iterable): first iterable.
        b(iterable): second iterable.

    Returns:
        int: the number of different elements.
    """
    return sum(map(lambda x, y: bool(x - y), a, b))
0885bd224f956f138e80a4b681ebc581c733cc51
1,604
import random

def _get_random_hangul(count=(0xd7a4 - 0xac00)):
    """Generate a sequence of random, unique, valid Hangul characters.

    Returns all possible modern Hangul characters by default.
    """
    valid_hangul = [chr(_) for _ in range(0xac00, 0xd7a4)]
    return random.sample(valid_hangul, count)
3a41edd36cd2aac05e51a121743bcfb61455bd9b
1,605
import toml

def load_page_details(data, filename=None):
    """
    # Raises ValueError of (filename, error)
    """
    try:
        options = toml.loads(data)
    except toml.TomlDecodeError as exc:
        raise ValueError(filename, exc)
    if not isinstance(options, dict):
        raise ValueError(filename, 'page details could not be parsed into a JSON object')
    return options
117bb7d84625475745a30522fda7dccf1bc5a487
1,607
def display_datetime(datetime_str, time_zone=None, verbose=True):
    """Returns a formatted datetime with TZ (if provided) or 'Error (Missing)'.

    >>> print(datetime.datetime.utcnow().strftime("%Y/%m/%d %a %I:%M %p"))
    2019/05/19 Sun 01:10 AM
    """
    if datetime_str:  # and type(datetime_str) == datetime.datetime.now():
        if verbose:
            return f'{datetime_str.strftime("%Y/%m/%d %a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
        else:
            return f'{datetime_str.strftime("%a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
    else:
        return 'Error (Missing)'
45caa488688e790ae19f8f3f2cda2cb0f250b1fd
1,608
import torch

def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
    """
    Creates an autoregressive channel mask.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
        in_channels: int
            Number of input channels to layer.
        out_channels: int
            Number of output channels of layer.
        data_channels: int
            Number of channels in the input data, e.g. 3 for RGB images.
            (default = 3)

    Output:
        mask: torch.FloatTensor
            Shape (out_channels, in_channels).
            A mask with 0 in places for masked elements.
    """
    in_factor = in_channels // data_channels + 1
    out_factor = out_channels // data_channels + 1

    base_mask = torch.ones([data_channels, data_channels])
    if mask_type == 'A':
        base_mask = base_mask.tril(-1)
    else:
        base_mask = base_mask.tril(0)

    mask_p1 = torch.cat([base_mask] * in_factor, dim=1)
    mask_p2 = torch.cat([mask_p1] * out_factor, dim=0)
    mask = mask_p2[0:out_channels, 0:in_channels]
    return mask
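A sketch checking the mask shape and the type-'A' zero diagonal for a 3-to-3 channel layer, assuming the definition above is in scope.

import torch

mask = mask_channels('A', 3, 3)
print(mask.shape)  # torch.Size([3, 3])
print(mask)        # strictly lower-triangular: type 'A' masks the current channel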
772fa71f63d2f31c80966db0b0eb43a70ac5e9a9
1,609
import textwrap

def dedent(text):
    """
    Remove all common indentation from every line but the 0th.
    This will avoid getting <code> blocks when rendering text via markdown.
    Ignoring the 0th line will also allow the 0th line not to be aligned.

    Args:
        text: A string of text to dedent.

    Returns:
        String dedented by above rules.

    For example:
        assertEquals("bar\nline1\nline2", dedent("bar\n  line1\n  line2"))
        assertEquals("bar\nline1\nline2", dedent(" bar\n  line1\n  line2"))
        assertEquals("bar\n  line1\nline2", dedent(" bar\n    line1\n  line2"))
    """
    text = textwrap.dedent(text)
    text_lines = text.split('\n')
    text_not_first = "\n".join(text_lines[1:])
    text_not_first = textwrap.dedent(text_not_first)
    text = text_lines[0] + "\n" + text_not_first
    return text
b450a873c4c2b667d10c66985d19f8057aa205f9
1,610
def S_difference_values(_data_lista, _data_listb):
    """Returns new data samples where values are transformed by transformer values."""
    d_data = []
    dsa = len(_data_lista)
    dsb = len(_data_listb)
    if dsa != dsb:
        return []
    for i in range(dsa):
        d_data.append(_data_lista[i] - _data_listb[i])
    return d_data
40ec82cb7ef53d5e227b3287a9c1d08e78112e09
1,611
import platform

def get_platform_system():
    """return platform.system

    platform module has many regexp, so importing it is slow...
    import only if required
    """
    return platform.system()
2531f1883d5acd0c192c0061d7cbf29637197706
1,613
def sumaDigits(s):
    """assumes s is a string and returns the sum of the decimal digits in s.
    For example if s is 'a2b3c' it returns 5"""
    suma = 0
    for c in s:
        try:
            suma += int(c)
        except ValueError:
            continue
    return suma
47b09476925d45741d97eca5362e736f83a8185d
1,614
def join_nonempty(l):
    """
    Join all of the nonempty strings with a plus sign.

    >>> join_nonempty(('x1 + x2 + x1:x2', 'x3 + x4'))
    'x1 + x2 + x1:x2 + x3 + x4'
    >>> join_nonempty(('abc', '', '123', ''))
    'abc + 123'
    """
    return ' + '.join(s for s in l if s != '')
041948f95caaef14cb96e761f08b4a84fba37d6e
1,615
def sieveEr(N):
    """
    input: positive integer 'N' > 2
    returns a list of prime numbers from 2 up to N.

    This function implements the algorithm called
    sieve of Eratosthenes.
    """
    # precondition
    assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    beginList = [x for x in range(2, N + 1)]

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(beginList)):
        for j in range(i + 1, len(beginList)):
            if (beginList[i] != 0) and (beginList[j] % beginList[i] == 0):
                beginList[j] = 0

    # filters actual prime numbers.
    ans = [x for x in beginList if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
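A quick check of the sieve, assuming the definition above is in scope.

print(sieveEr(20))  # [2, 3, 5, 7, 11, 13, 17, 19]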
8d48d2a491341d5302307597ad64ac4a37b1abb8
1,617
def validate_fields(item, fields=None):
    """
    Check that all requested fields were returned

    :param item: comment or submission
    :param fields: list[str]
    :return: (final_fields, missing_fields) as a pair of sets
    """
    actual_fields = item.d_.keys()
    if fields is None:
        requested_fields = actual_fields
    else:
        requested_fields = fields
    missing_fields = set(requested_fields).difference(actual_fields)
    # drop extra fields returned from api
    final_fields = set(requested_fields).intersection(actual_fields)
    return final_fields, missing_fields
88bd6d20ba1cc04f8478128f7f32192ef680762b
1,618
from typing import Iterable

def remove_nones(sequence: Iterable) -> list:
    """Removes elements where bool(x) evaluates to False.

    Examples
    --------
    Normal usage::

        remove_nones(['m', '', 'l', 0, 42, False, True])
        # ['m', 'l', 42, True]
    """
    # Note: this is equivalent to list(filter(None, sequence))
    return [x for x in sequence if x]
975c0104b3cc05bb82fa211c1b85b49c7d3cb174
1,619
from typing import List
import pathlib

def retrieve(passed: List[str]) -> List[str]:
    """
    Retrieves all items that are able to be converted, recursively, from the passed list.

    Parameters
    ----------
    passed: List[str]
        The items to search.

    Returns
    -------
    List[str]:
        All found items.
    """
    ret = []
    for item in passed:
        try:
            path = pathlib.Path(item)
            if path.is_file() and path.suffix == ".txt":
                ret += retrieve(path.read_text().split("\n"))
            elif path.is_file():
                ret.append(str(path))
            elif path.is_dir():
                ret += retrieve([str(p) for p in path.iterdir()])
            else:
                ret.append(item)
        except OSError:
            ret.append(item)
    return ret
6789255e302caf9dc6e481df532acec20dfc6b3c
1,620
def removeBots(gdf, bot_list):
    """
    A Function for removing Twitter bots.

    Parameters
    ----------
    gdf: <gpd.GeoDataFrame>
        A GeoDataFrame from which Twitter bots should be removed.
    bot_list: <list>
        Input either 'home_unique_days' or 'home_unique_weeks'

    Output
    ------
    <gpd.GeoDataFrame>
        A processed GeoDataFrame. Likely bots removed.
    """
    copy = gdf  # reference to the original frame, kept for progress reporting
    for index, row in gdf.iterrows():
        userid = str(row['user']['id'])
        for item in bot_list:
            bot_id = item['userid']
            if bot_id == userid:
                gdf = gdf.drop(index)
                print("A bot dropped: ID", userid, ". Length of GDF now: ", len(gdf))
        print("Processing: ", index, "/", len(copy))
    return gdf
e938f46bcf5c87dfa81db96f127c88d948f061db
1,621
def bq_to_rows(rows):
    """Reformat BigQuery's output to regular pnguin LOD data

    Reformat BigQuery's output format so we can put it into a DataFrame

    Args:
        rows (dict): A nested list of key-value tuples that need to be
            converted into a list of dicts

    Returns:
        list: A list of dictionaries based on the input x
    """
    def _reformat(x):
        pairs = x.items()
        row = {}
        for pair in pairs:
            key, value = pair
            row[key] = value
        return row
    return [_reformat(x) for x in rows]
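A hedged demo, assuming the definition above is in scope; the input mimics rows of key/value mappings, and a DataFrame is one natural consumer of the output though not required by the function itself.

rows = [{'name': 'a', 'count': 1}, {'name': 'b', 'count': 2}]
print(bq_to_rows(rows))  # [{'name': 'a', 'count': 1}, {'name': 'b', 'count': 2}]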
9ff842d1c41d7ebe5c822d4c07b2f26b5524b0fe
1,622
def css_defaults(name, css_dict):
    """Find the first default value, e.g.

    background -> #FFF
    color -> #FFF
    content -> ""
    """
    cur = css_dict.get(name) or css_dict.get(name[1:-1])
    if cur is None:
        return None
    default = cur.get('default')
    if default is not None:
        return default

    for v in cur['values']:
        if v.startswith('<') and v.endswith('>'):
            ret = css_defaults(v, css_dict)
            if ret is not None:
                return ret
8418af5e27dfc85a3ec70dea2e7416595ee86a1f
1,623
def average_saccades_time(saccades_times):
    """
    :param saccades_times: a list of tuples with (start_time_inclusive, end_time_exclusive)
    :return: returns the average time of saccades
    """
    return sum([saccade_time[1] - saccade_time[0]
                for saccade_time in saccades_times]) / len(saccades_times)
a22a5d89ddd4317fa10ed6f5d920f17560028514
1,625