content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def _indent(s):
# type: (str) -> int
"""
    Compute the indentation of s, or None for a blank line.
Example:
>>> _indent("foo")
0
>>> _indent(" bar")
4
>>> _indent(" ")
>>> _indent("")
"""
t = s.lstrip()
return len(s) - len(t) if t else None | c523704bf4ff75f132c9e021a4db0d0ac5482f0b | 900 |
import re
def normalize_middle_high_german(
text: str,
to_lower_all: bool = True,
to_lower_beginning: bool = False,
alpha_conv: bool = True,
punct: bool = True,
):
"""Normalize input string.
to_lower_all: convert whole text to lowercase
alpha_conv: convert alphabet to canonical form
punct: remove punctuation
>>> from cltk.alphabet import gmh
>>> from cltk.languages.example_texts import get_example_text
>>> gmh.normalize_middle_high_german(get_example_text("gmh"))[:50]
'ik gihorta ðat seggen\\nðat sih urhettun ænon muotin'
"""
if to_lower_all:
text = text.lower()
if to_lower_beginning:
text = text[0].lower() + text[1:]
text = re.sub(r"(?<=[\.\?\!]\s)(\w)", lambda x: x.group(1).lower(), text)
if alpha_conv:
text = (
text.replace("ē", "ê")
.replace("ī", "î")
.replace("ā", "â")
.replace("ō", "ô")
.replace("ū", "û")
)
text = text.replace("ae", "æ").replace("oe", "œ")
if punct:
text = re.sub(r"[\.\";\,\:\[\]\(\)!&?‘]", "", text)
return text | 543a69175cd78bf2678bd0b173e1112e96d75fd8 | 901 |
def _convert_format(input_format, reverse=0):
"""Convert FITS format spec to record format spec. Do the opposite
if reverse = 1.
"""
fmt = input_format
(repeat, dtype, option) = _parse_tformat(fmt)
if reverse == 0:
if dtype in _fits2rec.keys(): # FITS format
if dtype == 'A':
                output_format = _fits2rec[dtype] + str(repeat)
                # to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
if fmt.lstrip()[0] == 'A' and option != '':
                    output_format = _fits2rec[dtype] + str(int(option))  # make sure option is an integer
else:
_repeat = ''
if repeat != 1:
                    _repeat = str(repeat)
output_format = _repeat+_fits2rec[dtype]
elif dtype == 'X':
            nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
            output_format = _FormatX(str((nbytes,)) + 'u1')
output_format._nx = repeat
elif dtype == 'P':
output_format = _FormatP('2i4')
output_format._dtype = _fits2rec[option[0]]
elif dtype == 'F':
output_format = 'f8'
else:
            raise ValueError("Illegal format %s" % fmt)
else:
if dtype == 'a':
output_format = option+_rec2fits[dtype]
elif isinstance(dtype, _FormatX):
            print('X format')
elif dtype+option in _rec2fits.keys(): # record format
_repeat = ''
if repeat != 1:
                _repeat = str(repeat)
output_format = _repeat+_rec2fits[dtype+option]
else:
            raise ValueError("Illegal format %s" % fmt)
return output_format | 2861a74752328bf6e654f5f4ea31238f6b672f54 | 902 |
import re
import sys
def _module(root_pkg, name):
"""Imports the module, catching `ImportError`
Args:
root_pkg (str): top level package
name(str): unqualified name of the module to be imported
Returns:
module: imported module
"""
def _match_exc(e):
return re.search(
' {}$|{}'.format(
# py2
_module_from_cmd(name),
# py3
_module_name((root_pkg, name)),
),
str(e),
)
try:
return _import(root_pkg, name)
except Exception as e:
if (isinstance(e, ImportError) and _match_exc(e)
or isinstance(e, (argh.CommandError, CommandError))
):
sys.stderr.write(str(e) + "\n")
else:
raise
return None | 02063885d6fe4a4ef127a00c88e352ede04f1edd | 903 |
from pathlib import Path
def clean_elec_demands_dirpath(tmp_path: Path) -> Path:
"""Create a temporary, empty directory called 'processed'.
Args:
tmp_path (Path): see https://docs.pytest.org/en/stable/tmpdir.html
Returns:
        Path: Path pointing to 'SM_electricity' inside a temporary, empty directory called 'processed'.
"""
dirpath = tmp_path / "processed"
    dirpath.mkdir()
return dirpath / "SM_electricity" | 4d5a528a7e24b80678b41374deac2ac4580da50a | 904 |
def get_file_from_project(proj: Project, file_path):
"""
Returns a file object (or None, if error) from the HEAD of the default
branch in the repo. The default branch is usually 'main'.
"""
try:
file = proj.files.raw(file_path=file_path, ref=proj.default_branch)
LintReport.trace(f'Accessing \'{file_path}\' from {proj.name}.')
return file
except gitlab.GitlabGetError as _:
LintReport.trace(
f'Problem accessing \'{file_path}\' from {proj.name}.')
return None | 796203fabf6f25403f24e6c3f50d93f5e20d1d80 | 905 |
def get_logger_by_name(name: str):
"""
    Gets the logger given its registered name
    :param name: Name of the logger needed
:type name: string
:returns: Logger
"""
if name not in logger_registry.keys():
raise NotImplementedError
else:
return logger_registry[name] | b17b0ad215f25940b751f995c6f7cd441f6cd4e6 | 906 |
def gen_appr_():
""" 16 consonants """
appr_ = list(voiced_approximant)
appr_.extend(unvoiced_approximant)
appr_.extend(voiced_lateral_approximant)
return appr_ | 948d52aa38ec03f0f3b21dcd6c2c5e60d30cdbb3 | 907 |
from typing import Union
from typing import Iterable
def convert_unit(
to_convert: Union[float, int, Iterable[Union[float, int, Iterable]]],
old_unit: Union[str, float, int],
new_unit: Union[str, float, int],
) -> Union[float, tuple]:
"""
Convert a number or sequence of numbers from one unit to another.
If either unit is a number it will be treated as the number of points per unit. So 72 would mean 1 inch.
Args:
to_convert (float, int, Iterable): The number / list of numbers, or points, to convert
old_unit (str, float, int): A unit accepted by fpdf.FPDF or a number
new_unit (str, float, int): A unit accepted by fpdf.FPDF or a number
Returns:
(float, tuple): to_convert converted from old_unit to new_unit or a tuple of the same
"""
unit_conversion_factor = get_scale_factor(new_unit) / get_scale_factor(old_unit)
if isinstance(to_convert, Iterable):
return tuple(
map(lambda i: convert_unit(i, 1, unit_conversion_factor), to_convert)
)
return to_convert / unit_conversion_factor | e4ac4f5ba405151d45cbab0b04fcf55a9710a0bf | 908 |
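A minimal usage sketch. Here get_scale_factor is a hypothetical stand-in that returns points per unit (72 for an inch, 72/25.4 for a millimetre), mirroring the convention described in the docstring; the real helper is not included in the snippet.

def get_scale_factor(unit):
    # Hypothetical helper: numbers are treated as points-per-unit, names map to common fpdf units.
    if isinstance(unit, (int, float)):
        return float(unit)
    return {"pt": 1, "mm": 72 / 25.4, "cm": 72 / 2.54, "in": 72}[unit]

print(convert_unit(25.4, "mm", "in"))       # 1.0
print(convert_unit((72, 144), "pt", "in"))  # (1.0, 2.0)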
from typing import Union
from typing import Tuple
def image_preprocess(image, image_size: Union[int, Tuple[int, int]]):
"""Preprocess image for inference.
Args:
    image: input image, can be a tensor or a numpy array.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
Returns:
(image, scale): a tuple of processed image and its scale.
"""
input_processor = dataloader.DetectionInputProcessor(image, image_size)
input_processor.normalize_image()
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
image_scale = input_processor.image_scale_to_original
return image, image_scale | 091b2c3098bbf72a02a203486938c354719b3c83 | 909 |
def create_cluster_meta(cluster_groups):
"""Return a ClusterMeta instance with cluster group support."""
meta = ClusterMeta()
meta.add_field('group')
cluster_groups = cluster_groups or {}
data = {c: {'group': v} for c, v in cluster_groups.items()}
meta.from_dict(data)
return meta | 01c96d966c0c581c6d72cf7a8fb67cec9fd41d6e | 910 |
def dict_has_key_and_value_include_str(the_dict,key,str):
"""指定字典中包括键,并且键值包含某个字符片段"""
if the_dict.__contains__(key):
if str in the_dict[key]:
return True
return False | 56058581914233c9520986db7f80c4b879443e97 | 911 |
from typing import List
from typing import Any
from functools import reduce
import sys
def get_list_size(ls:List[Any]) -> float:
"""Return size in memory of a list and all its elements"""
return reduce(lambda x, y: x + y, (sys.getsizeof(v) for v in ls), 0) + sys.getsizeof(ls) | d930f3ef4ca9c5728153591d15e9b55211225d9a | 912 |
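For illustration, the reported size is the list object's own overhead plus sys.getsizeof of every element, so it is larger than sys.getsizeof(ls) alone (exact numbers are platform dependent):

import sys

nums = list(range(1000))
print(sys.getsizeof(nums), get_list_size(nums))  # e.g. roughly 8056 vs 36056 on 64-bit CPython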
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function."""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__) | 2b6dbfc817416b8e5bce486ec12dad09281fb7b6 | 913 |
def get_formsets(what, extra=0, **kwargs):
"""Returns a list of formset instances"""
try:
related_fields = {}
relation_config = get_form_config('Relations', **kwargs)
operation = 'create' if 'Create' in what else 'update'
for relation in relation_config:
field_config = relation_config[relation]
related_fields[relation] = get_form_fields(operation, field_config)
def get_related_model(relation):
"""Returns related model"""
args = get_app_model_as_params(**kwargs)
args.pop()
args.append(relation)
return apps.get_model(*args)
return [inlineformset_factory(
get_model(**kwargs),
get_related_model(relation),
fields=related_fields[relation],
extra=extra
) for relation in related_fields]
except KeyError:
return [] | 39b6ec430c245cf54cc1d28abaf89271237ef961 | 914 |
from decimal import Decimal
def round_even(number):
"""Takes a number and returns it rounded even"""
# decimal.getcontext() -> ROUND_HALF_EVEN is default
return Decimal(number).quantize(0) | 2b19200a1a10597976fe29eaf6363cf59212241e | 915 |
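The default decimal context rounds with ROUND_HALF_EVEN (banker's rounding), so exact halves go to the nearest even integer; passing strings avoids binary-float artifacts:

for x in ("0.5", "1.5", "2.5", "2.6"):
    print(x, "->", round_even(x))  # 0, 2, 2 and 3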
def _build_conditional_single(cond, vals, model_cls=None):
"""
Builds the single conditional portion of a where clause.
Args:
cond (()/[]): The tuple/list containing the elements for a single
conditional statement. See Model.query_direct() docs for full details
on the format.
vals ({str:str/int/bool/datetime/enum/etc}): The mapping of variable names
as they will be used within parameterized format (i.e. `%(<>)s` format)
in the returned `clause`. This is expected to contain all variables
already built into the where clause currently being processed and will
be modified here if a value/variable is part of the conditional.
model_cls (Class<Model<>> or None): The class itself of the model holding
the valid column names. Can be None if skipping that check for
increased performance, but this is ONLY recommended if the source of the
column names in the structured `where` parameter is internally
controlled and was not subject to external user input to avoid SQL
injection attacks.
Returns:
(str): The portion of the clause that represents this single conditional.
Any variables will be in parameterized format (i.e. `%(<>)s` format).
Note that the `vals` provided will be modified by adding any new
variables included in this portion of the clause.
Raises:
(NonexistentColumnError): Raised if the column provided in the `cond` does
not exist in the official list of columns in the provided model (only
possible if model_cls provided as non-None).
(ValueError): Raised if the LogicOp provided as part of the `cond` is not
a valid LogicOp option for this Orm.
"""
if model_cls is not None:
_validate_cols([cond[0]], model_cls)
if cond[1] is model_meta.LogicOp.NOT_NULL:
return f'{cond[0]} NOT NULL'
# The rest below have a value, so all would use same key
val_key = f'wval{str(len(vals))}'
if cond[1] is model_meta.LogicOp.EQ \
or cond[1] is model_meta.LogicOp.EQUAL \
or cond[1] is model_meta.LogicOp.EQUALS:
vals[val_key] = cond[2]
return f'{cond[0]} = %({val_key})s'
if cond[1] is model_meta.LogicOp.LT \
or cond[1] is model_meta.LogicOp.LESS_THAN:
vals[val_key] = cond[2]
return f'{cond[0]} < %({val_key})s'
if cond[1] is model_meta.LogicOp.LTE \
or cond[1] is model_meta.LogicOp.LESS_THAN_OR_EQUAL:
vals[val_key] = cond[2]
return f'{cond[0]} <= %({val_key})s'
if cond[1] is model_meta.LogicOp.GT \
or cond[1] is model_meta.LogicOp.GREATER_THAN:
vals[val_key] = cond[2]
return f'{cond[0]} > %({val_key})s'
if cond[1] is model_meta.LogicOp.GTE \
or cond[1] is model_meta.LogicOp.GREATER_THAN_OR_EQUAL:
vals[val_key] = cond[2]
return f'{cond[0]} >= %({val_key})s'
err_msg = f'Invalid or Unsupported Logic Op: {cond[1]}'
logger.error(err_msg)
raise ValueError(err_msg) | bb61133f25901321df41f06fa5407cb98c596f88 | 916 |
def isNullOutpoint(tx):
"""
isNullOutpoint determines whether or not a previous transaction output point
is set.
"""
nullInOP = tx.txIn[0].previousOutPoint
if (
nullInOP.index == wire.MaxUint32
and nullInOP.hash == ByteArray(0, length=HASH_SIZE)
and nullInOP.tree == wire.TxTreeRegular
):
return True
return False | ac68a81dfabd7415136b5bfe0c38b6b551048e88 | 917 |
def cmyk_to_rgb(c, m, y, k):
"""
"""
r = (1.0 - c) * (1.0 - k)
g = (1.0 - m) * (1.0 - k)
b = (1.0 - y) * (1.0 - k)
return r, g, b | 03ece22efe6f88ff6e9f2825c72bcb4b18a238ef | 918 |
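A worked example of the formula r = (1 - c)(1 - k), and likewise for g and b, with all components on a 0-1 scale:

print(cmyk_to_rgb(1.0, 0.0, 0.0, 0.0))  # pure cyan  -> (0.0, 1.0, 1.0)
print(cmyk_to_rgb(0.0, 0.0, 0.0, 1.0))  # full black -> (0.0, 0.0, 0.0)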
def get_by_id(group_id: int, db: Session = Depends(get_db), member: MemberModel = Depends(get_active_member)):
"""Get group by id"""
item = service.get_by_id(db, group_id)
return item | 035e5c0d74de017778f82052c5341f7c69b9dd8a | 919 |
import numpy as np
from .model_store import download_model
import os
def get_jasperdr(version,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create Jasper DR model with specific parameters.
Parameters:
----------
    version : str
        Version of the model in the "<blocks>x<repeat>" form, e.g. "10x5".
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
blocks, repeat = tuple(map(int, version.split("x")))
main_stage_repeat = blocks // 5
channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
stage_repeat = np.full((8,), 1)
stage_repeat[1:-2] *= main_stage_repeat
channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], [])
kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], [])
dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], [])
net = JasperDr(
channels=channels,
kernel_sizes=kernel_sizes,
dropout_rates=dropout_rates,
repeat=repeat,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net | 8fc525eb20e75f143b07145b9cbd42de846d0547 | 920 |
from datetime import datetime
def productionadjustment():
# TODO: get route to display
"""Renders the home page."""
return render_template(
'productionadjustment.html',
title='Production Adjustment',
year=datetime.now().year,
wellsel=bigagg
) | 654fcd11078d9344889d214f6322211546b0045b | 921 |
import os
def upload_directory_targetid(token, local_dir, target_folder_id, skip_existed=False, show_skip_info=True, fencrypt=None):
"""
token: request_token
local_dir: 需要上传的文件夹, 如r"d:\to_be_uploaded"
target_folder_path: 上传的目标位置的父文件夹id
这个函数不是递归函数,使用os.walk mkdir_p upload完成上传任务
如果目标文件夹已经存在,会print一行[WARN]; 如果目标文件已经存在,会以在文件末尾添加(1)的形式上传 而不是替换!
"""
#检查本地目录要是一个目录
global a
assert os.path.isdir(local_dir), "expected a folder, local_dir={local_dir}".format(**locals())
    name = getfilename(local_dir)  # name of the folder to upload
fid = str(target_folder_id)
target_folder_lsdir = lsdir(a, fid)
    # the parent directory has already been listed; now just create the folder with mkdir_p, sh*t cache!
try:
targetfid = mkdir(token, name, parent_id = fid)
except FileExistsError:
targetfid = [i[1].split("_")[1] for i in target_folder_lsdir if i[0]==name and i[1].split("_")[0]=="folder"][0]
print(targetfid)
cache = {"fs":{}, "path":"/", "fid": targetfid}
cache["fs"].update(generate_fscache("", fid = targetfid, prefix="/", cache=cache))
#print(cache["fs"])
target_folder_path = ""
for root, dirs, files in os.walk(local_dir):
for dir in dirs:
dirname = root.replace(local_dir,"",1).replace("\\",'/')+"/"+dir #"/Image/aha"
mkdir_p(token, target_folder_path+dirname, cache)
for filename in files:
            relative_root = root.replace(local_dir,"",1).replace("\\",'/')  # "/Image/aha" or ""
            remote_abs_folder = target_folder_path+relative_root  # "uploaded/Image/aha" or "uploaded"; note that despite the "abs" name this is still relative to cache["path"]
            remote_abs_filepath = remote_abs_folder+"/"+safefilename(filename)  # "uploaded/Image/aha/example.jpg" or "uploaded/example.jpg"
#print(remote_abs_folder, cache)
type, folder_id = path_to_typed_id(remote_abs_folder, cache)
assert type=="folder", "expected folder {remote_abs_folder}".format(**locals())
local_filepath = os.path.join(local_dir, relative_root[1:], filename)
if skip_existed and remote_abs_filepath in cache["fs"]:
if show_skip_info:
print("skip existed file: {remote_abs_filepath}".format(**locals()))
continue
filesize = getsize(local_filepath)
if filesize>BLOCKSIZE:
data=block(open(local_filepath,"rb"), showhint=False)
else:
data=open(local_filepath,"rb").read()
newfileid = upload(token,filename,data,filesize,folder_id=folder_id,fencrypt=fencrypt)
cache["fs"][remote_abs_filepath] = ("file", newfileid, filesize)
return targetfid | 2dc6630f81d37746e7fdbd4a5bbd521b79083087 | 922 |
import logging
def thumbnail_download(data, k, url, mongo):
"""
缩略图下载
:return: 返回信息
"""
if data['thumbnail_urls'] is not None:
        logging.debug(r'Start downloading thumbnails: %s' % data['thumbnail_urls'])
mongo.update(url, COD.REDTHU)
try:
            referer = url if k != '163' else None  # NetEase (163) images must not be requested with a referer
thumbnail_local_files = files_download(
data['thumbnail_urls'], referer=referer
)
except Exception as e:
            logging.debug(r'Failed to download thumbnails')
mongo.update(url, COD.THUERR)
message = e.args
            raise AssertionError(r'{}: failed to download thumbnails,\n message: {}'.format(url, message[0]))
        logging.debug(r'Thumbnails downloaded successfully')
mongo.update(url, COD.GETTHU)
return thumbnail_local_files
else:
        # Video thumbnails should be generated with OpenCV; not implemented yet
        logging.debug(r'Thumbnail URLs are empty; generating thumbnails with OpenCV is not implemented yet')
mongo.update(url, COD.THUNIL)
return None | 2df0160109b96bf8f3eaee8f600ad5e1fa3cd3a8 | 923 |
def inMandelSet(x: int, y: int, max_iteration: int) -> int:
"""inMandelSet determines if complex(x,y) is in the mandelbrot set."""
z = 0
for k in range(max_iteration):
z = z ** 2 + complex(x,y)
if abs(z) > 2: return k
return k | 404beb051d0982c081a6564793017282451fa44b | 924 |
def isBinaryPalindrome(num):
"""assumes num is an integer
returns True if num in binary form is a palindrome, else False"""
return str(bin(num))[2::] == str(bin(num))[:1:-1] | 0181811e57964cb056391618084d9473b6c845e3 | 925 |
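The slicing does the whole job: bin(9) is '0b1001', [2:] strips the '0b' prefix, and [:1:-1] walks backwards and stops before index 1, which yields the same digits reversed.

print(bin(9), bin(9)[2:], bin(9)[:1:-1])              # 0b1001 1001 1001
print(isBinaryPalindrome(9), isBinaryPalindrome(10))  # True False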
import thermos.commands
import os, pip, sys, time, random, curses
from docopt import docopt
from termcolor import cprint
from app import create_app, db
from platform import python_version
def main():
"""The main CLI entry-point."""
options = docopt(__doc__, version=VERSION)
def create_structure():
app_name = options['<appname>']
if not os.path.exists(app_name):
os.makedirs(app_name)
os.chdir(os.getcwd()+"/"+app_name)
os.system('git init')
os.system("touch .gitignore")
os.system("touch README.md")
with open('.gitignore','w+') as gitignore:
gitignore.write('virtual/ \n *.pyc \n start.sh')
gitignore.close()
if not os.path.exists('tests'):
os.makedirs('tests')
config_file = 'class Config:\n\tpass \n class ProdConfig(Config):\n\tpass\
\nclass DevConfig(Config): \n\tDEBUG = True\n\n\
config_options={"production":ProdConfig,"default":DevConfig}'
manage_file = "from flask_script import Manager,Server\n\
app = create_app('default')\n\n\
manager = Manager(app)\n\n\
manager.add_command('server', Server)\n\n\
if __name__ == '__main__':\n\
\tmanager.run()\
"
with open('config.py','w+') as config:
config.write(config_file)
config.close()
with open('manage.py','w+') as manage:
manage.write(manage_file)
manage.close()
if not os.path.exists('app'):
os.makedirs('app')
os.chdir('app')
folders = ['static','templates','static/css','static/js','static/images']
base_html = "{% extends 'bootstrap/base.html' %}\n<!doctype html>\n<html><head>{% block head %}\
<link rel='stylesheet' href=\"{{ url_for('static', filename='style.css') }}\">\
<title>{% block title %}{% endblock %} - My Webpage</title>\
{% endblock %} </head> <body> <div id='content'>{% block content %}{% endblock %}</div><div id='footer'>\
{% block footer %}\
© Copyright 2010 by <a href='http://domain.invalid/'>you</a>.\
{% endblock %} </div> </body></html>"
for folder in folders:
if not os.path.exists(folder):
os.makedirs(folder)
if folder=='templates':
with open('templates/base.html','w+') as base_tem:
base_tem.write(base_html)
base_tem.close()
init_file = "from flask import Flask\nfrom config import config_options\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nbootstrap = Bootstrap()\ndb = SQLAlchemy()\ndef create_app(config_state):\n\tapp = Flask(__name__)\n\tapp.config.from_object(config_options[config_state])\n\n\n\tbootstrap.init_app(app)\n\tdb.init_app(app)\n\tfrom .main import main as main_blueprint\n\tapp.register_blueprint(main_blueprint)\n\treturn app"
with open('__init__.py','w+') as init:
init.write(init_file)
init.close()
with open('models.py','w+') as models:
models.write("#models")
models.close()
if not os.path.exists('main'):
os.makedirs('main')
os.chdir('main')
main_init_file = "from flask import Blueprint\nmain = Blueprint('main',__name__)\n\nfrom . import views,error"
view_file="from . import main\n\n@main.route('/')\ndef index():\n\treturn '<h1> Hello World </h1>'"
error_file="from flask import render_template\nfrom . import main\n\n@main.app_errorhandler(404)\ndef for_Ow_four(error):\n\t'''\n\tFunction to render the 404 error page\n\t'''\n\treturn render_template('fourOwfour.html'),404"
blueprint_files = ['__init__.py' ,'views.py' ,'error.py']
for blueprint_file in blueprint_files:
if blueprint_file == '__init__.py':
with open(blueprint_file,'w+') as m_init:
m_init.write(main_init_file)
m_init.close()
elif blueprint_file == 'views.py':
with open(blueprint_file,'w+') as vw:
vw.write(view_file)
vw.close()
else:
with open(blueprint_file,'w+') as er:
er.write(error_file)
er.close()
os.chdir('..')
os.chdir('..')
with open('tests/__init__.py','a') as test_init:
test_init.close()
with open('start.sh','w+') as start:
start.write('python3.6 manage.py server')
start.close()
os.system('chmod a+x start.sh')
version= str(python_version())[:3]
virtual="python%s -m venv virtual"%(version)
os.system(virtual)
os.system('. virtual/bin/activate')
dependencies = ['flask','flask-script', 'flask-bootstrap','gunicorn','flask-wtf','flask-sqlalchemy']
for dependency in dependencies:
pip.main(['install',dependency])
os.system('pip freeze > requirements.txt')
with open('Procfile','w+') as proc:
proc.write('web: gunicorn manage:app')
proc.close()
#ANIMATION CODE!
screen = curses.initscr()
width = screen.getmaxyx()[1]
height = screen.getmaxyx()[0]
size = width*height
char = [" ", ".", ":", "^", "*", "x", "s", "S", "#", "$"]
b = []
curses.curs_set(0)
curses.start_color()
curses.init_pair(1,0,0)
curses.init_pair(2,1,0)
curses.init_pair(3,3,0)
curses.init_pair(4,4,0)
            screen.clear()
for i in range(size+width+1): b.append(0)
for i in range(100):
for i in range(int(width/9)): b[int((random.random()*width)+width*(height-1))]=65
for i in range(size):
b[i]=int((b[i]+b[i+1]+b[i+width]+b[i+width+1])/4)
color=(4 if b[i]>15 else (3 if b[i]>9 else (2 if b[i]>4 else 1)))
if(i<size-1): screen.addstr( int(i/width),
i%width,
char[(9 if b[i]>9 else b[i])],
curses.color_pair(color) | curses.A_BOLD )
screen.refresh()
screen.timeout(30)
if (screen.getch()!=-1): break
curses.endwin()
animation = "|/-\\"
for i in range(20):
time.sleep(0.1)
sys.stdout.write("\r" + animation[i % len(animation)])
sys.stdout.flush()
#do something
print("End!")
cprint("\nCREATED APPLICATION FOLDER STRUCTURE\n HAPPY flasking :)\n","green")
BASE_FOLDER=os.getcwd()
app_folder = 'cd {}'.format(BASE_FOLDER)
os.system(app_folder)
else:
cprint("\nAnother folder with same name already exists\nPlease try with another name\n","red")
def check_app_is_flask():
existing_file_folders = ['app','virtual','config.py','manage.py','Procfile','README.md','requirements.txt','start.sh']
if all(os.path.exists(fl) for fl in existing_file_folders):
return True
else:
cprint("\nPlease navigate into the flask folder\n","red")
return False
def create_blueprint(blueprint_name):
os.makedirs(blueprint_name)
os.chdir(blueprint_name)
blueprint_name_init_file = "from flask import Blueprint\n{} = Blueprint('{}',__name__)\n\nfrom . import views,error".format(blueprint_name,blueprint_name)
view_file="from . import {}\n\n@{}.route('/')\ndef index():\n\treturn '<h1> Hello world </h1>'".format(blueprint_name,blueprint_name)
error_file="from flask import render_template\nfrom . import {}\n\n@{}.app_errorhandler(404)\ndef four_Ow_four(error):\n\t'''\n\tFunction to render the 404 error page\n\t'''\n\treturn render_template('fourOwfour.html'),404".format(blueprint_name,blueprint_name)
blueprint_files = ['__init__.py', 'views.py', 'error.py']
for blueprint_file in blueprint_files:
if blueprint_file == '__init__.py':
with open(blueprint_file,'w+') as b_init:
b_init.write(blueprint_name_init_file)
b_init.close()
elif blueprint_file == 'views.py':
with open(blueprint_file,'w+') as v:
v.write(view_file)
v.close()
else:
with open(blueprint_file,'w+') as err:
err.write(error_file)
err.close()
def create_template(template_name):
with open(template_name+'.html','w+') as template:
template.write("{% extends 'base.html' %}")
template.close()
def add_blueprint():
if check_app_is_flask():
os.chdir('app')
blueprint_name = options['<blueprintname>']
if not os.path.exists(blueprint_name):
create_blueprint(blueprint_name)
temp_message = "Blueprint {} created!".format(blueprint_name)
else:
temp_message = "Blueprint {} already exists!".format(blueprint_name)
cprint(temp_message,"magenta")
def add_template():
if check_app_is_flask():
os.chdir('app')
os.chdir('templates')
template_name = options['<templatename>']
if not os.path.exists(template_name+'.html'):
create_template(template_name)
temp_message = "Template {} created!".format(template_name)
else:
temp_message = "Template {} already exists!".format(template_name)
cprint(temp_message,"magenta")
if options['create']:
try:
if options['app'] and options['<appname>']:
create_structure()
if options['blueprint'] and options['<blueprintname>']:
add_blueprint()
if options['template'] and options['<templatename>']:
add_template()
except:
cprint("\nOops!An error occured\nPlease try again\n","red") | 8d118c409222d7650040cbfb865c9d7952e6d5cf | 926 |
def mprv_from_entropy(entropy: GenericEntropy,
passphrase: str,
lang: str,
xversion: bytes) -> bytes:
"""Return a BIP32 master private key from entropy."""
mnemonic = mnemonic_from_entropy(entropy, lang)
mprv = mprv_from_mnemonic(mnemonic, passphrase, xversion)
return mprv | 7dc9ce4c25f9b84f16731eb37be371de95187a8b | 927 |
import numpy as np
from scipy.io import wavfile
import more_itertools as mit
def analyze_audio(audio_filename, target_freq=TARGET_FREQS, win_size=5000, step=200, min_delay=BEEP_DURATION, sensitivity=250, verbose=True):
"""
Analyze the given audio file to find the tone markers, with the respective frequency and time position.
:param str audio_filename: The Audio filename to analyze to find the markers.
:param tuple target_freq: A tuple containing the int frequencies ( in Hertz ) that the function should recognize.
    :param int win_size: The size of the moving window for the analysis.
Increasing the window increases the accuracy but takes longer.
:param int step: the increment between each window.
:param float min_delay: Minimum duration, in seconds, of the beep to be recognized.
:param int sensitivity: Minimum value of relative amplitude of the beep to be recognized.
:param bool verbose: If true, print some info on the screen.
:return: a list of dict containing the markers positions and frequencies.
"""
print("Analyzing the Audio...")
# Open the wav audio track
# Get the sample rate (fs) and the sample data (data)
fs, data = wavfile.read(audio_filename)
# Calculate the duration, in seconds, of a sample
sample_duration = 1.0 / fs
# Get the total number of samples
total_samples = data.shape[0]
# Calculate the frequencies that the fourier transform can analyze
frequencies = np.fft.fftfreq(win_size)
# Convert them to Hertz
hz_frequencies = frequencies * fs
# Calculate the indexes of the frequencies that are compatible with the target_freq
freq_indexes = []
for freq in target_freq:
# Find the index of the nearest element
index = (np.abs(hz_frequencies - freq)).argmin()
freq_indexes.append(index)
# This will hold the duration of each frequency pulse
duration_count = {}
# Initialize the dictionary
for freq in target_freq:
duration_count[freq] = 0
# Initialize the counter
count = 0
# This list will hold the analysis result
results = []
# Analyze the audio dividing the samples into windows, and analyzing each
# one separately
for window in mit.windowed(data, n=win_size, step=step, fillvalue=0):
# Calculate the FFT of the current window
fft_data = np.fft.fft(window)
        # Calculate the mean absolute amplitude of the window (used as the normalization reference)
fft_abs = np.absolute(window)
# Calculate the mean of the amplitude
fft_mean = np.mean(fft_abs)
# Calculate the current time of the window
ctime = count * sample_duration
# Check, for each target frequency, if present
for i, freq in enumerate(target_freq):
# Get the relative amplitude of the current frequency
freq_amplitude = abs(fft_data[freq_indexes[i]]) / fft_mean
# If the amplitude is greater than the sensitivity,
# Increase the duration counter for the current frequency
if freq_amplitude > sensitivity:
duration_count[freq] += step * sample_duration
else:
# If the duration is greater than the minimum delay, add the result
if duration_count[freq] > min_delay:
results.append({'time': ctime, 'freq': freq})
# Print the result if verbose
if verbose:
print("--> found freq:", freq, "time:", ctime)
duration_count[freq] = 0
count += step
# Print the progress every 100000 samples
if verbose and count % 100000 == 0:
percent = round((count/total_samples) * 100)
print("\rAnalyzing {}% ".format(percent), end="")
print() # Reset the new line
return results | 63a5dfd65075b592662309082630011c234a3d52 | 928 |
import json
def read_usgs_file(file_name):
"""
Reads a USGS JSON data file (from https://waterdata.usgs.gov/nwis)
Parameters
----------
file_name : str
Name of USGS JSON data file
Returns
-------
data : pandas DataFrame
Data indexed by datetime with columns named according to the parameter's
variable description
"""
with open(file_name) as json_file:
text = json.load(json_file)
data = _read_usgs_json(text)
return data | cfba1da7bb5f34a18292dc914f8128cab538850e | 929 |
def get_cantus_firmus(notes):
"""
Given a list of notes as integers, will return the lilypond notes
for the cantus firmus.
"""
result = ""
# Ensure the notes are in range
normalised = [note for note in notes if note > 0 and note < 18]
if not normalised:
return result
# Set the duration against the first note.
result = NOTES[normalised[0]] + " 1 "
# Translate all the others.
result += " ".join([NOTES[note] for note in normalised[1:]])
# End with a double bar.
result += ' \\bar "|."'
# Tidy up double spaces.
result = result.replace(" ", " ")
return result | d193088a6665df363d032f69b6fd3db80c8bce4a | 930 |
def get_wildcard_values(config):
"""Get user-supplied wildcard values."""
return dict(wc.split("=") for wc in config.get("wildcards", [])) | 0ca15b82ebed47dec9d46991cb4db45ee72eb3af | 931 |
def predict(model_filepath, config, input_data):
"""Return prediction from user input."""
# Load model
model = Model.load(model_filepath + config['predicting']['model_name'])
# Predict
prediction = int(np.round(model.predict(input_data), -3)[0])
return prediction | afc61eaba1265efded59f182fa6639a3d2e534e2 | 932 |
from typing import List
from typing import Optional
from pathlib import Path
import os
import tempfile
from typing import Tuple
from typing import Union
import subprocess
import sys
def main() -> int:
"""Runs protoc as configured by command-line arguments."""
parser = _argument_parser()
args = parser.parse_args()
if args.plugin_path is None and args.language not in BUILTIN_PROTOC_LANGS:
parser.error(
f'--plugin-path is required for --language {args.language}')
args.out_dir.mkdir(parents=True, exist_ok=True)
include_paths: List[str] = []
if args.include_file:
include_paths = [f'-I{line.strip()}' for line in args.include_file]
wrapper_script: Optional[Path] = None
# On Windows, use a .bat version of the plugin if it exists or create a .bat
# wrapper to use if none exists.
if os.name == 'nt' and args.plugin_path:
if args.plugin_path.with_suffix('.bat').exists():
args.plugin_path = args.plugin_path.with_suffix('.bat')
_LOG.debug('Using Batch plugin %s', args.plugin_path)
else:
with tempfile.NamedTemporaryFile('w', suffix='.bat',
delete=False) as file:
file.write(f'@echo off\npython {args.plugin_path.resolve()}\n')
args.plugin_path = wrapper_script = Path(file.name)
_LOG.debug('Using generated plugin wrapper %s', args.plugin_path)
cmd: Tuple[Union[str, Path], ...] = (
'protoc',
f'-I{args.compile_dir}',
*include_paths,
*DEFAULT_PROTOC_ARGS[args.language](args),
*args.sources,
)
try:
process = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
finally:
if wrapper_script:
wrapper_script.unlink()
if process.returncode != 0:
_LOG.error('Protocol buffer compilation failed!\n%s',
' '.join(str(c) for c in cmd))
sys.stderr.buffer.write(process.stdout)
sys.stderr.flush()
return process.returncode | 1017c086b934a6f32cfe732bba3d060842b23cf4 | 933 |
import numpy as np
def gauss3D_FIT(xyz, x0, y0, z0, sigma_x, sigma_y, sigma_z):
"""
gauss3D_FIT((x,y,z),x0,y0,z0,sigma_x,sigma_y,sigma_z)
Returns the value of a gaussian at a 2D set of points for the given
standard deviations with maximum normalized to 1.
The Gaussian axes are assumed to be 90 degrees from each other.
xyz -
x0, y0, z0 = the x, y, z centers of the Gaussian
sigma_x, sigma_y, sigma_z = The std. deviations of the Gaussian.
Note
-----
Be careful about the indexing used in meshgrid and the order in which you pass the x, y, z variables in.
Parameters
----------
xyz: tuple of ndarrays
A tuple containing the 3D arrays of points (from meshgrid)
x0, y0, z0: float
The x, y, z centers of the Gaussian
sigma_x, sigma_y, sigma_z: float
The standard deviations of the Gaussian.
Returns
-------
g3_norm: ndarray
A flattened array for fitting.
"""
x0 = float(x0)
y0 = float(y0)
z0 = float(z0)
x = xyz[0]
y = xyz[1]
z = xyz[2]
g3 = np.exp(
-(
(x - x0) ** 2 / (2 * sigma_x ** 2)
+ (y - y0) ** 2 / (2 * sigma_y ** 2)
+ (z - z0) ** 2 / (2 * sigma_z ** 2)
)
)
g3_norm = g3 / np.max(g3.flatten())
return g3_norm.ravel() | 8e4337760c8064fb553361240f2cfa04ec379c76 | 934 |
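Because the output is flattened by ravel(), the function plugs directly into scipy.optimize.curve_fit. A minimal sketch with an arbitrary example grid and made-up centre/width values (note the 'ij' indexing so axis order matches the x, y, z arguments):

import numpy as np
from scipy.optimize import curve_fit

x, y, z = np.meshgrid(np.arange(32), np.arange(32), np.arange(32), indexing='ij')
data = gauss3D_FIT((x, y, z), 16, 14, 18, 3.0, 4.0, 2.5)   # flattened 1-D target
popt, _ = curve_fit(gauss3D_FIT, (x, y, z), data, p0=(15, 15, 15, 2, 2, 2))
print(np.round(popt, 2))  # recovers roughly [16, 14, 18, 3, 4, 2.5]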
async def tell(message: str) -> None:
"""Send a message to the user.
Args:
message: The message to send to the user.
"""
return await interaction_context().tell(message) | 8e82ceece1896b2b8cc805cf30cca79e64e0cf4e | 935 |
def PGetDim (inFFT):
"""
Get dimension of an FFT
returns array of 7 elements
* inFFT = Python Obit FFT
"""
################################################################
# Checks
if not PIsA(inFFT):
raise TypeError("inFFT MUST be a Python Obit FFT")
return Obit.FFTGetDim(inFFT.me)
# end PGetDim | c95f80f465f0f69a2e144bf4b52a2e7965c8f87c | 936 |
def score_detail(fpl_data):
"""
convert fpl_data into Series
Index- multi-index of team, pos, player, opp, minutes
"""
l =[]
basic_index = ["player", "opp", "minutes"]
for i in range(len(fpl_data["elements"])):
ts=achived_from(fpl_data, i, True)
name = (fpl_data["elements"][i]["first_name"]+
fpl_data["elements"][i]["second_name"])
if len(ts)==0:
continue
ts=pd.concat([ts,], keys=[name], names=basic_index)
ele = pos_map(fpl_data)[fpl_data["elements"][i]['element_type']]
ts=pd.concat([ts,], keys=[ele], names=["pos"]+basic_index)
team = team_map(fpl_data)[fpl_data["elements"][i]['team']]
ts=pd.concat([ts,], keys=[team], names=["team", "pos"]+basic_index)
l.append(ts)
return pd.concat(l) | fd70f92efffb42e8d5849f4fa2eaf090e87daa57 | 937 |
def edition_view(measurement, workspace, exopy_qtbot):
"""Start plugins and add measurements before creating the execution view.
"""
pl = measurement.plugin
pl.edited_measurements.add(measurement)
measurement.root_task.add_child_task(0, BreakTask(name='Test'))
item = MeasurementEditorDockItem(workspace=workspace,
measurement=measurement,
name='test')
return DockItemTestingWindow(widget=item) | f84ed466468b9732c9aef9c3fc9244a5e57583cd | 938 |
def menu_items():
""" Add a menu item which allows users to specify their session directory
"""
def change_session_folder():
global session_dir
path = str(QtGui.QFileDialog.getExistingDirectory(None,
'Browse to new session folder -'))
session_dir = path
utils.setrootdir(path)
writetolog("*" * 79 + "\n" + "*" * 79)
writetolog(" output directory: " + session_dir)
writetolog("*" * 79 + "\n" + "*" * 79)
lst = []
lst.append(("Change session folder", change_session_folder))
return(lst) | ec5177e53eaa1a2de38276ca95d41f944dd9d4a3 | 939 |
def calculate_pair_energy_np(coordinates, i_particle, box_length, cutoff):
"""
Calculates the interaction energy of one particle with all others in system.
Parameters:
```````````
coordinates : np.ndarray
2D array of [x,y,z] coordinates for all particles in the system
i_particle : int
the particle row for which to calculate energy
box_length : float
the length of the simulation box
cutoff : float
the cutoff interaction length
Returns:
````````
e_total : float
the pairwise energy between the i-th particle and other particles in system
"""
particle = coordinates[i_particle][:]
coordinates = np.delete(coordinates, i_particle, 0)
e_array = np.zeros(coordinates.shape)
dist = calculate_distance_np(particle, coordinates, box_length)
e_array = dist[dist < cutoff]
e_array = calculate_LJ_np(e_array)
e_total = e_array.sum()
return e_total | fecc44e54b4cbef12e6b197c34971fc54a91d3ce | 940 |
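The helpers calculate_distance_np and calculate_LJ_np are not included in the snippet; a plausible sketch of what they compute, assuming the usual minimum-image convention and reduced Lennard-Jones units (sigma = epsilon = 1):

import numpy as np

def calculate_distance_np(r1, r2, box_length):
    # Minimum-image distance between point r1 and every row of r2 (assumed convention).
    diff = r1 - r2
    diff -= box_length * np.round(diff / box_length)
    return np.linalg.norm(diff, axis=-1)

def calculate_LJ_np(r):
    # Lennard-Jones pair energy in reduced units.
    return 4 * ((1.0 / r) ** 12 - (1.0 / r) ** 6)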
def inside_loop(iter):
"""
>>> inside_loop([1,2,3])
3
>>> inside_loop([])
Traceback (most recent call last):
...
UnboundLocalError: local variable 'i' referenced before assignment
"""
for i in iter:
pass
return i | c94720cddec7d3d151c9aea8d8d360564fbffe66 | 941 |
def _pattern_data_from_form(form, point_set):
"""Handles the form in which the user determines which algorithms
to run with the uploaded file, and computes the algorithm results.
Args:
form: The form data
point_set: Point set representation of the uploaded file.
Returns:
Musical pattern discovery results of the algorithms
chosen by the user.
"""
pattern_data = []
# SIATEC
min_pattern_length = form.getlist('siatec-min-pattern-length')
min_pattern_length = [int(x) for x in min_pattern_length]
for i in range(len(min_pattern_length)):
pattern_data.append(
siatec.compute(
point_set=point_set,
min_pattern_length=min_pattern_length[i]
)
)
# timewarp-invariant algorithm
window = form.getlist('timewarp-window')
window = [int(x) for x in window]
min_pattern_length = form.getlist('timewarp-min-pattern-length')
min_pattern_length = [int(x) for x in min_pattern_length]
for i in range(len(window)):
pattern_data.append(
time_warp_invariant.compute(
point_set=point_set,
window=window[i],
min_pattern_length=min_pattern_length[i]
)
)
return pattern_data | ba69a058fd6a641166ebf4040dc7f780fc8b1a1e | 942 |
def group(help_doc):
"""Creates group options instance in module options instnace"""
return __options.group(help_doc) | a715353bb86ecd511522283c941a66830926a1d3 | 943 |
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
def convert_pdf_to_txt(path, pageid=None):
"""
This function scrambles the text. There may be values for LAParams
that fix it but that seems difficult so see getMonters instead.
This function is based on convert_pdf_to_txt(path) from
RattleyCooper's Oct 21 '14 at 19:47 answer
edited by Trenton McKinney Oct 4 '19 at 4:10
on <https://stackoverflow.com/a/26495057>.
Keyword arguments:
pageid -- Only process this page id.
"""
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
try:
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
except TypeError as ex:
if ("codec" in str(ex)) and ("unexpected keyword" in str(ex)):
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
# print("page: {}".format(dir(page)))
if (pageid is None) or (pageid == page.pageid):
print("page.pageid: {}".format(page.pageid))
interpreter.process_page(page)
if pageid is not None:
break
text = retstr.getvalue()
print(text)
fp.close()
device.close()
retstr.close()
return text | 9c215c539054bd88c5d7f2bf9d38e904fc53b0d6 | 944 |
from typing import Optional
import socket
import os
def get_available_port(preferred_port: Optional[int] = None) -> int:
"""Finds an available port for use in webviz on localhost. If a reload process,
it will reuse the same port as found in the parent process by using an inherited
environment variable.
If preferred_port is given, ports in the range [preferred_port, preferred_port + 20)
will be tried first, before an OS provided random port is used as fallback.
"""
def is_available(port: int) -> bool:
with socket.socket() as sock:
try:
sock.bind(("localhost", port))
return True
except OSError:
return False
if os.environ.get("WEBVIZ_PORT") is None:
port = None
if preferred_port is not None:
for port_to_test in range(preferred_port, preferred_port + 20):
if is_available(port_to_test):
port = port_to_test
break
if port is None:
with socket.socket() as sock:
sock.bind(("localhost", 0))
port = sock.getsockname()[1]
os.environ["WEBVIZ_PORT"] = str(port)
return port
return int(os.environ.get("WEBVIZ_PORT")) | 0c5ba897252dee42e097608d240068be2de3fe52 | 945 |
def safe_str(val, default=None):
"""Safely cast value to str, Optional: Pass default value. Returned if casting fails.
Args:
val:
default:
Returns:
"""
if val is None:
return default if default is not None else ''
return safe_cast(val, str, default) | d5abb2426de99aa8aac22660ce53fa4aec6424e3 | 946 |
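safe_cast is not shown above; a minimal sketch of the helper it presumably relies on, plus usage:

def safe_cast(val, to_type, default=None):
    # Assumed helper: cast val to to_type, falling back to default on failure.
    try:
        return to_type(val)
    except (ValueError, TypeError):
        return default

print(safe_str(3.14))         # '3.14'
print(safe_str(None, "n/a"))  # 'n/a'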
def mod2():
"""
Create a simple model for incorporation tests
"""
class mod2(mod1):
def __init__(self, name, description):
super().__init__(name, "Model 1")
self.a = self.createVariable("a",dimless,"a")
self.b = self.createVariable("b",dimless,"b")
self.c = self.createParameter("c",dimless,"c")
self.c.setValue(2.)
eq21 = self.a() + self.b() + self.c()
eq22 = self.b() - self.f()
self.createEquation("eq21", "Generic equation 2.1", eq21)
self.createEquation("eq22", "Generic equation 2.2", eq22)
mod = mod2("M2", "Model 2")
mod()
return mod | cef4ad517971a1eb00ece97b7d90be1895e1ab0f | 947 |
def zero_order(freq,theta,lcandidat,NumTopic):
"""
Calculate the Zero-Order Relevance
Parameters:
----------
freq : Array containing the frequency of occurrences of each word in the whole corpus
theta : Array containing the frequency of occurrences of each word in each topic
lcandidat: Array containing each label candidate
NumTopic : The number of the topic
Returns:
-------
topCandidate : Array containing the name of the top 10 score candidate for a given topic
"""
    # W: matrix containing the score of each word for each topic
    W = np.log(theta / freq)
    # scores of all candidates for topic NumTopic
    score = np.array([])
    for indice in range(len(lcandidat)):
        candidat = lcandidat[indice].split(" ")
        i = id2word.doc2idx(candidat)
        # drop the -1 entries (meaning "not found")
        i[:] = [v for v in i if v != -1]
        score = np.append(score, np.sum(W[NumTopic, i]))
#topValue, topCandidate = top10Score(score,lCandidat)
dicti=top10ScoreCandidat(score,lcandidat)
return dicti | 38cd3207b375db06302cb063270c180bc4b9617b | 948 |
def compute_check_letter(dni_number: str) -> str:
"""
Given a DNI number, obtain the correct check letter.
:param dni_number: a valid dni number.
:return: the check letter for the number as an uppercase, single character
string.
"""
return UPPERCASE_CHECK_LETTERS[int(dni_number) % 23] | 58a7d54db2736351aef4957f17ed55ce13af7f0a | 949 |
import time
def uptime_check(delay=1):
"""Performs uptime checks to two URLs
Args:
delay: The number of seconds delay between two uptime checks, optional, defaults to 1 second.
Returns: A dictionary, where the keys are the URL checked, the values are the corresponding status (1=UP, 0=DOWN)
"""
urls = ["https://httpstat.us/503", "https://httpstat.us/200"]
url_status = {}
for url in urls:
url_status[url] = check_url(url)[0]
time.sleep(delay)
return url_status | 69c8f76a28ec0cb59f08252d8d2bcb04fc85782e | 950 |
from math import log2
def entropy_column(input):
    """Return the Shannon entropy of one column of the motif matrix.
    `input` is the list of nucleotides in that column."""
nucleotides = {'A': 0, 'T': 0, 'C': 0, 'G': 0}
for item in input:
nucleotides[item] = nucleotides[item]+1
for key in nucleotides:
temp_res = nucleotides[key]/len(input)
if temp_res > 0:
nucleotides[key] = temp_res * abs(log2(temp_res))
else:
continue
sum = 0
for key in nucleotides:
sum = sum + nucleotides[key]
# print(nucleotides)
return sum | 3079f7b5d40e02f00b7f36de6ad6df9ff6b6ec41 | 951 |
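A worked example: in the column ['A', 'A', 'T', 'G'] the frequencies are 0.5, 0.25 and 0.25, so the entropy is 0.5*1 + 0.25*2 + 0.25*2 = 1.5 bits, while a fully conserved column scores 0:

print(entropy_column(['A', 'A', 'T', 'G']))  # 1.5
print(entropy_column(['C', 'C', 'C', 'C']))  # 0.0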
def get_partner_from_token(access_token):
"""
Walk the token->client->user->partner chain so we can
connect the the `LinkageEntity` row to a `PartnerEntity`
"""
tok = OauthAccessTokenEntity.query.filter_by(
access_token=access_token).one_or_none()
log.debug("get_partner_from_token found: {}".format(tok))
return tok.client.user.partner | 2ad3ebc962caedaba7567ec3ef977fc60d7ffe52 | 952 |
def sumVoteCount(instance):
""" Returns the sum of the vote count of the instance.
:param instance: The instance.
:type instance: preflibtools.instance.preflibinstance.PreflibInstance
:return: The sum of vote count of the instance.
:rtype: int
"""
return instance.sumVoteCount | 6683e31a2e5ec9904c5f35e60622310b6688a635 | 953 |
def left_d_threshold_sequence(n,m):
"""
Create a skewed threshold graph with a given number
of vertices (n) and a given number of edges (m).
The routine returns an unlabeled creation sequence
for the threshold graph.
FIXME: describe algorithm
"""
    cs=['d']+['i']*(n-1) # create sequence with n isolated nodes
# m <n : not enough edges, make disconnected
if m < n:
cs[m]='d'
return cs
# too many edges
if m > n*(n-1)/2:
raise ValueError("Too many edges for this many nodes.")
# Connected case when M>N-1
cs[n-1]='d'
sum=n-1
ind=1
while sum<m:
cs[ind]='d'
sum += ind
ind += 1
if sum>m: # be sure not to change the first vertex
cs[sum-m]='i'
return cs | 42010e427530705292804d57c1f52c0ab163322b | 954 |
import os
def write_json(obj, filename):
"""
Write a json file, if the output directory exists.
"""
if not os.path.exists(os.path.dirname(filename)):
return
return write_file(sjson.dump(obj), filename) | a48d4fdc7fe0c7ec027d22699804c1a6301c5456 | 955 |
def get_user_solutions(username):
"""Returns all solutions submitted by the specified user.
Args:
username: The username.
Returns:
A solution list.
Raises:
KeyError: If the specified user is not found.
"""
user = _db.users.find_one({'_id': username})
if not user:
raise KeyError('User not found: %s' % username)
solutions = _db.solutions.find(
{
'owner': user['_id']
},
projection=('resemblance_int', 'solution_size', 'problem_id', '_id'))
# manually select the best (and oldest) solution
table = {}
for solution in solutions:
problem_id = solution['problem_id']
if problem_id in table:
old_solution = table[problem_id]
if solution['resemblance_int'] > old_solution['resemblance_int'] or \
(solution['resemblance_int'] == old_solution['resemblance_int'] and solution['_id'] < old_solution['_id']):
table[problem_id] = solution
else:
table[problem_id] = solution
# sort by problem_id
solutions = table.values()
solutions.sort(key=lambda solution: solution['problem_id'])
return solutions | b1257962ee52707d39988ec1cc535c390df064e6 | 956 |
import os
def get_terms_kullback_leibler(output_dir):
"""Returns all zero-order TERMs propensities of structure"""
if output_dir[-1] != '/':
output_dir += '/'
frag_path = output_dir + 'fragments/'
designscore_path = output_dir + 'designscore/'
terms_propensities = dict()
terms = [f.split('.')[0] for f in os.listdir(frag_path)]
for term in terms:
rns = get_resnums(frag_path + term + '.pdb')
rn = int(term.split('_')[-1][1:])
seq_dict = zero_order_freq(rn, rns, designscore_path + 't1k_' + term + '.seq')
si_dict = calc_kullback_leibler(seq_dict)
terms_propensities[rn] = si_dict
return terms_propensities | d0ea46883a7c8d952ca186715c99e7891e773167 | 957 |
def add_standard_attention_hparams(hparams):
"""Adds the hparams used by get_standadized_layers."""
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
# hparams used and which should have been defined outside (in
# common_hparams):
# Global flags
# hparams.mode
# hparams.hidden_size
# Pre-post processing flags
# hparams.layer_preprocess_sequence
# hparams.layer_postprocess_sequence
# hparams.layer_prepostprocess_dropout
# hparams.norm_type
# hparams.norm_epsilon
# Mixture-of-Expert flags
# hparams.moe_hidden_sizes
# hparams.moe_num_experts
# hparams.moe_k
# hparams.moe_loss_coef
# Attention layers flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("attention_dropout", 0.0)
# Attention: Local
hparams.add_hparam("attention_loc_block_length", 256)
# Attention: Local (unmasked only): How much to look left.
hparams.add_hparam("attention_loc_block_width", 128)
# Attention: Memory-compressed
hparams.add_hparam("attention_red_factor", 3)
hparams.add_hparam("attention_red_type", "conv")
hparams.add_hparam("attention_red_nonlinearity", "none")
# Fully connected layers flags
# To be more consistent, should use filter_size to also control the MOE
# size if moe_hidden_sizes not set.
hparams.add_hparam("filter_size", 2048)
hparams.add_hparam("relu_dropout", 0.0)
return hparams | de9f1a3b30a105a89d3400ca0b36e4c747f1ab46 | 958 |
import collections
import functools
def make_conv2d_layer_class(strides, padding):
"""Creates a `Conv2DLayer` class.
Args:
strides: A 2-tuple of positive integers. Strides for the spatial dimensions.
padding: A Python string. Can be either 'SAME' or 'VALID'.
Returns:
conv2d_layer_class: A new `Conv2DLayer` class that closes over the args to
this function.
"""
# TODO(siege): We do this rather than storing parameters explicitly inside the
# class because we want to avoid having to use a CompositeTensor-like
# functionality, as that requires annotating a large porting of TFP with
# expand_composites=True. This isn't worth doing for this experimental
# library.
class Conv2DLayer(collections.namedtuple('Conv2DLayer', [
'kernel',
])):
"""2-dimensional convolution (in the standard deep learning sense) layer.
See `tf.nn.conv` for the mathematical details of what this does.
Attributes:
kernel: A floating point Tensor with shape `[width, height, in_channels,
out_channels]`.
"""
__slots__ = ()
@property
def strides(self):
"""Strides for the spatial dimensions."""
return strides
@property
def padding(self):
"""Padding."""
return padding
def __call__(self, x):
"""Applies the layer to an input.
Args:
x: A floating point Tensor with shape `[batch, height, width,
in_channels]`.
Returns:
y: A floating point Tensor with shape `[batch, height', width',
out_channels]`. The output `width'` and `height'` depend on the value
of
`padding`.
"""
@functools.partial(
vectorization_util.make_rank_polymorphic, core_ndims=(4, 4))
# In an ideal world we'd broadcast the kernel shape with the batch shape
# of the input, but the hardware does not like that.
def do_conv(kernel, x):
return tf.nn.conv2d(
x,
filters=kernel,
strides=(1,) + self.strides + (1,),
padding=self.padding,
)
return do_conv(self.kernel, x)
return Conv2DLayer | 6c665c0016a5d0145556093b51dd5d127560bc23 | 959 |
import numpy as np
from pandas import DataFrame
def get_df1_df2(X: np.array, y: np.array) -> [DataFrame, DataFrame]:
"""
Get DataFrames for points with labels 1 and -1
:param X:
:param y:
:return:
"""
x1 = np.array([X[:, i] for i in range(y.shape[0]) if y[i] == 1]).T
x2 = np.array([X[:, i] for i in range(y.shape[0]) if y[i] == -1]).T
df1 = DataFrame({'x': list(), 'y': list()})
df2 = DataFrame({'x': list(), 'y': list()})
    if len(x1) > 0:
df1 = DataFrame({'x': x1[0], 'y': x1[1]})
    if len(x2) > 0:
df2 = DataFrame({'x': x2[0], 'y': x2[1]})
return [df1, df2] | 783a69a9be0e56ca3509fa38845df4f1533ef45e | 960 |
import base64
def dnsip6encode(data):
"""
encodes the data as a single IPv6 address
:param data: data to encode
:return: encoded form
"""
if len(data) != 16:
print_error("dnsip6encode: data is more or less than 16 bytes, cannot encode")
return None
res = b''
reslen = 0
for i in range(len(data)):
res += base64.b16encode(data[i:i+1])
reslen += 1
if reslen % 2 == 0:
res += b':'
return res[:-1] | 0055029150c1a125b88ac5f5700d8bf2fb70d9c2 | 961 |
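A quick example: sixteen input bytes are hex-encoded (uppercase, as base64.b16encode produces) with a colon after every second byte, giving the familiar IPv6 textual grouping:

print(dnsip6encode(bytes(range(16))))
# b'0001:0203:0405:0607:0809:0A0B:0C0D:0E0F'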
def gcm_send_bulk_message(registration_ids, data, encoding='utf-8', **kwargs):
"""
Standalone method to send bulk gcm notifications
"""
messenger = GCMMessenger(registration_ids, data, encoding=encoding, **kwargs)
return messenger.send_bulk() | cace5a07d0b903d0f4aa1694faf7366ea7b9c928 | 962 |
import torch
def apply_net_video(net, arr, argmax_output=True, full_faces='auto'):
"""Apply a preloaded network to input array coming from a video of one eye.
Note that there is (intentionally) no function that both loads the net and applies it; loading
the net should ideally only be done once no matter how many times it is run on arrays.
Arguments:
net: Network loaded by load_net
arr: numpy array of shape (h, w, 3) or (batch_size, h, w, 3) with colors in RGB order
generally (h, w) = (4000, 6000) for full faces and (4000, 3000) for half-faces
although inputs are all resized to (256, 256)
argmax_output: if True, apply argmax to output values to get categorical mask
full_faces: whether inputs are to be treated as full faces; note that the networks take half-faces
By default, base decision on input size
Returns:
Segmentation mask and potentially regression output.
Regression output present if a regression-generating network was used
Segmentation mask a numpy array of shape (batch_size, h, w) if argmax_output
else (batch_size, h, w, num_classes)
Regression output a numpy array of shape (batch_size, 4) for half-faces or (batch_size, 8) for full faces;
one iris's entry is in the format (x,y,r,p) with p the predicted probability of iris presence;
for full faces, each entry is (*right_iris, *left_iris)"""
if len(arr.shape)==3:
arr = arr[np.newaxis]
tens = torch.tensor(arr.transpose(0,3,1,2), dtype=torch.float)
orig_tens_size = tens.size()[2:]
input_tensor = F.interpolate(tens, size=(256,256), mode='bilinear', align_corners=False)
input_tensor = input_tensor.cuda()
with torch.no_grad():
output = net(input_tensor)
if 'reg' in net.outtype:
seg, reg = output
reg = reg.detach().cpu().numpy()
reg = np.concatenate([reg[:,:3], sigmoid(reg[:,3:])], 1)
else:
seg = output
segmentation = seg.detach().cpu()
segmentation = F.interpolate(segmentation, size=orig_tens_size, mode='bilinear', align_corners=False)
seg_arr = segmentation.numpy().transpose(0,2,3,1)
seg_arr = cleanupseg(seg_arr)
if argmax_output:
seg_arr = np.argmax(seg_arr, 3)
if 'reg' in net.outtype:
return seg_arr, reg
else:
return seg_arr | 3d6acd156761c651572a8b6a27d8511b2e88cc20 | 963 |
def Storeligandnames(csv_file):
"""It identifies the names of the ligands in the csv file
PARAMETERS
----------
csv_file : filename of the csv file with the ligands
RETURNS
-------
lig_list : list of ligand names (list of strings)
"""
Lig = open(csv_file,"rt")
lig_aux = []
for ligand in Lig:
lig_aux.append(ligand.replace(" ","_").replace("\n","").lower())
return lig_aux | dc4510a4ea946eaf00152cb445acdc7535ce0379 | 964 |
def chunk_to_rose(station):
"""
Builds data suitable for Plotly's wind roses from
a subset of data.
Given a subset of data, group by direction and speed.
Return accumulator of whatever the results of the
incoming chunk are.
"""
    # bin into three different petal count categories: 36pt, 16pt, and 8pt
bin_list = [
list(range(5, 356, 10)),
list(np.arange(11.25, 349, 22.5)),
list(np.arange(22.5, 338, 45)),
]
bname_list = [
list(range(1, 36)),
list(np.arange(2.25, 34, 2.25)),
list(np.arange(4.5, 32, 4.5)),
]
# Accumulator dataframe.
proc_cols = [
"sid",
"direction_class",
"speed_range",
"count",
"frequency",
"decade",
"pcount",
]
accumulator = pd.DataFrame(columns=proc_cols)
for bins, bin_names, pcount in zip(bin_list, bname_list, [36, 16, 8]):
# Assign directions to bins.
# We'll use the exceptional 'NaN' class to represent
# 355º - 5º, which would otherwise be annoying.
# Assign 0 to that direction class.
ds = pd.cut(station["wd"], bins, labels=bin_names)
station = station.assign(direction_class=ds.cat.add_categories("0").fillna("0"))
# First compute yearly data.
# For each direction class...
directions = station.groupby(["direction_class"])
for direction, d_group in directions:
# For each wind speed range bucket...
for bucket, bucket_info in speed_ranges.items():
d = d_group.loc[
(
station["ws"].between(
bucket_info["range"][0],
bucket_info["range"][1],
inclusive=True,
)
== True
)
]
count = len(d.index)
full_count = len(station.index)
frequency = 0
if full_count > 0:
frequency = round(((count / full_count) * 100), 2)
accumulator = accumulator.append(
{
"sid": station["sid"].values[0],
"direction_class": direction,
"speed_range": bucket,
"count": count,
"frequency": frequency,
"decade": station["decade"].iloc[0],
"month": station["month"].iloc[0],
"pcount": pcount,
},
ignore_index=True,
)
accumulator = accumulator.astype(
{"direction_class": np.float32, "count": np.int32, "frequency": np.float32,}
)
return accumulator | 70adc8fe1ec4649ac6f58131f7bb893760cf6b8c | 965 |
def loadKiosk(eventid):
"""Renders kiosk for specified event."""
event = Event.get_by_id(eventid)
return render_template("/events/eventKiosk.html",
event = event,
eventid = eventid) | 19acab2648c1d32c5214a42797347d8563996abd | 966 |
def bson_encode(data: ENCODE_TYPES) -> bytes:
"""
Encodes ``data`` to bytes. BSON records in list are delimited by '\u241E'.
"""
if data is None:
return b""
elif isinstance(data, list):
encoded = BSON_RECORD_DELIM.join(_bson_encode_single(r) for r in data)
# We are going to put a delimiter right at the head as a signal that this is
# a list of bson files, even if it is only one record
encoded = BSON_RECORD_DELIM + encoded
return encoded
else:
return _bson_encode_single(data) | 1fe61cc9c38d34c42d20478671c179c8f76606b0 | 967 |
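# Usage sketch for bson_encode; it assumes the module defines BSON_RECORD_DELIM
# (presumably '\u241E'.encode(), per the docstring) and a _bson_encode_single
# helper, neither of which is shown in this snippet.
def _demo_bson_encode():
    single = bson_encode({"name": "alice", "age": 30})
    batch = bson_encode([{"i": 1}, {"i": 2}])  # leading delimiter marks a list
    empty = bson_encode(None)                  # -> b""
    return single, batch, empty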
def _GetTailStartingTimestamp(filters, offset=None):
"""Returns the starting timestamp to start streaming logs from.
Args:
filters: [str], existing filters, should not contain timestamp constraints.
offset: int, how many entries ago we should pick the starting timestamp.
If not provided, unix time zero will be returned.
Returns:
str, A timestamp that can be used as lower bound or None if no lower bound
is necessary.
"""
if not offset:
return None
entries = list(logging_common.FetchLogs(log_filter=' AND '.join(filters),
order_by='DESC',
limit=offset))
if len(entries) < offset:
return None
  return entries[-1].timestamp | 0362df8948a1762e85cfaaa8c32565d9f1517132 | 968
def main(data_config_file, app_config_file):
"""Print delta table schemas."""
logger.info('data config: ' + data_config_file)
logger.info('app config: ' + app_config_file)
# load configs
ConfigSet(name=DATA_CFG, config_file=data_config_file)
cfg = ConfigSet(name=APP_CFG, config_file=app_config_file)
# get list of delta tables to load
tables = cfg.get_value(DATA_CFG + '::$.load_delta')
for table in tables:
path = table['path']
        spark = SparkConfig().spark_session(config_name=APP_CFG, app_name="graph_db")
df = spark.read.format('delta').load(path)
df.printSchema()
return 0 | de3247618664a38245a9ad60129dbe1881ee84c6 | 969 |
def porosity_to_n(porosity,GaN_n,air_n):
"""Convert a porosity to a refractive index. using the volume averaging theory"""
porous_n = np.sqrt((1-porosity)*GaN_n*GaN_n + porosity*air_n*air_n)
return porous_n | a4fa765b1870823731cefa5747a0078bbf4d4b4e | 970 |
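# Quick numerical check of porosity_to_n (volume averaging theory): the effective
# index lies between the air and GaN indices. The index values are illustrative,
# not calibrated material data.
def _demo_porosity_to_n():
    n_gan, n_air = 2.4, 1.0
    n_eff = porosity_to_n(0.3, n_gan, n_air)  # sqrt(0.7*2.4**2 + 0.3*1.0**2) ~= 2.08
    assert n_air < n_eff < n_gan
    return n_eff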
def _indexing_coordi(data, coordi_size, itm2idx):
"""
    function: map each fashion item in every coordi (outfit) to its integer index via itm2idx
"""
print('indexing fashion coordi')
vec = []
for d in range(len(data)):
vec_crd = []
for itm in data[d]:
ss = np.array([itm2idx[j][itm[j]] for j in range(coordi_size)])
vec_crd.append(ss)
vec_crd = np.array(vec_crd, dtype='int32')
vec.append(vec_crd)
return np.array(vec, dtype='int32') | b3ee0594c7090742ba2dcb65545a31cd73f7805b | 971 |
def plot_precentile(arr_sim, arr_ref, num_bins=1000, show_top_percentile=1.0):
""" Plot top percentile (as specified by show_top_percentile) of best restults
in arr_sim and compare against reference values in arr_ref.
Args:
-------
arr_sim: numpy array
Array of similarity values to evaluate.
arr_ref: numpy array
Array of reference values to evaluate the quality of arr_sim.
num_bins: int
Number of bins to divide data (default = 1000)
show_top_percentile
Choose which part to plot. Will plot the top 'show_top_percentile' part of
all similarity values given in arr_sim. Default = 1.0
"""
start = int(arr_sim.shape[0] * show_top_percentile / 100)
idx = np.argpartition(arr_sim, -start)
starting_point = arr_sim[idx[-start]]
if starting_point == 0:
print("not enough datapoints != 0 above given top-precentile")
# Remove all data below show_top_percentile
low_as = np.where(arr_sim < starting_point)[0]
length_selected = arr_sim.shape[0] - low_as.shape[0] # start+1
data = np.zeros((2, length_selected))
data[0, :] = np.delete(arr_sim, low_as)
data[1, :] = np.delete(arr_ref, low_as)
data = data[:, np.lexsort((data[1, :], data[0, :]))]
ref_score_cum = []
for i in range(num_bins):
low = int(i * length_selected / num_bins)
# high = int((i+1) * length_selected/num_bins)
ref_score_cum.append(np.mean(data[1, low:]))
ref_score_cum = np.array(ref_score_cum)
fig, ax = plt.subplots(figsize=(6, 6))
plt.plot(
(show_top_percentile / num_bins * (1 + np.arange(num_bins)))[::-1],
ref_score_cum,
color='black')
plt.xlabel("Top percentile of spectral similarity score g(s,s')")
plt.ylabel("Mean molecular similarity (f(t,t') within that percentile)")
return ref_score_cum | f2c024350ccba4dca83bb38ab6742d0e18cb7d3e | 972 |
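# Usage sketch for plot_precentile with synthetic, correlated data; real inputs
# would be spectral-similarity scores and molecular-similarity references of
# equal length. Everything below is made up for illustration.
def _demo_plot_precentile():
    rng = np.random.default_rng(0)
    arr_ref = rng.random(10_000)
    arr_sim = arr_ref + 0.1 * rng.standard_normal(10_000)  # noisy proxy for arr_ref
    return plot_precentile(arr_sim, arr_ref, num_bins=100, show_top_percentile=1.0)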
def set_xfce4_shortcut_avail(act_args, key, progs):
"""Set the shortcut associated with the given key to the first available program"""
for cmdline in progs:
# Split the command line to find the used program
cmd_split = cmdline.split(None, 1)
cmd_split[0] = find_prog_in_path(cmd_split[0])
if cmd_split[0] is not None:
return set_xfce4_shortcut(act_args, key, ' '.join(cmd_split))
logger.warning("no program found for shortcut %s", key)
return True | 1b67e66fc7dd5b8aa4ca86dd8d7028af824b1cf7 | 973 |
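# Usage sketch: bind a key to the first terminal emulator found on PATH.
# set_xfce4_shortcut and find_prog_in_path are helpers from the surrounding module
# (not shown here); the key string, program list and act_args are illustrative.
def _demo_set_terminal_shortcut(act_args):
    return set_xfce4_shortcut_avail(
        act_args,
        '<Super>Return',
        ['xfce4-terminal', 'x-terminal-emulator', 'xterm'],
    )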
def accesscontrol(check_fn):
"""Decorator for access controlled callables. In the example scenario where
access control is based solely on user names (user objects are `str`),
the following is an example usage of this decorator::
@accesscontrol(lambda user: user == 'bob')
def only_bob_can_call_this():
pass
Class methods are decorated in the same way.
:param check_fn: A callable, taking a user object argument, and
returning a boolean value, indicating whether the user (user object
argument) is allowed access to the decorated callable."""
if not callable(check_fn):
raise TypeError(check_fn)
def decorator(wrapped):
@wraps(wrapped)
def decorated(*args, **kwargs):
if ACL.current_user is None:
raise AccessDeniedError(decorated)
if not ACL.managed_funcs[decorated](ACL.current_user):
raise AccessDeniedError(decorated)
return wrapped(*args, **kwargs)
ACL.managed_funcs[decorated] = check_fn
return decorated
return decorator | ec0deb22e40d3a03e7c9fadbb6b7085b1c955925 | 974 |
import sys
def load_dataset(path):
"""
Load data from the file
:param: path: path to the data
:return: pd dataframes, train & test data
"""
    if str(path).endswith('.h5'):
        dataframe = pd.read_hdf(path)
    elif str(path).endswith('.pkl'):
        dataframe = pd.read_pickle(path)
    else:
        print('Wrong file format: expected a .h5 or .pkl file')
        sys.exit()
# Make it multiindex
dataframe['event'] = dataframe.index
dataframe = dataframe.set_index(['sample_nr', 'event'])
dataframe = dataframe.reset_index('event', drop=True)
dataframe = dataframe.set_index(dataframe.groupby(level=0).cumcount().rename('event'), append=True)
return dataframe | de3fa114e9ba18677de1c48ce21d3d0635581de8 | 975 |
def positionPctProfit():
"""
Position Percent Profit
The percentage profit/loss of each position. Returns a dictionary with
market symbol keys and percent values.
:return: dictionary
"""
psnpct = dict()
for position in portfolio:
# Strings are returned from API; convert to floating point type
current = float(position.current_price)
entry = float(position.avg_entry_price)
psnpct[position.symbol] = ((current - entry) / entry) * 100
return psnpct | b0abb40edeb6ff79abe29f916c6996e851627ab4 | 976 |
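# positionPctProfit reads a module-level `portfolio` iterable of position objects
# (Alpaca-style, with prices as strings). The stub below fakes two positions so
# the arithmetic can be sanity-checked; it is not part of any real trading setup.
def _demo_position_pct_profit():
    from types import SimpleNamespace
    global portfolio
    portfolio = [
        SimpleNamespace(symbol="AAPL", avg_entry_price="100.0", current_price="110.0"),
        SimpleNamespace(symbol="MSFT", avg_entry_price="200.0", current_price="190.0"),
    ]
    return positionPctProfit()  # -> {"AAPL": 10.0, "MSFT": -5.0}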
def _parse_fields(vel_field, corr_vel_field):
""" Parse and return the radar fields for dealiasing. """
if vel_field is None:
vel_field = get_field_name('velocity')
if corr_vel_field is None:
corr_vel_field = get_field_name('corrected_velocity')
return vel_field, corr_vel_field | 8a0d8a4148ddc3757bc437de3dc942fd6b4db1b3 | 977 |
def get_species_charge(species):
""" Returns the species charge (only electrons so far """
if(species=="electron"):
return qe
else:
raise ValueError(f'get_species_charge: Species "{species}" is not supported.') | 24b0f091973dc5165194fc3063256413f14cd372 | 978 |
from typing import Dict
from typing import Any
from typing import Callable
def orjson_dumps(
obj: Dict[str, Any], *, default: Callable[..., Any] = pydantic_encoder
) -> str:
"""Default `json_dumps` for TIA.
Args:
        obj (Dict[str, Any]): The object to 'dump'.
default (Callable[..., Any], optional): The default encoder. Defaults to
pydantic_encoder.
Returns:
str: The json formatted string of the object.
"""
return orjson.dumps(obj, default=default).decode("utf-8") | b66cc4ea1ecd372711086cfeb831d690bcfa5ecd | 979 |
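# Usage sketch for orjson_dumps: a plain dict serializes directly; the
# pydantic_encoder default only kicks in for types orjson cannot handle natively
# (assuming pydantic is installed, as the default argument implies).
def _demo_orjson_dumps():
    payload = {"id": 7, "tags": ["tia", "json"], "active": True}
    text = orjson_dumps(payload)
    assert text == '{"id":7,"tags":["tia","json"],"active":true}'
    return text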
def KNN_classification(dataset, filename):
"""
Classification of data with k-nearest neighbors,
followed by plotting of ROC and PR curves.
Parameters
---
dataset: the input dataset, containing training and
test split data, and the corresponding labels
for binding- and non-binding sequences.
filename: an identifier to distinguish different
plots from each other.
Returns
---
stats: array containing classification accuracy, precision
and recall
"""
# Import and one hot encode training/test set
X_train, X_test, y_train, y_test = prepare_data(dataset)
# Fitting classifier to the training set
KNN_classifier = KNeighborsClassifier(
n_neighbors=100, metric='minkowski', p=2)
KNN_classifier.fit(X_train, y_train)
# Predicting the test set results
y_pred = KNN_classifier.predict(X_test)
y_score = KNN_classifier.predict_proba(X_test)
# ROC curve
title = 'KNN ROC curve (Train={})'.format(filename)
plot_ROC_curve(
y_test, y_score[:, 1], plot_title=title,
plot_dir='figures/KNN_ROC_Test_{}.png'.format(filename)
)
# Precision-recall curve
title = 'KNN Precision-Recall curve (Train={})'.format(filename)
plot_PR_curve(
y_test, y_score[:, 1], plot_title=title,
plot_dir='figures/KNN_P-R_Test_{}.png'.format(filename)
)
# Calculate statistics
stats = calc_stat(y_test, y_pred)
# Return statistics
return stats | b559ada6ace9c685cd7863a177f3f7224a5b5a69 | 980 |
def projection_error(pts_3d: np.ndarray, camera_k: np.ndarray, pred_pose: np.ndarray, gt_pose: np.ndarray):
"""
Average distance of projections of object model vertices [px]
:param pts_3d: model points, shape of (n, 3)
:param camera_k: camera intrinsic matrix, shape of (3, 3)
:param pred_pose: predicted rotation and translation, shape (3, 4), [R|t]
:param gt_pose: ground truth rotation and translation, shape (3, 4), [R|t]
:return: the returned error, unit is pixel
"""
# projection shape (n, 2)
pred_projection: np.ndarray = project_3d_2d(pts_3d=pts_3d, camera_intrinsic=camera_k, transformation=pred_pose)
gt_projection: np.ndarray = project_3d_2d(pts_3d=pts_3d, camera_intrinsic=camera_k, transformation=gt_pose)
error = np.linalg.norm(gt_projection - pred_projection, axis=1).mean()
return error | 846f8b468f180fcc2cd48c4ff7dc9ca21338b7b3 | 981 |
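# Sanity-check sketch for projection_error: identical predicted and ground-truth
# poses give zero reprojection error. The pinhole intrinsics are made up, and the
# module's project_3d_2d helper (not shown here) is assumed to be available.
def _demo_projection_error():
    pts = np.array([[0.0, 0.0, 1.0], [0.1, -0.1, 1.2], [-0.05, 0.02, 0.9]])
    camera_k = np.array([[500.0, 0.0, 320.0],
                         [0.0, 500.0, 240.0],
                         [0.0, 0.0, 1.0]])
    pose = np.hstack([np.eye(3), np.zeros((3, 1))])  # identity [R|t]
    assert projection_error(pts, camera_k, pose, pose) == 0.0
    return projection_error(pts, camera_k, pose, pose)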
def ph_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the ph line plot after pulling new data.
"""
plt.cla()
update_data()
values = pd.Series(dataList[3])
if(high_contrast):
line = ax.plot(values, linewidth=3.0)
else:
line = ax.plot(values)
return line | 22b1648a2c2d5fc479cb23f2aa6365b0a2d9669c | 982 |
def get_percent_match(uri, ucTableName):
"""
Get percent match from USEARCH
Args:
uri: URI of part
ucTableName: UClust table
Returns: Percent match if available, else -1
"""
with open(ucTableName, 'r') as read:
uc_reader = read.read()
lines = uc_reader.splitlines()
for line in lines:
line = line.split()
if line[9] == uri:
return line[3]
return -1 | 259e9955b282baf74fa43bbea1aa7136e8b6e0f7 | 983 |
def get_rm_rf(earliest_date, symbol='000300'):
"""
    Rm-Rf (market return minus risk-free return):
    benchmark stock index return minus the 1-month Treasury bill return.
    Returns a 2-tuple of pd.Series indexed by date: ('Mkt-RF', 'RF').
"""
start = '1990-1-1'
end = pd.Timestamp('today')
benchmark_returns = get_cn_benchmark_returns(symbol).loc[earliest_date:]
treasury_returns = get_treasury_data(start, end)['1month'][earliest_date:]
    # Fill missing values: forward-fill treasury returns onto the benchmark's dates
treasury_returns = treasury_returns.reindex(
benchmark_returns.index, method='ffill')
return benchmark_returns, treasury_returns | 4a9e03381ba8c0db40342b7848783d1610207270 | 984 |
async def detect_custom(model: str = Form(...), image: UploadFile = File(...)):
"""
Performs a prediction for a specified image using one of the available models.
:param model: Model name or model hash
:param image: Image file
:return: Model's Bounding boxes
"""
draw_boxes = False
try:
output = await dl_service.run_model(model, image, draw_boxes)
error_logging.info('request successful;' + str(output))
return output
except ApplicationError as e:
error_logging.warning(model + ';' + str(e))
return ApiResponse(success=False, error=e)
except Exception as e:
error_logging.error(model + ' ' + str(e))
return ApiResponse(success=False, error='unexpected server error') | 9586682d04d71662c61b9c4c4cee248c7ff4998b | 985 |
import torch
def _get_top_ranking_propoals(probs):
"""Get top ranking proposals by k-means"""
dev = probs.device
kmeans = KMeans(n_clusters=5).fit(probs.cpu().numpy())
high_score_label = np.argmax(kmeans.cluster_centers_)
index = np.where(kmeans.labels_ == high_score_label)[0]
if len(index) == 0:
index = np.array([np.argmax(probs)])
return torch.from_numpy(index).to(dev) | f8b19f483b84b2ba1fa37811326a4f1b8c6be14b | 986 |
import os
def get_available_configs(config_dir, register=None):
"""
Return (or update) a dictionary *register* that contains all config files in *config_dir*.
"""
if register is None:
register = dict()
for config_file in os.listdir(config_dir):
if config_file.startswith('_') or not config_file.lower().endswith('.yaml'):
continue
name = os.path.splitext(config_file)[0]
config = load_yaml(os.path.join(config_dir, config_file))
config['base_catalog_dir'] = base_catalog_dir
if 'fn' in config:
config['fn'] = os.path.join(base_catalog_dir, config['fn'])
register[name] = config
return register | 6913439a6c70d6b1a35cf852e08db0e7db494fc8 | 987 |
import warnings
def test_simulated_annealing_for_valid_solution_warning_raised(slots, events):
"""
Test that a warning is given if a lower bound is passed and not reached in
given number of iterations.
"""
def objective_function(array):
return len(list(array_violations(array, events, slots)))
array = np.array([
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0]
])
assert objective_function(array) == 2
np.random.seed(0)
with warnings.catch_warnings(record=True) as w:
X = simulated_annealing(initial_array=array,
objective_function=objective_function,
lower_bound=0,
max_iterations=1)
assert objective_function(X) == 1
assert len(w) == 1 | 0236aa0795c976ba3c95d223ab558239dad0eefc | 988 |
from typing import Dict
from typing import List
async def find_quote_by_attributes(quotes: Dict[str, Quote], attribute: str, values: List[str]) -> Quote or None:
"""
Find a quote by its attributes
:param quotes: The dict containing all current quotes
:param attribute: the attribute by which to find the quote
:param values: the values of the attribute the quote has to match
:return: the Quote that has been found, None otherwise
"""
    # Sketch implementation (the original left a TODO): assume a quote matches
    # when the named attribute equals any of the given values.
    for quote in quotes.values():
        if getattr(quote, attribute, None) in values:
            return quote
    return None | 76aba2d1ec9a20800c15617a739ae6087e47168c | 989
import os
def externals_test_setup(sbox):
"""Set up a repository in which some directories have the externals property,
and set up another repository, referred to by some of those externals.
Both repositories contain greek trees with five revisions worth of
random changes, then in the sixth revision the first repository --
and only the first -- has some externals properties set. ### Later,
test putting externals on the second repository. ###
The arrangement of the externals in the first repository is:
/A/B/ ==> ^/A/D/gamma gamma
/A/C/ ==> exdir_G <scheme>:///<other_repos>/A/D/G
../../../<other_repos_basename>/A/D/H@1 exdir_H
/A/D/ ==> ^/../<other_repos_basename>/A exdir_A
//<other_repos>/A/D/G/ exdir_A/G/
exdir_A/H -r 1 <scheme>:///<other_repos>/A/D/H
/<some_paths>/A/B x/y/z/blah
A dictionary is returned keyed by the directory created by the
external whose value is the URL of the external.
"""
# The test itself will create a working copy
sbox.build(create_wc = False)
svntest.main.safe_rmtree(sbox.wc_dir)
wc_init_dir = sbox.add_wc_path('init') # just for setting up props
repo_dir = sbox.repo_dir
repo_url = sbox.repo_url
other_repo_dir, other_repo_url = sbox.add_repo_path('other')
other_repo_basename = os.path.basename(other_repo_dir)
# Get a scheme relative URL to the other repository.
scheme_relative_other_repo_url = other_repo_url[other_repo_url.find(':')+1:]
# Get a server root relative URL to the other repository by trimming
# off the first three /'s.
server_relative_other_repo_url = other_repo_url
for i in range(3):
j = server_relative_other_repo_url.find('/') + 1
server_relative_other_repo_url = server_relative_other_repo_url[j:]
server_relative_other_repo_url = '/' + server_relative_other_repo_url
# These files will get changed in revisions 2 through 5.
mu_path = os.path.join(wc_init_dir, "A/mu")
pi_path = os.path.join(wc_init_dir, "A/D/G/pi")
lambda_path = os.path.join(wc_init_dir, "A/B/lambda")
omega_path = os.path.join(wc_init_dir, "A/D/H/omega")
# These are the directories on which `svn:externals' will be set, in
# revision 6 on the first repo.
B_path = os.path.join(wc_init_dir, "A/B")
C_path = os.path.join(wc_init_dir, "A/C")
D_path = os.path.join(wc_init_dir, "A/D")
# Create a working copy.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_init_dir)
# Make revisions 2 through 5, but don't bother with pre- and
# post-commit status checks.
svntest.main.file_append(mu_path, "Added to mu in revision 2.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
svntest.main.file_append(pi_path, "Added to pi in revision 3.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
svntest.main.file_append(lambda_path, "Added to lambda in revision 4.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
svntest.main.file_append(omega_path, "Added to omega in revision 5.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
# Get the whole working copy to revision 5.
expected_output = svntest.wc.State(wc_init_dir, {
})
svntest.actions.run_and_verify_update(wc_init_dir,
expected_output, None, None)
# Now copy the initial repository to create the "other" repository,
# the one to which the first repository's `svn:externals' properties
# will refer. After this, both repositories have five revisions
# of random stuff, with no svn:externals props set yet.
svntest.main.copy_repos(repo_dir, other_repo_dir, 5)
# This is the returned dictionary.
external_url_for = { }
external_url_for["A/B/gamma"] = "^/A/D/gamma"
external_url_for["A/C/exdir_G"] = other_repo_url + "/A/D/G"
external_url_for["A/C/exdir_H"] = "../../../" + \
other_repo_basename + \
"/A/D/H@1"
# Set up the externals properties on A/B/, A/C/ and A/D/.
externals_desc = \
external_url_for["A/B/gamma"] + " gamma\n"
change_external(B_path, externals_desc, commit=False)
externals_desc = \
"exdir_G " + external_url_for["A/C/exdir_G"] + "\n" + \
external_url_for["A/C/exdir_H"] + " exdir_H\n"
change_external(C_path, externals_desc, commit=False)
external_url_for["A/D/exdir_A"] = "^/../" + other_repo_basename + "/A"
external_url_for["A/D/exdir_A/G/"] = scheme_relative_other_repo_url + \
"/A/D/G/"
external_url_for["A/D/exdir_A/H"] = other_repo_url + "/A/D/H"
external_url_for["A/D/x/y/z/blah"] = server_relative_other_repo_url + "/A/B"
externals_desc = \
external_url_for["A/D/exdir_A"] + " exdir_A" + \
"\n" + \
external_url_for["A/D/exdir_A/G/"] + " exdir_A/G/" + \
"\n" + \
"exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
external_url_for["A/D/x/y/z/blah"] + " x/y/z/blah" + \
"\n"
change_external(D_path, externals_desc, commit=False)
# Commit the property changes.
expected_output = svntest.wc.State(wc_init_dir, {
'A/B' : Item(verb='Sending'),
'A/C' : Item(verb='Sending'),
'A/D' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_init_dir, 5)
expected_status.tweak('A/B', 'A/C', 'A/D', wc_rev=6, status=' ')
svntest.actions.run_and_verify_commit(wc_init_dir,
expected_output,
expected_status,
None, wc_init_dir)
return external_url_for | f46b2bd96733bccc4ac5dc2a2c1b1998a68b5567 | 990 |
def six_bus(vn_high=20, vn_low=0.4, length_km=0.03, std_type='NAYY 4x50 SE', battery_locations=[3, 6], init_soc=0.5,
energy_capacity=20.0, static_feeds=None, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0,
storage_p_max=50.0, storage_p_min=-50.0):
"""This function creates the network model for the 6 bus POC network from scratch.
Buses and lines are added to an empty network based on a hard-coded topology and parameters from the config file
(seen as inputs). The only controllable storage added in this network are batteries, and the input static_feeds is
used to add loads and static generators which are not controlled by the agent. The first value in the series is
taken for initialization of those elements.
"""
net = pp.create_empty_network(name='6bus', f_hz=60., sn_kva=100.)
# create buses
for i in range(8):
nm = 'bus{}'.format(i)
if i == 0:
pp.create_bus(net, name=nm, vn_kv=vn_high)
elif i == 1:
pp.create_bus(net, name=nm, vn_kv=vn_low)
else:
if i <= 4:
zn = 'Side1'
else:
zn = 'Side2'
pp.create_bus(net, name=nm, zone=zn, vn_kv=vn_low)
# create grid connection
pp.create_ext_grid(net, 0)
# create lines
pp.create_line(net, 0, 1, length_km=length_km, std_type=std_type,
name='line0')
pp.create_line(net, 1, 2, length_km=length_km, std_type=std_type,
name='line1')
pp.create_line(net, 2, 3, length_km=length_km, std_type=std_type,
name='line2')
pp.create_line(net, 2, 4, length_km=length_km, std_type=std_type,
name='line3')
pp.create_line(net, 1, 5, length_km=length_km, std_type=std_type,
name='line4')
pp.create_line(net, 5, 6, length_km=length_km, std_type=std_type,
name='line5')
pp.create_line(net, 5, 7, length_km=length_km, std_type=std_type,
name='line6')
# add controllable storage
for idx, bus_number in enumerate(battery_locations):
energy_capacity_here = energy_capacity
init_soc_here = init_soc
if np.size(energy_capacity) > 1:
energy_capacity_here = energy_capacity[idx]
if np.size(init_soc) > 1:
init_soc_here = init_soc[idx]
add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here,
init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min)
# Add controllable generator
if gen_locations is not None:
for idx, bus_number in enumerate(gen_locations):
pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, min_p_kw=gen_p_min,
max_p_kw=gen_p_max)
##### TODO : Have different limits for different generators and storage #####
# add loads and static generation
if static_feeds is None:
print('No loads or generation assigned to network')
else:
if len(static_feeds) > 0:
for key, val in static_feeds.items():
init_flow = val[0]
print('init_flow: ', init_flow, 'at bus: ', key)
if init_flow > 0:
pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0)
else:
pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0)
return net | 966d040fb3fd453da5c680810838612a5988a816 | 991 |
from typing import Optional
def _add_exccess_het_filter(
b: hb.Batch,
input_vcf: hb.ResourceGroup,
overwrite: bool,
excess_het_threshold: float = 54.69,
interval: Optional[hb.ResourceGroup] = None,
output_vcf_path: Optional[str] = None,
) -> Job:
"""
Filter a large cohort callset on Excess Heterozygosity.
The filter applies only to large callsets (`not is_small_callset`)
Requires all samples to be unrelated.
ExcessHet estimates the probability of the called samples exhibiting excess
heterozygosity with respect to the null hypothesis that the samples are unrelated.
The higher the score, the higher the chance that the variant is a technical artifact
    or that there is consanguinity among the samples. In contrast to Inbreeding
Coefficient, there is no minimal number of samples for this annotation.
Returns: a Job object with a single output j.output_vcf of type ResourceGroup
"""
job_name = 'Joint genotyping: ExcessHet filter'
if utils.can_reuse(output_vcf_path, overwrite):
return b.new_job(job_name + ' [reuse]')
j = b.new_job(job_name)
j.image(utils.GATK_IMAGE)
j.memory('8G')
    j.storage('32G')
j.declare_resource_group(
output_vcf={'vcf.gz': '{root}.vcf.gz', 'vcf.gz.tbi': '{root}.vcf.gz.tbi'}
)
j.command(
f"""set -euo pipefail
    # Capturing stderr to avoid the Batch pod crashing with OOM from millions of
# warning messages from VariantFiltration, e.g.:
# > JexlEngine - ![0,9]: 'ExcessHet > 54.69;' undefined variable ExcessHet
gatk --java-options -Xms3g \\
VariantFiltration \\
--filter-expression 'ExcessHet > {excess_het_threshold}' \\
--filter-name ExcessHet \\
{f'-L {interval} ' if interval else ''} \\
-O {j.output_vcf['vcf.gz']} \\
-V {input_vcf['vcf.gz']} \\
2> {j.stderr}
"""
)
if output_vcf_path:
b.write_output(j.output_vcf, output_vcf_path.replace('.vcf.gz', ''))
return j | a3ae37c5a6c930f5046600bf02fa6d980fbe8017 | 992 |
from pathlib import Path
from typing import Union
from typing import Tuple
from typing import Dict
def _get_config_and_script_paths(
parent_dir: Path,
config_subdir: Union[str, Tuple[str, ...]],
script_subdir: Union[str, Tuple[str, ...]],
file_stem: str,
) -> Dict[str, Path]:
"""Returns the node config file and its corresponding script file."""
if isinstance(config_subdir, tuple):
config_subpath = Path(*config_subdir)
else:
config_subpath = Path(config_subdir)
if isinstance(script_subdir, tuple):
script_subpath = Path(*script_subdir)
else:
script_subpath = Path(script_subdir)
return {
"config": parent_dir / config_subpath / f"{file_stem}.yml",
"script": parent_dir / script_subpath / f"{file_stem}.py",
} | 4f9a86ed4cf821f57f737336595a9521675f6866 | 993 |
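# Usage sketch for _get_config_and_script_paths with made-up directory names,
# showing the tuple form of the subdirectory arguments.
def _demo_config_and_script_paths():
    paths = _get_config_and_script_paths(
        Path("project"), ("configs", "nodes"), ("src", "nodes"), "detector"
    )
    # paths["config"] == Path("project/configs/nodes/detector.yml")
    # paths["script"] == Path("project/src/nodes/detector.py")
    return paths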
def mongodb_get_users():
"""Connects to mongodb and returns users collection"""
# TODO parse: MONGOHQ_URL
connection = Connection(env['MONGODB_HOST'], int(env['MONGODB_PORT']))
if 'MONGODB_NAME' in env and 'MONGODB_PW' in env:
connection[env['MONGODB_DBNAME']].authenticate(env['MONGODB_NAME'], env['MONGODB_PW'])
return connection[env['MONGODB_DBNAME']].users | a0dea2588e0252d12748a7d9c77bd156b1165343 | 994 |
import requests
def macro_china_hk_cpi_ratio() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - Hong Kong (China) - consumer price index (year-over-year)
    https://data.eastmoney.com/cjsj/foreign_8_1.html
    :return: consumer price index year-over-year rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df | 1e117746a36b14ee3afe92057677bee2ca6f861f | 995 |
import json
def structuringElement(path):
"""
"""
with open(path) as f:
data = json.load(f)
data['matrix'] = np.array(data['matrix'])
data['center'] = tuple(data['center'])
return data | 99ce5d8321d037e591313aa6a7611479417e25c3 | 996 |
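# Usage sketch for structuringElement: the JSON file must hold at least a
# "matrix" (nested lists) and a "center" (two-element list). The throwaway file
# written below is a 3x3 cross, purely for illustration.
def _demo_structuring_element():
    with open("demo_selem.json", "w") as f:
        json.dump({"matrix": [[0, 1, 0], [1, 1, 1], [0, 1, 0]], "center": [1, 1]}, f)
    selem = structuringElement("demo_selem.json")
    assert selem["matrix"].shape == (3, 3) and selem["center"] == (1, 1)
    return selem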
def ptsToDist(pt1, pt2):
"""Computes the distance between two points"""
if None in pt1 or None in pt2:
dist = None
else:
vx, vy = points_to_vec(pt1, pt2)
dist = np.linalg.norm([(vx, vy)])
return dist | 6329407bf7b84ffc835e67ffcb74823de2b33175 | 997 |
import os
import click
import sys
def init_check(func):
"""
Decorator for confirming the KAOS_STATE_DIR is present (i.e. initialized correctly).
"""
def wrapper(*args, **kwargs):
if not os.path.exists(KAOS_STATE_DIR):
click.echo("{} - {} directory does not exist - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(os.path.split(KAOS_STATE_DIR)[-1], bold=True, fg='red'),
click.style("kaos init", bold=True, fg='green')))
sys.exit(1)
if not os.path.exists(CONFIG_PATH):
click.echo("{} - {} does not exist - run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("./kaos/config", bold=True, fg='red'),
click.style("kaos init", bold=True, fg='green')))
sys.exit(1)
        return func(*args, **kwargs)
return wrapper | a8a86972e59e2180d6ac6599492a0ae3df51483a | 998 |
import torch
def d6_to_RotMat(aa:torch.Tensor) -> torch.Tensor: # take (...,6) --> (...,9)
"""Converts 6D to a rotation matrix, from: https://github.com/papagina/RotationContinuity/blob/master/Inverse_Kinematics/code/tools.py"""
a1, a2 = torch.split(aa, (3,3), dim=-1)
a3 = torch.cross(a1, a2, dim=-1)
return torch.cat((a1,a2,a3), dim=-1) | b0bf02737838a236bf55eb697a27d2cbc671b44c | 999 |
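# Usage sketch for d6_to_RotMat: feeding the first two basis vectors of the
# identity rotation returns the flattened identity matrix (a3 = a1 x a2 = e_z).
def _demo_d6_to_rotmat():
    aa = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0])  # a1 = e_x, a2 = e_y
    out = d6_to_RotMat(aa)
    assert torch.equal(out, torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]))
    return out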