content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import numpy as np
from scipy.spatial import distance as dist
def _order_points(pts: np.ndarray) -> np.ndarray:
"""Extract top left, top right, bottom right, bottom left of a region
Args:
pts (np.ndarray): The coordinates of the points.
Returns:
np.ndarray: The points ordered as [top left, top right, bottom right, bottom left].
"""
x_sorted = pts[np.argsort(pts[:, 0]), :]
left_most = x_sorted[:2, :]
right_most = x_sorted[2:, :]
left_most = left_most[np.argsort(left_most[:, 1]), :]
(tl, bl) = left_most
distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
(br, tr) = right_most[np.argsort(distance)[::-1], :]
return np.array([tl, tr, br, bl], dtype="float32") | 46dfb8a8e042929b2475bda2b01b39e5d871e02d | 2,100 |
import logging
import math
import numpy as np
import skimage.measure
import tensorflow as tf
from matplotlib import pyplot as plt
def to_image(obj):
""" allgemeine funktion zum anschauen von allen objekttypen (work in progress)
gibt image (numpy arry),description zurück
description sagt, was alles gemacht wurde um bild darzustellen
"""
descr = ""
if (tf.is_tensor(obj)):
obj = obj.numpy()
logger = logging.getLogger()
old_level = logger.level
logger.setLevel(100)
if obj.shape:
#print(f"Max {max(obj)}")
if len(obj.shape) == 2: # grayscale image
obj = norm(obj)
descr += f"Grayscale Image, mean:{obj.mean()}, var:{obj.var()} \n"
if (obj.var() < 0.01):
descr += f"Mean abgzogen {obj.mean()} \n"
obj = obj - obj.mean()
if (obj.mean() < 0.01):
i = 0
while (obj.mean() < 0.1 and obj.shape[0] > 10):
i += 1
obj = skimage.measure.block_reduce(obj, (2,2), np.max)
descr += f"Sehr dunkles Bild, maxpooling ({i} mal)"
# in "rgb" umwandeln
obj = np.stack((obj,)*3, axis=-1)
return obj,descr
elif len(obj.shape) == 3: # could be an image
if obj.shape[0] == 3:
obj = np.transpose(obj,(1,2,0))
descr += "channel first \n"
if obj.shape[2] == 3: # regular image
obj = norm(obj)
descr += f"Mean {obj.mean()}, Variance {obj.var()}\n"
if (obj.var() < 0.1):
obj = obj - obj.mean()
descr += f"Mean abgezogen \n"
if (obj.mean() < 0.1):
i= 0
while (obj.mean() < 0.1 and obj.shape[0] > 10):
i += 1
obj = skimage.measure.block_reduce(obj, (2,2,1), np.max)
descr += f"Bild zu dunkel, maxpooling ({i} mal)"
return obj,descr
else : ## feature map
## show a few of them
n = math.floor(math.sqrt(obj.shape[2]/3))
n = min(n,8)
f, axs = plt.subplots(n,n,figsize=(15,15))
descr += f"{obj.shape[2]} Feature Maps mit Shape {obj.shape[0:2]}"
print(f'Zeige {n*n*3} Feature Maps via RGB:')
for i in range(n*n):
r = norm(obj[:,:,i*3])
g = norm(obj[:,:,i*3+1])
b = norm(obj[:,:,i*3+2])
axs.flat[i].set_title(f'{i*3} - {i*3+3}')
axs.flat[i].imshow(np.moveaxis(np.array([r,g,b]), 0, 2)) # channels first -> channels last
#axs.flat[i].imshow(r,cmap='gray')
axs.flat[i].axis('off')
elif len(obj.shape) == 4 and obj.shape[0] == 3 and obj.shape[0] == 3: # convolution kernel
descr += f"Convolution Kernel {obj.shape}"
obj = np.transpose(obj,(2,3,0,1))
obj = np.reshape(obj,(obj.shape[0],-1,3))
#obj = obj[:,:,:3]
return to_image(obj)
else:
print("Tensor ",obj.shape)
print(obj)
logger.setLevel(old_level)
else:
return None, "Object of type "+str(type(obj)) | 4ae3be9758a647bbe2d0d2fedc080992840ab124 | 2,101 |
import os
import sys
import logging
import re
import signal
def start():
"""
Start the daemon.
"""
ret = 0
cfg = 'ludolph.cfg'
cfg_fp = None
cfg_lo = ((os.path.expanduser('~'), '.' + cfg), (sys.prefix, 'etc', cfg), ('/etc', cfg))
config_base_sections = ('global', 'xmpp', 'webserver', 'cron', 'ludolph.bot')
# Try to read config file from ~/.ludolph.cfg or /etc/ludolph.cfg
for i in cfg_lo:
try:
cfg_fp = open(os.path.join(*i))
except IOError:
continue
else:
break
if not cfg_fp:
sys.stderr.write("""\nLudolph can't start!\n
You need to create a config file in one of these locations: \n%s\n
You can rename ludolph.cfg.example and update the required options.
The example file is located in: %s\n\n""" % (
'\n'.join([os.path.join(*i) for i in cfg_lo]),
os.path.dirname(os.path.abspath(__file__))))
sys.exit(1)
# Read and parse configuration
# noinspection PyShadowingNames
def load_config(fp, reopen=False):
config = RawConfigParser()
if reopen:
fp = open(fp.name)
try: # config.readfp() is Deprecated since python 3.2
# noinspection PyDeprecation
read_file = config.readfp
except AttributeError:
read_file = config.read_file
read_file(fp)
fp.close()
return config
config = load_config(cfg_fp)
# Prepare logging configuration
logconfig = {
'level': parse_loglevel(config.get('global', 'loglevel')),
'format': LOGFORMAT,
}
if config.has_option('global', 'logfile'):
logfile = config.get('global', 'logfile').strip()
if logfile:
logconfig['filename'] = logfile
# Daemonize
if config.has_option('global', 'daemon'):
if config.getboolean('global', 'daemon'):
ret = daemonize()
# Save pid file
if config.has_option('global', 'pidfile'):
try:
with open(config.get('global', 'pidfile'), 'w') as fp:
fp.write('%s' % os.getpid())
except Exception as ex:
# Setup logging just to show this error
logging.basicConfig(**logconfig)
logger.critical('Could not write to pidfile (%s)\n', ex)
sys.exit(1)
# Setup logging
logging.basicConfig(**logconfig)
# All exceptions will be logged without exit
def log_except_hook(*exc_info):
logger.critical('Unhandled exception!', exc_info=exc_info)
sys.excepthook = log_except_hook
# Default configuration
use_tls = True
use_ssl = False
address = []
# Starting
logger.info('Starting Ludolph %s (%s %s)', __version__, sys.executable, sys.version.split()[0])
logger.info('Loaded configuration from %s', cfg_fp.name)
# Load plugins
# noinspection PyShadowingNames
def load_plugins(config, reinit=False):
plugins = []
for config_section in config.sections():
config_section = config_section.strip()
if config_section in config_base_sections:
continue
# Parse other possible imports
parsed_plugin = config_section.split('.')
if len(parsed_plugin) == 1:
modname = 'ludolph.plugins.' + config_section
plugin = config_section
else:
modname = config_section
plugin = parsed_plugin[-1]
logger.info('Loading plugin: %s', modname)
try:
# Translate super_ludolph_plugin into SuperLudolphPlugin
clsname = plugin[0].upper() + re.sub(r'_+([a-zA-Z0-9])', lambda m: m.group(1).upper(), plugin[1:])
module = __import__(modname, fromlist=[clsname])
if reinit and getattr(module, '_loaded_', False):
reload(module)
module._loaded_ = True
imported_class = getattr(module, clsname)
if not issubclass(imported_class, LudolphPlugin):
raise TypeError('Plugin: %s is not LudolphPlugin instance' % modname)
plugins.append(Plugin(config_section, modname, imported_class))
except Exception as ex:
logger.exception(ex)
logger.critical('Could not load plugin: %s', modname)
return plugins
plugins = load_plugins(config)
# XMPP connection settings
if config.has_option('xmpp', 'host'):
address = [config.get('xmpp', 'host'), '5222']
if config.has_option('xmpp', 'port'):
address[1] = config.get('xmpp', 'port')
logger.info('Connecting to jabber server %s', ':'.join(address))
else:
logger.info('Using DNS SRV lookup to find jabber server')
if config.has_option('xmpp', 'tls'):
use_tls = config.getboolean('xmpp', 'tls')
if config.has_option('xmpp', 'ssl'):
use_ssl = config.getboolean('xmpp', 'ssl')
# Here we go
xmpp = LudolphBot(config, plugins=plugins)
signal.signal(signal.SIGINT, xmpp.shutdown)
signal.signal(signal.SIGTERM, xmpp.shutdown)
if hasattr(signal, 'SIGHUP'): # Windows does not support SIGHUP - bug #41
# noinspection PyUnusedLocal,PyShadowingNames
def sighup(signalnum, handler):
if xmpp.reloading:
logger.warning('Reload already in progress')
else:
xmpp.reloading = True
try:
config = load_config(cfg_fp, reopen=True)
logger.info('Reloaded configuration from %s', cfg_fp.name)
xmpp.prereload()
plugins = load_plugins(config, reinit=True)
xmpp.reload(config, plugins=plugins)
finally:
xmpp.reloading = False
signal.signal(signal.SIGHUP, sighup)
# signal.siginterrupt(signal.SIGHUP, false) # http://stackoverflow.com/a/4302037
if xmpp.client.connect(tuple(address), use_tls=use_tls, use_ssl=use_ssl):
xmpp.client.process(block=True)
sys.exit(ret)
else:
logger.error('Ludolph is unable to connect to jabber server')
sys.exit(2) | f6bb0a41bda524e20d9d2aec25a09259d8c7514b | 2,102 |
import re
def matchPP(a_string):
"""assumes a_string is a string
returns re match object if it finds two consecutive words that start with P,
else returns None"""
pattern = "[P|p]\w+\s[P|p]\w+"
result = re.search(pattern, a_string)
return result | c46eb4e0380a54cc36db0dc8969d17d65a546bf3 | 2,103 |
def setBoth(s1, s2):
"""
Sets both servo motors to specified number of degrees
Args:
s1, s2 (number): degrees for left and right servos respectively
must be between -90 and 90 and will be rounded
Raises:
Exception if s1 or s2 is not a number
Returns:
None
"""
s1 = restrictServoDegrees(s1)
s2 = restrictServoDegrees(s2)
return _setServos(s1, s2) | 16385e9a8ad23011e9c10f66677afb703f6d19ed | 2,104 |
import json
import pandas as pd
def transfer_shfe_future_hq(date, file_path, columns_map):
"""
Normalize each day's data to a common standard
:return: pd.DataFrame of the standardized data
"""
ret = pd.DataFrame()
data = json.loads(file_path.read_text())
hq_df = pd.DataFrame(data['o_curinstrument'])
total_df = pd.DataFrame(data['o_curproduct'])
bflag = hq_df.empty or len(hq_df.columns) < len(columns_map) or len(hq_df.columns) > 20
if bflag: # the raw data file is null and is not re-downloaded here; the program needs to be run again
print('shfe future hq data: {} does not exist, please rerun program!'.format(file_path.name))
return ret
settle_name = columns_map['settle']
hq_df = hq_df[hq_df[settle_name] != '']
hq_df = data_type_conversion(hq_df, 0, list(columns_map.values()), list(columns_map.keys()), date, 'shfe')
hq_df.loc[:, 'code'] = hq_df['code'].str.strip()
# convert commodity name to its letter code
hq_df['code'] = hq_df['code'].transform(lambda x: NAME2CODE_MAP['exchange'][x])
# build the symbol
hq_df['symbol'] = hq_df['code'] + hq_df['symbol'].transform(lambda x: convert_deliver(x, date))
# compute the amount
total_df['PRODUCTNAME'] = total_df['PRODUCTNAME'].str.strip()
total_df['AVGPRICE'] = pd.to_numeric(total_df['AVGPRICE'], downcast='float')
total_df['VOLUME'] = pd.to_numeric(total_df['VOLUME'], downcast='integer')
total_df['TURNOVER'] = pd.to_numeric(total_df['TURNOVER'], downcast='float')
total_df = total_df[total_df['AVGPRICE'] > 0]
total_df['code'] = total_df['PRODUCTNAME'].transform(lambda x: NAME2CODE_MAP['exchange'][x.strip()])
total_df['multiplier'] = total_df['TURNOVER'] / total_df['AVGPRICE'] / total_df['VOLUME'] * 100000000
total_df['multiplier'] = total_df['multiplier'].transform(round)
hq_df = hq_df.join(total_df[['code', 'multiplier']].set_index('code'), on='code')
hq_df['amount'] = hq_df['volume'] * hq_df['settle'] * hq_df['multiplier']
del hq_df['multiplier']
return hq_df | 4e90164f96d4c5018774c0ad8d4deda7fa6dbeec | 2,105 |
def comp_material_bsdf(arg_material_one:bpy.types.Material,
arg_material_two:bpy.types.Material) -> bool:
"""指定マテリアルのBSDFノードを比較する
受け渡したマテリアルの出力ノードに接続されたプリシプルBSDFノードを比較する
比較対象の入力端子のデフォルト値が有効、かつ、全て同一の場合、Trueを返す
Args:
arg_material_one (bpy.types.Material): 比較マテリアル1
arg_material_two (bpy.types.Material): 比較マテリアル2
Returns:
bool: 比較結果(一致:True)
"""
# check whether a Principled BSDF node is connected to the material's output node
if check_surface_bsdf(arg_material_one) == False:
# if it is not a Principled BSDF, stop and return False
return False
# check whether a Principled BSDF node is connected to the material's output node
if check_surface_bsdf(arg_material_two) == False:
# if it is not a Principled BSDF, stop and return False
return False
# get the Principled BSDF node
get_node_one = get_node_linkoutput(arg_material_one)
# get the Principled BSDF node
get_node_two = get_node_linkoutput(arg_material_two)
# comparison result flag (defaults to match)
comp_result = True
# check every input socket subject to comparison
for bsdfnode_inputname in def_comp_bsdfnode_input_list:
# get the socket info whose default value is enabled
nodesocket_one = get_nodesocket_enabledefault(arg_node=get_node_one, arg_inputname=bsdfnode_inputname)
nodesocket_two = get_nodesocket_enabledefault(arg_node=get_node_two, arg_inputname=bsdfnode_inputname)
# check whether socket info with an enabled default value could be obtained
if ((nodesocket_one == None) or (nodesocket_two == None)):
# if the socket info could not be obtained, treat it as a mismatch and stop checking
comp_result = False
break
# check whether the socket types are identical
if (type(nodesocket_one) != type(nodesocket_two)):
# if not identical, treat it as a mismatch and stop checking
comp_result = False
break
# flag indicating a type-specific value comparison has been performed
checked_flg = False
# comparison of NodeSocketFloat sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketFloat):
# compare whether the values match
if (nodesocket_one.default_value != nodesocket_two.default_value):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# mark that a type-specific value comparison has been performed
checked_flg = True
# comparison of NodeSocketFloatFactor sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketFloatFactor):
# compare whether the values match
if (nodesocket_one.default_value != nodesocket_two.default_value):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# mark that a type-specific value comparison has been performed
checked_flg = True
# comparison of NodeSocketVector sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketVector):
# compare whether the values match
if ((nodesocket_one.default_value[0] != nodesocket_two.default_value[0]) or
(nodesocket_one.default_value[1] != nodesocket_two.default_value[1]) or
(nodesocket_one.default_value[2] != nodesocket_two.default_value[2])):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# mark that a type-specific value comparison has been performed
checked_flg = True
# comparison of NodeSocketColor sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketColor):
# compare whether the values match
if ((nodesocket_one.default_value[0] != nodesocket_two.default_value[0]) or
(nodesocket_one.default_value[1] != nodesocket_two.default_value[1]) or
(nodesocket_one.default_value[2] != nodesocket_two.default_value[2]) or
(nodesocket_one.default_value[3] != nodesocket_two.default_value[3])):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# mark that a type-specific value comparison has been performed
checked_flg = True
# check whether a value comparison has been performed
if checked_flg == False:
# if no matching type was found, assume this is not a BSDF input, treat it as a mismatch and stop checking
comp_result = False
break
return comp_result | 884c38c93ea4fd0c6907da0d2e5025a0980bed50 | 2,106 |
def run_filters():
"""Runs filters ('PAINS', 'ZINC', 'BRENK', 'NIH')for molecule selected.
Saves the information to the global molecule_info dict and returns the
information as its own dict.
Pass R Group IDs as queries: /filters?r1=A01&r2=B01
:returns: A json dictionary of the molecule, indexed
by the concatenated string of its R Group IDs, with the values for each
descriptor, with each key being its respective descriptor label.
:rtype: json dict
"""
filter_names = ['PAINS', 'ZINC', 'BRENK', 'NIH']
r_group_1_id = request.args.get('r1')
r_group_2_id = request.args.get('r2')
drug_mol = FinalMolecule(r_group_1_id, r_group_2_id)
drug_filters = drug_mol.filter_properties()
molecule_key = tuple2str((r_group_1_id, r_group_2_id))
filt_dict = {}
filt_dict[molecule_key] = {}
for label in filter_names:
if "filters" in molecule_info[molecule_key].keys():
pass
else:
molecule_info[molecule_key]["filters"] = {}
molecule_info[molecule_key]["filters"][label] = drug_filters[label]
filt_dict[molecule_key][label] = drug_filters[label]
return jsonify({"filter_dict": filt_dict}) | e1bc4719d412a73a7860f49978d47c459dc34d70 | 2,107 |
def read_template(engine, template_name):
"""Read template string from file and get path."""
template_file = get_template_file(engine, template_name)
template_string = template_file.read_text()
return template_string, template_file.parent | 3dc55309df1575d2af2e4794e03e2ba4ccd166a2 | 2,108 |
def get_qbert_v3_url(qbert_url, project_id):
"""Keystone only hands out a v1 url I need v3."""
qbert_v3_url = "{0}/v3/{1}".format(qbert_url[0:-3], project_id)
return qbert_v3_url | 423e1f7a601f4ecafbc7d52d1f95fd59195f193e | 2,109 |
def gen_all_holds(hand):
"""
Generate all possible choices of dice from hand to hold.
hand: sorted full yahtzee hand
Returns a set of tuples, where each tuple is sorted dice to hold
"""
# start off with the original hand in set
set_holds = set([(hand)])
# now iterate with all sub hands with one element removed
for item in hand:
list_hand = list(hand)
list_hand.remove(item)
# add to set_holds this sub hand
set_holds.add(tuple(list_hand))
# also add to set_holds the recursion of this sub hand
# set functionality also takes care of repeated sub hands
set_holds.update(gen_all_holds(tuple(list_hand)))
return set_holds | 5c8af5040f619fabef56918d399b5a1cab8893a4 | 2,110 |
def sndrcv(*args, **kwargs):
# type: (*Any, **Any) -> Tuple[SndRcvList, PacketList]
"""Scapy raw function to send a packet and receive its answer.
WARNING: This is an internal function. Using sr/srp/sr1/srp is
more appropriate in many cases.
"""
sndrcver = SndRcvHandler(*args, **kwargs)
return sndrcver.results() | 6918dbf09bef672b95bab83126e6e4c0ec99e3bf | 2,111 |
from typing import Optional
def get_by_name(db_session: Session, *, name: str) -> Optional[Action]:
"""Return action object based on action name.
Arguments:
db_session {Session} -- SQLAlchemy Session object
name {str} -- action name
Returns:
Optional[Action] -- Returns an Action object or None if it doesn't exist
"""
return db_session.query(Action).filter(Action.name == name).first() | fb8c758d401fe09a36b3d2687a0e8e886edac594 | 2,112 |
def langstring(value: str, language: str = "x-none") -> dict:
"""Langstring."""
return {
"langstring": {
"lang": language,
"#text": value,
}
} | dca23a329cfc87d8cfa52cd2b009ce723b7d2270 | 2,113 |
def chinese_half2full():
"""Convert all halfwidth Chinese characters to fullwidth .
Returns:
"""
def string_op(input_str:str):
rstring = ""
for uchar in input_str:
u_code = ord(uchar)
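# halfwidth space (32) maps to the ideographic space (12288); other printable
# ASCII (33-126) is shifted by the fullwidth offset 65248 (0xFEE0)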
if u_code == 32:
u_code = 12288
elif 33 <= u_code <= 126:
u_code += 65248
rstring += chr(u_code)
return rstring
return string_op | e89a6314a57192e62b32e1f7e044a09700b5bb73 | 2,114 |
from math import sqrt
def euclidean_distance(p1, p2):
"""
Returns the Euclidean distance between two points, ignoring the last element of each
(commonly a class label).
"""
distance = 0
for i in range(len(p1)-1):
distance += (p1[i]-p2[i])**(2)
return sqrt(distance) | dd06e44659fdd06972bd6a660afeb313de81c6fe | 2,115 |
import argparse
def get_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description="""A simple popup calendar""")
parser.add_argument(
"-p",
"--print",
help="print date to stdout instead of opening a note",
action="store_true",
)
parser.add_argument(
"-f",
"--format",
help="""option '-p' output format (datetime.strftime format, defaut='%%Y-{%%m}-%%d')""",
dest="format",
default="%Y-%m-%d",
)
parser.add_argument(
"-e",
"--editor",
help="""editor command to open notes""",
dest="editor",
default="xdg-open",
)
parser.add_argument(
"-l",
"--locale",
help="""force system locale, for example '-l es_ES.utf8'""",
dest="locale",
default="",
)
parser.add_argument(
"-c",
"--read-cache",
dest="is_force_read_cache",
action="store_true",
help="""force calendar to read old date from cache"""
)
parser.add_argument(
"-t",
"--theme",
help="""set calendar theme, default=classic_dark (theme file name without extention)""",
dest="theme"
)
args, unknown = parser.parse_known_args()
unknown = unknown if len(unknown) == 0 else "".join(unknown).strip(' ')
return args, unknown | 7e7940001679e05f137798d127f54c9ab7512a63 | 2,116 |
from PIL import Image
def img_histogram(file):
"""
Returns an image's histogram in a combined RGB channel and each individual
channel as an array of 256 values.
A 0 means that a tonal value is the max and 255 means there are 0 pixels at that value.
"""
with Image.open(file) as img:
histogram = img.histogram()
red_histogram = histogram[0:256]
red_max = max(red_histogram)
green_histogram = histogram[256:512]
green_max = max(green_histogram)
blue_histogram = histogram[512:768]
blue_max = max(blue_histogram)
rgb_histogram = []
for i in range(256):
rgb_histogram.append(red_histogram[i] + green_histogram[i] + blue_histogram[i])
rgb_max = max(rgb_histogram)
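# invert and rescale each histogram to 0-255 so that 0 marks the most frequent
# tonal value and 255 means no pixels at that value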
for i in range(256):
r = red_histogram[i]
g = green_histogram[i]
b = blue_histogram[i]
rgb = rgb_histogram[i]
rgb_histogram[i] = round(255 - (rgb * 255 / rgb_max), 2)
red_histogram[i] = round(255 - (r * 255 / red_max), 2)
green_histogram[i] = round(255 - (g * 255 / green_max), 2)
blue_histogram[i] = round(255 - (b * 255 / blue_max), 2)
return rgb_histogram, red_histogram, green_histogram, blue_histogram | 1f210316e752328190978f908143dd40c9ef6ba4 | 2,117 |
def absModuleToDist(magApp, magAbs):
"""
Convert apparent and absolute magnitude into distance.
Parameters
----------
magApp : float
Apparent magnitude of object.
magAbs : float
Absolute magnitude of object.
Returns
-------
Distance : float
The distance resulting from the difference in
apparent and absolute magnitude [pc].
"""
d = 10.0**(-(magAbs - magApp) / 5.0 + 1.0)
return d | a7d98ff479114f08e47afefc97a1119f5e8ff174 | 2,118 |
import base64
def decoded_anycli(**kwargs):
"""
Return the decoded return from AnyCLI request - Do not print anything
:param kwargs:
keyword value: value to display
:return: return the result of AnyCLI in UTF-8
:Example:
result = cli(url=base_url, auth=s, command="show vlan")
decoded_anycli(result)
"""
value = kwargs.get('value', None)
return base64.b64decode(value['result_base64_encoded']).decode('utf-8') | 223c4f9aabfef530896729205071e7fb8f9c8301 | 2,119 |
def job_results_html(request):
"""
Used for testing the update with debug toolbar.
"""
response = job_results(request)
return render(request, 'ci/ajax_test.html', {'content': response.content}) | eed4b66d8227f8256847484d2dc01f4dcd3b3afa | 2,120 |
import pandas
def open_mcrae_nature_cohort():
""" get proband details for McRae et al., Nature 2017
McRae et al Nature 2017 542:433-438
doi: 10.1038/nature21062
Supplementary table S1.
"""
data = pandas.read_excel(url, sheet_name='Supplementary Table 1')
data['Individual ID'] += '|DDD'
phenotype = ['HP:0001249']
study = ['10.1038/nature21062']
persons = set()
for i, row in data.iterrows():
person = Person(row['Individual ID'], row.Sex, phenotype, study)
persons.add(person)
persons = add_mock_probands(persons, 4293, 'ddd', 'DDD', phenotype, study)
return persons | 8485fdc09c92bab20fc380a14f549f028be950b7 | 2,121 |
def copia_coords_alineadas(align1,align2,coords_molde,PDBname):
""" Devuelve:
1) una lista con las coordenadas de coords_molde
que se pueden copiar segun el alineamiento align1,align2.
2) una estimacion del RMSD segun la curva RMSD(A) = 0.40 e^{l.87(1-ID)}
de Chothia & Lesk (1986) """
aanames = { "A":"ALA","C":"CYS","D":"ASP","E":"GLU","F":"PHE","G":"GLY",
"H":"HIS","I":"ILE","K":"LYS","L":"LEU","M":"MET","N":"ASN","P":"PRO",
"Q":"GLN","R":"ARG","S":"SER","T":"THR","V":"VAL","W":"TRP","Y":"TYR" }
rmsd,identical = 0,0
total1,total2,total_model = -1,-1,0
length = len(align1)
if(length != len(align2)):
print "# copia_coords_alineadas: alineamientos tienen != longitud",
return []
pdbfile = open(PDBname, 'w')
print >> pdbfile, "HEADER comparative model\nREMARK alignment:\n",
print >> pdbfile, "REMARK query : %s\n" % (align1),
print >> pdbfile, "REMARK template: %s\n" % (align2),
for r in range(0, length):
conserved = False
res1 = align1[r:r+1]
res2 = align2[r:r+1]
if(res1 != '-'): total1+=1
if(res2 != '-'): total2+=1
if(res1 == '-' or res2 == '-'): continue # skip gaps
total_model += 1.0;
if(res1 == res2):
conserved = True
identical += 1.0
for atomo in coords_molde[total2].split("\n"):
if(atomo == ''): break
if(atomo[12:16] == ' CA ' or atomo[12:16] == ' C ' or \
atomo[12:16] == ' N ' or atomo[12:16] == ' O ' \
or conserved):
print >> pdbfile, "%s%s%s%4d%s" % \
(atomo[0:17],aanames[res1],atomo[20:22],total1+1,atomo[26:])
print >> pdbfile, "TER\n",
pdbfile.close()
rmsd = 0.40 * exp(1.87*(1-(identical/total_model)))
identical = (identical/total_model)
return (total_model,identical,rmsd) | 48c730b43dd7059b6a6d7a068d884ecd27d3820e | 2,122 |
def get_amati_relationship(value='o'):
"""
Return the Amati relationship and its 1 sigma dispersion as given by Tsutsui et al. (2009).
:param value: a string that can be 'o', '+', or '-'. The default is set to 'o' for the actual Amati relationship.
'+' gives the upper bound of uncertainty and '-' gives the lower bound of uncertainty.
:return: returns arrays of the a and y values of the amati relation/ error in the relation
"""
#plot the amati relation given by:
#http://iopscience.iop.org/article/10.1088/1475-7516/2009/08/015/pdf
x=np.linspace(-3,3,100) #log(E_iso/10**52), for calculation of E_p, add 52 to x @ end to get back normal values
if value=='o':
y=(1/2.01)*(x+3.87) #y is log(E_p/1keV)
elif value=='+':
y=(1/(2.01))*(x+(3.87+0.33))
elif value=='-':
y=(1/(2.01))*(x+(3.87-0.33))
else:
print('This isnt a correct option for value\n')
return 1e52*10**x,10**y | f7618f812dca45640376177383af2443085b6246 | 2,123 |
def load(name, final=False, torch=False, prune_dist=None):
"""
Returns the requested dataset.
:param name: One of the available datasets
:param final: Loads the test/train split instead of the validation train split. In this case the training data
consists of both training and validation.
:return: A pair (triples, meta). `triples` is a numpy 2d array of datatype uint32 containing integer-encoded
triples. `meta` is an object of metadata containing the following fields:
* e: The number of entities
* r: The number of relations
* i2r:
"""
if name == 'micro':
return micro(final, torch)
# -- a miniature dataset for unit testing
if name in ['aifb', 'am1k', 'amplus', 'dblp', 'mdgenre', 'mdgender', 'dmgfull', 'dmg777k']:
tic()
data = Data(here(f'../datasets/{name}'), final=final, use_torch=torch)
print(f'loaded data {name} ({toc():.4}s).')
else:
raise Exception(f'Dataset {name} not recognized.')
if prune_dist is not None:
tic()
data = prune(data, n=prune_dist)
print(f'pruned ({toc():.4}s).')
return data | 38f379076ba6f5562ab818113b319276f84bd081 | 2,124 |
def is_paragraph_debian_packaging(paragraph):
"""
Return True if the `paragraph` is a CopyrightFilesParagraph that applies
only to the Debian packaging
"""
return isinstance(
paragraph, CopyrightFilesParagraph
) and paragraph.files.values == ['debian/*'] | 726cd3d8c7cdfd14a55dc8bc9764cc9d037b1b63 | 2,125 |
def update_b(b, action_prob, yr_val, predict_mode):
"""Update new shape parameters b using the regression and classification output.
Args:
b: current shape parameters values. [num_examples, num_shape_params].
action_prob: classification output. [num_actions]=[num_examples, 2*num_shape_params]
yr_val: values of db to regress. yr=b-b_gt. [num_examples, num_shape_params]
predict_mode: 0: Hard classification. Move regressed distance only in the direction with maximum probability.
1: Soft classification. Multiply classification probabilities with regressed distances.
2: Regression only.
3: Classification only.
Returns:
b_new: new b after update. [num_examples, num_shape_params]
"""
if predict_mode == 0:
# Hard classification. Move regressed distance only in the direction with maximum probability.
ind = np.argmax(np.amax(np.reshape(action_prob, (b.shape[0], b.shape[1], 2)), axis=2), axis=1) # ind = [num_examples]
row_ind = np.arange(b.shape[0])
b[row_ind, ind] = b[row_ind, ind] - yr_val[row_ind, ind]
elif predict_mode == 1:
# Soft classification. Multiply classification probabilities with regressed distances.
b = b - yr_val * np.amax(np.reshape(action_prob, (b.shape[0], b.shape[1], 2)), axis=2)
elif predict_mode == 2:
# Regression only.
b = b - yr_val
elif predict_mode == 3:
# Classification only
step = 1
action_prob_reshape = np.reshape(action_prob, (b.shape[0], b.shape[1], 2))
ind = np.argmax(np.amax(action_prob_reshape, axis=2), axis=1) # ind=[num_examples]
row_ind = np.arange(b.shape[0])
is_negative = np.argmax(action_prob_reshape[row_ind, ind], axis=1) # is_negative=[num_examples]
# Move b in either positive or negative direction
b[row_ind[is_negative], ind[is_negative]] = b[row_ind[is_negative], ind[is_negative]] + step
b[row_ind[np.logical_not(is_negative)], ind[np.logical_not(is_negative)]] = b[row_ind[np.logical_not(is_negative)], ind[np.logical_not(is_negative)]] - step
return b | ba8535d538ae0e0ac44c452f2fbe94a686b8e5a1 | 2,126 |
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.merge_from_list(['MODEL.BUA.EXTRACT_FEATS',True])
cfg.merge_from_list(switch_extract_mode(args.extract_mode))
cfg.merge_from_list(set_min_max_boxes(args.min_max_boxes, args.mode))
cfg.freeze()
default_setup(cfg, args)
return cfg | 9dd4495a13c64d4832b889abdf94ffd01133c92a | 2,127 |
def _earth_distance(time='now'):
"""
Return the distance between the Sun and the Earth at a specified time.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
out : `~astropy.coordinates.Distance`
The Sun-Earth distance
"""
return get_earth(time).radius | c8646b7e2aa9b821a9740235d5cc263623bd0ec0 | 2,128 |
async def DELETE_Link(request):
"""HTTP method to delete a link"""
log.request(request)
app = request.app
group_id = request.match_info.get('id')
if not group_id:
msg = "Missing group id"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
if not isValidUuid(group_id, obj_class="Group"):
msg = f"Invalid group id: {group_id}"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
link_title = request.match_info.get('title')
validateLinkName(link_title)
username, pswd = getUserPasswordFromRequest(request)
await validateUserPassword(app, username, pswd)
domain = getDomainFromRequest(request)
if not isValidDomain(domain):
msg = f"domain: {domain}"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
bucket = getBucketForDomain(domain)
await validateAction(app, domain, group_id, username, "delete")
req = getDataNodeUrl(app, group_id)
req += "/groups/" + group_id + "/links/" + link_title
params = {}
if bucket:
params["bucket"] = bucket
rsp_json = await http_delete(app, req, params=params)
resp = await jsonResponse(request, rsp_json)
log.response(request, resp=resp)
return resp | 193d6cb86a820a7492c768aad0a0e22fac76198f | 2,129 |
def format_image(image):
"""
Function to format frame
"""
if len(image.shape) > 2 and image.shape[2] == 3:
# determine whether the image is color
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
# Image read from buffer
image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
cascade_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = cascade_classifier.detectMultiScale(image,scaleFactor = 1.3 ,minNeighbors = 5)
if not len(faces) > 0:
return None
# initialize the first face as having maximum area, then find the one with max_area
max_area_face = faces[0]
for face in faces:
if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
max_area_face = face
face = max_area_face
# extract ROI of face
image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]  # rows: y..y+h, cols: x..x+w
try:
# resize the image so that it can be passed to the neural network
image = cv2.resize(image, (48,48), interpolation = cv2.INTER_CUBIC) / 255.
except Exception:
print("----->Problem during resize")
return None
return image | 1649814cddab0037f89936d1a39af44d8d5203d9 | 2,130 |
def cpu_stats():
"""Return various CPU stats as a named tuple."""
ctx_switches, interrupts, syscalls, traps = cext.cpu_stats()
soft_interrupts = 0
return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
syscalls) | afdc9e95ba5d0b7760a1bbdf505b85f3fb0a0b7d | 2,131 |
def chi_squared(source_frequency, target_frequency):
"""Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic
"""
# Ignore any symbols from source that are not in target.
# TODO: raise Error if source_len is 0?
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
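# each symbol presumably contributes the standard chi-squared term
# (observed - expected)**2 / expected, with expected = prob * source_len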
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0) # frequency is 0 if it doesn't appear in source
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result | f076e78bc283a0f1ab8c2cd80a4a441ca1fb2692 | 2,132 |
def has_reacted(comment, user, reaction):
"""
Returns whether a user has reacted with a particular reaction on a comment or not.
"""
if user.is_authenticated:
reaction_type = getattr(ReactionInstance.ReactionType, reaction.upper(), None)
if not reaction_type:
raise template.TemplateSyntaxError(ReactionError.TYPE_INVALID.format(reaction_type=reaction))
return ReactionInstance.objects.filter(
user=user,
reaction_type=reaction_type.value,
reaction__comment=comment
).exists()
return False | 8cf537b204ae13c844e80a14b29f11e36d69097b | 2,133 |
import requests
def structure_query(compound, label='pyclassyfire'):
"""Submit a compound information to the ClassyFire service for evaluation
and receive a id which can be used to used to collect results
:param compound: The compound structures as line delimited inchikey or
smiles. Optionally a tab-separated id may be prepended for each
structure.
:type compound: str
:param label: A label for the query
:type label:
:return: A query ID number
:rtype: int
>>> structure_query('CCC', 'smiles_test')
>>> structure_query('InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)')
"""
r = requests.post(url + '/queries.json', data='{"label": "%s", '
'"query_input": "%s", "query_type": "STRUCTURE"}'
% (label, compound),
headers={"Content-Type": "application/json"})
r.raise_for_status()
return r.json()['id'] | cd7c0558dd61f493187169cea3562c96f63634d2 | 2,134 |
def ParseExistingMessageIntoMessage(message, existing_message, method):
"""Sets fields in message based on an existing message.
This function is used for get-modify-update pattern. The request type of
update requests would be either the same as the response type of get requests
or one field inside the request would be the same as the get response.
For example:
1) update.request_type_name = ServiceAccount
get.response_type_name = ServiceAccount
2) update.request_type_name = updateInstanceRequest
updateInstanceRequest.instance = Instance
get.response_type_name = Instance
If the existing message has the same type as the message to be sent for the
request, then return the existing message instead. If they are different, find
the field in the message which has the same type as existing_message, then
assign the existing message to that field.
Args:
message: the apitools message to construct a new request.
existing_message: the existing apitools message returned from the server.
method: APIMethod, the method to generate request for.
Returns:
A modified apitools message to be send to the method.
"""
if type(existing_message) == type(message): # pylint: disable=unidiomatic-typecheck
return existing_message
# For read-modify-update api calls, the field name would be the same level
# or the next level of the request.
# TODO(b/111069150): refactor this part, don't hard code.
existing_message_name = type(existing_message).__name__
field_name = existing_message_name[0].lower() + existing_message_name[1:]
field_path = ''
if method.request_field != field_name:
field_path += method.request_field
field_path += '.'
field_path += field_name
SetFieldInMessage(message, field_path, existing_message)
return message | 7c2e4d10be831106834aa519f4abed945ca85589 | 2,135 |
def create(*, db_session, ticket_in: TicketCreate) -> Ticket:
"""Creates a new ticket."""
ticket = Ticket(**ticket_in.dict())
db_session.add(ticket)
db_session.commit()
return ticket | 644bcccc56c8fd97ec3c888f6e38c1fc2afc3585 | 2,136 |
def blur(img):
"""
:param img: SimpleImage, an original image.
:return: img: SimpleImage, image with blurred effect.
"""
blank_img = SimpleImage.blank(img.width, img.height)
for y in range(img.height):
for x in range(img.width):
blurred = blank_img.get_pixel(x, y)
if x == 0 and y == 0:
"""
For 4 corners.
The new RGB values of original pixel is the average RGB values
of the original pixel and the other pixels around it.
"""
avg_red1 = (img.get_pixel(x, y).red +
img.get_pixel(x + 1, y).red +
img.get_pixel(x, y + 1).red +
img.get_pixel(x + 1, y + 1).red) / 4
avg_green1 = (img.get_pixel(x, y).green +
img.get_pixel(x + 1, y).green +
img.get_pixel(x, y + 1).green +
img.get_pixel(x + 1, y + 1).green) / 4
avg_blue1 = (img.get_pixel(x, y).blue +
img.get_pixel(x + 1, y).blue +
img.get_pixel(x, y + 1).blue +
img.get_pixel(x + 1, y + 1).blue) / 4
blurred.red = avg_red1
blurred.green = avg_green1
blurred.blue = avg_blue1
elif x == 0 and y == blank_img.height - 1:
avg_red2 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x + 1, y).red) / 4
avg_green2 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x + 1, y).green) / 4
avg_blue2 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x + 1, y).blue) / 4
blurred.red = avg_red2
blurred.green = avg_green2
blurred.blue = avg_blue2
elif x == blank_img.width - 1 and y == 0:
avg_red3 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red) / 4
avg_green3 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green) / 4
avg_blue3 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue) / 4
blurred.red = avg_red3
blurred.green = avg_green3
blurred.blue = avg_blue3
elif x == blank_img.width - 1 and y == blank_img.height - 1:
avg_red4 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x - 1, y).red) / 4
avg_green4 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x - 1, y).green) / 4
avg_blue4 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x - 1, y).blue) / 4
blurred.red = avg_red4
blurred.green = avg_green4
blurred.blue = avg_blue4
elif x == 0 and 0 < y < blank_img.height - 1:
"""
For 4 edges.
The new RGB values of original pixel is the average RGB values
of the original pixel and the other pixels around it.
"""
avg_red5 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x + 1, y).red +
img.get_pixel(x + 1, y + 1).red +
img.get_pixel(x, y + 1).red) / 6
avg_green5 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x + 1, y).green +
img.get_pixel(x + 1, y + 1).green +
img.get_pixel(x, y + 1).green) / 6
avg_blue5 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x + 1, y).blue +
img.get_pixel(x + 1, y + 1).blue +
img.get_pixel(x, y + 1).blue) / 6
blurred.red = avg_red5
blurred.green = avg_green5
blurred.blue = avg_blue5
elif x == blank_img.width - 1 and 0 < y < blank_img.height - 1:
avg_red6 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red) / 6
avg_green6 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green) / 6
avg_blue6 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue) / 6
blurred.red = avg_red6
blurred.green = avg_green6
blurred.blue = avg_blue6
elif y == 0 and 0 < x < blank_img.width - 1:
avg_red7 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red +
img.get_pixel(x + 1, y + 1).red +
img.get_pixel(x + 1, y).red) / 6
avg_green7 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green +
img.get_pixel(x + 1, y + 1).green +
img.get_pixel(x + 1, y).green) / 6
avg_blue7 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue +
img.get_pixel(x + 1, y + 1).blue +
img.get_pixel(x + 1, y).blue) / 6
blurred.red = avg_red7
blurred.green = avg_green7
blurred.blue = avg_blue7
elif y == blank_img.height - 1 and 0 < x < blank_img.width - 1:
avg_red8 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x + 1, y).red) / 6
avg_green8 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x + 1, y).green) / 6
avg_blue8 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x + 1, y).blue) / 6
blurred.red = avg_red8
blurred.green = avg_green8
blurred.blue = avg_blue8
else:
"""
For other area except the corners and edges.
The new RGB values of original pixel is the average RGB values
of the other pixels around it.
"""
avg_red9 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x + 1, y).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red +
img.get_pixel(x + 1, y + 1).red) / 9
avg_green9 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x + 1, y).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green +
img.get_pixel(x + 1, y + 1).green) / 9
avg_blue9 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x + 1, y).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue +
img.get_pixel(x + 1, y + 1).blue) / 9
blurred.red = avg_red9
blurred.green = avg_green9
blurred.blue = avg_blue9
return blank_img | 9a7ac5085aea610a26a626e1d53bd243de19ad9e | 2,137 |
def trans_pressure(src, dest="bar"):
"""
>>>
"""
return trans_basic_unit(src, dest, "pressure") | 120888c024e6158a6e26ab699f7f4b5583cbf243 | 2,138 |
import subprocess
def run_cmd(command: list) -> None:
"""Run `command` using `subprocess.Popen()`."""
show_info(f"Command: {' '.join(command)}")
if DRY_RUN:
show_info("Dry run mode enabled - won't run")
else:
try:
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
except Exception as exc:
show_error(exc, exit=1)
finally:
return stdout.decode("utf-8").rstrip("\n") | 3c17796f9d758c42989b594ed2796e1834f4ea2e | 2,139 |
def test_accelerated_bypass_method_against_old(c_ctrl_rr):
"""Confirm that my changes to the bypass method maintain the same
result as the old method"""
OLD_HTCONSTS = dassh.region_rodded.calculate_ht_constants(c_ctrl_rr)
def _calc_coolant_byp_temp_old(self, dz):
"""Calculate the coolant temperatures in the assembly bypass
channels at the axial level j+1
Parameters
----------
self : DASSH RoddedRegion object
dz : float
Axial step size (m)
Notes
-----
The coolant in the bypass channels is assumed to get no
power from neutron/gamma heating (that contribution to
coolant in the assembly interior is already small enough).
"""
# Calculate the change in temperature in each subchannel
dT = np.zeros((self.n_bypass,
self.subchannel.n_sc['bypass']['total']))
# self._update_coolant_byp_params(self.avg_coolant_byp_temp)
for i in range(self.n_bypass):
# This factor is in many terms; technically, the mass flow
# rate is already accounted for in constants defined earlier
# mCp = self.coolant.heat_capacity
# starting index to lookup type is after all interior
# coolant channels and all preceding duct and bypass
# channels
start = (self.subchannel.n_sc['coolant']['total']
+ self.subchannel.n_sc['duct']['total']
+ i * self.subchannel.n_sc['bypass']['total']
+ i * self.subchannel.n_sc['duct']['total'])
# end = start + self.subchannel.n_sc['bypass']['total']
for sci in range(0, self.subchannel.n_sc['bypass']['total']):
# The value of sci is the PYTHON indexing
# type_i = self.subchannel.type[sci + start] - 1
type_i = self.subchannel.type[sci + start]
# Heat transfer to/from adjacent subchannels
for adj in self.subchannel.sc_adj[sci + start]:
# if adj == 0:
if adj == -1:
continue
# type_a = self.subchannel.type[adj - 1] - 1
type_a = self.subchannel.type[adj]
# Convection to/from duct wall
# if type_a in [3, 4]:
if 3 <= type_a <= 4:
if sci + start > adj: # INTERIOR adjacent duct wall
byp_conv_const = \
OLD_HTCONSTS[type_i][type_a][i][0]
byp_conv_dT = \
(self.temp['duct_surf'][i, 1, sci]
- self.temp['coolant_byp'][i, sci])
else: # EXTERIOR adjacent duct wall
byp_conv_const = \
OLD_HTCONSTS[type_i][type_a][i][1]
byp_conv_dT = \
(self.temp['duct_surf'][i + 1, 0, sci]
- self.temp['coolant_byp'][i, sci])
dT[i, sci] += \
(self.coolant_byp_params['htc'][i, type_i - 5]
* dz * byp_conv_const * byp_conv_dT
/ self.coolant.heat_capacity)
# Conduction to/from adjacent coolant subchannels
else:
# sc_adj = adj - start - 1
sc_adj = adj - start
dT[i, sci] += \
(self.coolant.thermal_conductivity
* dz
* OLD_HTCONSTS[type_i][type_a][i]
* (self.temp['coolant_byp'][i, sc_adj]
- self.temp['coolant_byp'][i, sci])
/ self.coolant.heat_capacity)
return dT
dT = np.zeros(c_ctrl_rr.temp['coolant_byp'].shape)
dT_old = dT.copy()
dz = 0.01
start_temp = 623.15
for i in range(50):
duct_surf_temp = \
(np.random.random(c_ctrl_rr.temp['duct_surf'].shape)
+ (start_temp + i * 1.0))
c_ctrl_rr.temp['duct_surf'] = duct_surf_temp
dT_old += _calc_coolant_byp_temp_old(c_ctrl_rr, dz)
dT += c_ctrl_rr._calc_coolant_byp_temp(dz)
print(np.average(dT))
print(np.average(dT_old))
print('max abs diff: ', np.max(np.abs(dT - dT_old)))
assert np.allclose(dT, dT_old) | db6660b8ddc2f7ea409f7b334e4e161fceb743b2 | 2,140 |
from pathlib import Path
def retrieve_config(): # TODO: is this being used?
"""Retrieve configuration data.
Args:
None
Returns:
dict: The dictionary with configuration settings
"""
config = {}
# go 2 layer up
util_path = Path(__file__).parents[3]
config_path = util_path / 'configuration' / 'configuration-aml.variables.yml'
config = read_config_file(config_path)
return config['variables'] | 3b1519e7e8caa29878aaecabeb098d166bcf763c | 2,141 |
import logging
def vraec18(pretrained=False, **kwargs):
"""Constructs a _ResAE-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = _VRAEC(_VariationalBasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
except Exception as exp:
logging.warning(exp)
return model | 6b5e5a5d812b20c30bac3f81289a553bdc4884d4 | 2,142 |
import zlib
def encode_zip(data):
"""Zip-compress data. Implies base64 encoding of zip data."""
zipped = zlib.compress(data)
return encode_b64(zipped) | aa048125edd67a411715bf748bf832a6e6d7104f | 2,143 |
def create_class_mask(img, color_map, is_normalized_img=True, is_normalized_map=False, show_masks=False):
"""
Function to create C matrices from the segmented image, where each of the C matrices is for one class
with all ones at the pixel positions where that class is present
img = The segmented image
color_map = A list with tuples that contains all the RGB values for each color that represents
some class in that image
is_normalized_img = Boolean - Whether the image is normalized or not
If normalized, then the image is multiplied with 255
is_normalized_map = Boolean - Represents whether the color map is normalized or not, if so
then the color map values are multiplied with 255
show_masks = Whether to show the created masks or not
"""
if is_normalized_img and (not is_normalized_map):
img *= 255
if is_normalized_map and (not is_normalized_img):
img = img / 255
mask = []
hw_tuple = img.shape[:-1]
for color in color_map:
color_img = []
for idx in range(3):
color_img.append(np.ones(hw_tuple) * color[idx])
color_img = np.array(color_img, dtype=np.uint8).transpose(1, 2, 0)
mask.append(np.uint8((color_img == img).sum(axis = -1) == 3))
return np.array(mask) | 97452e568d0a29b438a61fc96d90231a318e919b | 2,144 |
import itertools
def reconstruct_grid(mask, ds_dl):
"""
Reconstruction of 2d grid.
Args:
mask (ndarray): land mask used.
ds_dl (ndarray): trained model prediction.
"""
landmask = np.argwhere(np.isnan(mask))
empty = np.zeros((ds_dl.shape[0], mask.shape[0], mask.shape[1]))
counter = 0
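# walk every grid cell: cells in the land mask stay NaN, ocean cells take the
# next column of the flattened prediction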
for i, j in itertools.product(list(range(mask.shape[0])),list(range(mask.shape[1]))):
if np.argwhere(np.logical_and(np.isin(landmask[:,0], i), np.isin(landmask[:,1], j))).shape[0] > 0:
empty[:, i, j] = np.nan
else:
empty[:, i, j] = ds_dl[:, counter]
counter += 1
return empty | 4d220e0d4ae96ee1ddc55e53f21f2a35d920b03e | 2,145 |
def conv_kernel_initializer(shape, dtype=None):
"""卷积核初始化
和 tf.variance_scaling_initializer最大不同之处就是在于,tf.variance_scaling_initializer 使用的是 truncated norm,
但是却具有未校正的标准偏差,而这里使用正态分布。类似地,tf.initializers.variance_scaling使用带有校正后的标准偏差。
Args:
shape: 卷积核的shape
dtype: 卷积核的dtype
Returns:
经过初始化后的卷积核
"""
kernel_height, kernel_width, input_filters, out_filters = shape
fan_out = int(kernel_height * kernel_width * out_filters)
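# He-style fan-out scaling: sample from a normal distribution with stddev = sqrt(2 / fan_out)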
return tf.random.normal(shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) | f7fd5665aeb8eb592a5f1f0f1785dfd84c9d8d98 | 2,146 |
def prediction_func(data, g_data, grid_search, param_list):
"""Function for using dataset to train a model and
predicting prices for a generated data.
Parameter search is done using RandomizedSearchCV since it is computationally
more efficientcompared to GridSearchCV.
In param_list, learning_rate, subsample and max_depth,
min_child_weight, gamma and colsample_bytree can be included.
Args:
| data (pd.Dataframe): the dataset including house features and prices
| g_data (pd.Dataframe): randomly generated house features for prediction purposes
| grid_search (bool): indicates whether model is trained with parameter
search(True) or use default values(False)
| param_list (list): the list of parameters to be included in parameter search
Returns:
the predicted prices for houses in g_data (np.array)
"""
# Base Model
xgb_reg = xgb.XGBRegressor(n_treads=-1)
if grid_search:
# Search for best parameters in model
params = {
"learning_rate": [i / 20 for i in range(1, 11)],
"min_child_weight": [i for i in range(3, 12)],
"gamma": [i / 10.0 for i in range(3, 8)],
"subsample": [i / 10.0 for i in range(7, 11)],
"colsample_bytree": [i / 10.0 for i in range(6, 11)],
"max_depth": [i for i in range(3, 8)],
}
# Only includes selected parameters
params = {key: params[key] for key in param_list}
xgb_reg = RandomizedSearchCV(
estimator=xgb_reg,
param_distributions=params,
n_iter=5,
cv=3,
random_state=23,
iid=False,
)
xgb_reg.fit(data.drop("price", axis=1), data.price)
return xgb_reg.predict(g_data) | b747578879054947e91e5285b82cf3e07fa313da | 2,147 |
def thv_to_zxy(theta, h):
"""Convert coordinates from (theta, h, v) to (z, x, y) space."""
cos_p = np.cos(theta)
sin_p = np.sin(theta)
srcx = +RADIUS * cos_p - h * sin_p
srcy = +RADIUS * sin_p + h * cos_p
detx = -RADIUS * cos_p - h * sin_p
dety = -RADIUS * sin_p + h * cos_p
return srcx, srcy, detx, dety | 64370dc6c4060a718506a243414afdd698881147 | 2,148 |
from datetime import datetime
def get_most_stale_file(logpath=DEFAULT_PATH):
"""
returns the filename of the file in the fileset that was least recently backed up
and the time of the last backup
"""
oldest_name = ""
oldest_date = datetime.max
for fstat in get_fileset_statlist():
last_backup = datetime.strptime(
get_last_upload_times(fstat[STAT_KEYS.SOURCE], n_times=1)[0],
TIME_FORMAT
)
if last_backup < oldest_date:
oldest_date = last_backup
oldest_name = fstat[STAT_KEYS.SOURCE]
return oldest_name, oldest_date | e0000847513ffeb97b8df0c26941ca4e3380f09d | 2,149 |
from typing import Mapping
from typing import Dict
import re
import logging
def get_instances(context: models.Context) -> Mapping[str, Instance]:
"""Get a list of Instance matching the given context, indexed by instance id."""
instances: Dict[str, Instance] = {}
if not apis.is_enabled(context.project_id, 'compute'):
return instances
gce_api = apis.get_api('compute', 'v1', context.project_id)
requests = [
gce_api.instances().list(project=context.project_id, zone=zone)
for zone in get_gce_zones(context.project_id)
]
items = apis_utils.batch_list_all(
api=gce_api,
requests=requests,
next_function=gce_api.instances().list_next,
log_text=f'listing gce instances of project {context.project_id}')
for i in items:
result = re.match(
r'https://www.googleapis.com/compute/v1/projects/[^/]+/zones/([^/]+)/',
i['selfLink'])
if not result:
logging.error('instance %s selfLink didn\'t match regexp: %s', i['id'],
i['selfLink'])
continue
zone = result.group(1)
labels = i.get('labels', {})
if not context.match_project_resource(location=zone, labels=labels):
continue
instances[i['id']] = Instance(project_id=context.project_id,
resource_data=i)
return instances | 10f4eae30b0a5c752c45378574ba4620bd859320 | 2,150 |
def UploadChanges():
"""Upload changes, don't prompt."""
# TODO(jfb) Using the commit queue and avoiding git try + manual commit
# would be much nicer. See '--use-commit-queue'
return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f']) | 3f1f3cb4a4a6250540079c614167300921a9cded | 2,151 |
def svn_fs_delete_fs(*args):
"""svn_fs_delete_fs(char const * path, apr_pool_t pool) -> svn_error_t"""
return _fs.svn_fs_delete_fs(*args) | 6e1f34d82899fc257c723990c55853b35f0b06d3 | 2,152 |
import theano.tensor as T
def translate_output(_output, n_classes, is_binary_classification=False):
""" Gets a matrix with one hot encoding where the 1 represents the index of the class.
Parameters
----------
_output : theano.tensor.matrix
Output sample.
n_classes : int
Number of classes (or size of one hot encoding rows)
is_binary_classification : bool
This flag means that model is for binary classification.
Returns
-------
theano.tensor.matrix
Returns one hot encoding.
"""
if is_binary_classification:
return T.sgn(_output)
else:
return to_one_hot(T.argmax(_output, axis=-1), n_classes) | 03137e6b0704477a69211d454ee5e05a5ab02636 | 2,153 |
import os
import sys
def get_agent_config_vars():
""" Read and parse config.ini """
if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, 'config.ini'))):
config_parser = ConfigParser.SafeConfigParser()
config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, 'config.ini')))
try:
file_path = config_parser.get('agent', 'file_path')
# filters
filters_include = config_parser.get('agent', 'filters_include')
filters_exclude = config_parser.get('agent', 'filters_exclude')
# message parsing
json_top_level = config_parser.get('agent', 'json_top_level') # 'Event'
#project_field = config_parser.get('agent', 'project_field')
instance_field = config_parser.get('agent', 'instance_field') # 'System.Computer'
device_field = config_parser.get('agent', 'device_field') # 'System.Provider.@Name'
timestamp_field = config_parser.get('agent', 'timestamp_field') or 'timestamp' # 'System.TimeCreated.@SystemTime'
timestamp_format = config_parser.get('agent', 'timestamp_format', raw=True) or 'epoch'
data_fields = config_parser.get('agent', 'data_fields')
except ConfigParser.NoOptionError:
logger.error('Agent not correctly configured. Check config file.')
sys.exit(1)
if len(file_path) != 0:
file_regex = r".*\.evtx$|.*\.evt$"
files = file_path.split(',')
if len(files) > 1:
# get evtx files and files within directories
logger.debug(files)
files = [ i for j in
map(lambda k:
get_file_list_for_directory(k, file_regex),
files)
for i in j if i]
else:
files = get_file_list_for_directory(files[0], file_regex)
else:
logger.warning('Agent not correctly configured (file_path). Check config file.')
sys.exit(1)
# filters
if len(filters_include) != 0:
filters_include = filters_include.split('|')
if len(filters_exclude) != 0:
filters_exclude = filters_exclude.split('|')
if len(data_fields) != 0:
data_fields = data_fields.split(',')
# timestamp format
timestamp_format = timestamp_format.partition('.')[0]
if '%z' in timestamp_format or '%Z' in timestamp_format:
ts_format_info = strip_tz_info(timestamp_format)
else:
ts_format_info = {'strip_tz': False, 'strip_tz_fmt': '', 'timestamp_format': timestamp_format}
# add parsed variables to a global
config_vars = {
'files': files,
'filters_include': filters_include,
'filters_exclude': filters_exclude,
'data_format': 'JSON',
'json_top_level': json_top_level,
'project_field': '',
'instance_field': instance_field,
'device_field': device_field,
'data_fields': data_fields,
'timestamp_field': timestamp_field,
'timestamp_format': ts_format_info['timestamp_format'],
'strip_tz': ts_format_info['strip_tz'],
'strip_tz_fmt': ts_format_info['strip_tz_fmt']
}
return config_vars
else:
logger.warning('No config file found. Exiting...')
exit() | 52ff56e7a21e7a3d81fe05b9fd5f4a447971b129 | 2,154 |
from math import atan2, acos, pi
def _sphere_point_to_uv(point: Point) -> Vec2d:
"""Convert a 3D point on the surface of the unit sphere into a (u, v) 2D point"""
u = atan2(point.y, point.x) / (2.0 * pi)
return Vec2d(
u=u if u >= 0.0 else u + 1.0,
v=acos(point.z) / pi,
) | c0eb4abb1ebc55f74b908a85f0cb94f71a528c32 | 2,155 |
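# A quick sanity check of the (u, v) mapping used by _sphere_point_to_uv() above,
# with a hypothetical namedtuple standing in for the project's own Point type.
from collections import namedtuple
from math import atan2, acos, pi
Point3 = namedtuple("Point3", "x y z")   # stand-in, not the real Point class
p = Point3(1.0, 0.0, 0.0)                # on the equator at longitude 0
u = atan2(p.y, p.x) / (2.0 * pi)         # 0.0
v = acos(p.z) / pi                       # 0.5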
import tqdm
def generate_formula_dict(materials_store, query=None):
"""
Function that generates a nested dictionary of structures
keyed first by formula and then by task_id using
mongo aggregation pipelines
Args:
        materials_store (Store): store of materials
        query (dict, optional): mongo-style criteria used to filter the materials
    Returns:
Nested dictionary keyed by formula-mp_id with structure values.
"""
props = ["pretty_formula", "structure", "task_id", "magnetic_type"]
results = list(materials_store.groupby("pretty_formula", properties=props,
criteria=query))
formula_dict = {}
for result in tqdm.tqdm(results):
formula = result['_id']['pretty_formula']
task_ids = [d['task_id'] for d in result['docs']]
structures = [d['structure'] for d in result['docs']]
formula_dict[formula] = dict(zip(task_ids, structures))
return formula_dict | ae232c806972262029966307e489df0b12d646f5 | 2,156 |
import os
def default_config():
"""Provides a default configuration file location."""
return os.path.expanduser('~/.config/discogstagger/discogs_tagger.conf') | 2b86700484916ea2f6c47935ec8a43aa0d920184 | 2,157 |
def truncate(wirevector_or_integer, bitwidth):
""" Returns a wirevector or integer truncated to the specified bitwidth
    :param wirevector_or_integer: Either a wirevector or an integer to be truncated
    :param bitwidth: The length to which the first argument should be truncated.
    :return: Returns a truncated wirevector or integer as appropriate
    This function truncates the most significant bits of the input, leaving a result
    that is only "bitwidth" bits wide. For integers this is performed with a simple
    bitmask of size "bitwidth". For wirevectors the function calls WireVector.truncate
    and returns a wirevector of the specified bitwidth.
    Examples: ::
        truncate(9, 3)   # returns 1 (0b1001 truncates to 0b001)
        truncate(5, 3)   # returns 5 (0b101 truncates to 0b101)
        truncate(-1, 3)  # returns 7 (-0b1 truncates to 0b111)
        y = truncate(x+1, x.bitwidth)  # y.bitwidth will equal x.bitwidth
"""
if bitwidth < 1:
raise PyrtlError('bitwidth must be a positive integer')
x = wirevector_or_integer
try:
return x.truncate(bitwidth)
except AttributeError:
return x & ((1 << bitwidth)-1) | 7ff6d22061944f4202bc69dfde109c1cead20972 | 2,158 |
import numpy as np
from math import sqrt, tan, pi
def pcoef(xte, yte, rle, x_cre, y_cre, d2ydx2_cre, th_cre, surface):
# Docstrings
"""evaluate the PARSEC coefficients"""
# Initialize coefficients
coef = np.zeros(6)
# 1st coefficient depends on surface (pressure or suction)
if surface.startswith('p'):
coef[0] = -sqrt(2*rle)
else:
coef[0] = sqrt(2*rle)
# Form system of equations
A = np.array([
[xte**1.5, xte**2.5, xte**3.5, xte**4.5, xte**5.5],
[x_cre**1.5, x_cre**2.5, x_cre**3.5, x_cre**4.5,
x_cre**5.5],
[1.5*sqrt(xte), 2.5*xte**1.5, 3.5*xte**2.5,
4.5*xte**3.5, 5.5*xte**4.5],
[1.5*sqrt(x_cre), 2.5*x_cre**1.5, 3.5*x_cre**2.5,
4.5*x_cre**3.5, 5.5*x_cre**4.5],
[0.75*(1/sqrt(x_cre)), 3.75*sqrt(x_cre), 8.75*x_cre**1.5,
15.75*x_cre**2.5, 24.75*x_cre**3.5]
])
B = np.array([
[yte - coef[0]*sqrt(xte)],
[y_cre - coef[0]*sqrt(x_cre)],
[tan(th_cre*pi/180) - 0.5*coef[0]*(1/sqrt(xte))],
[-0.5*coef[0]*(1/sqrt(x_cre))],
[d2ydx2_cre + 0.25*coef[0]*x_cre**(-1.5)]
])
# Solve system of linear equations
# X = np.linalg.solve(A,B)
X = np.linalg.lstsq(A,B)[0]
# Gather all coefficients
coef[1:6] = X[0:5,0]
# Return coefficients
return coef | 43cc56ec7f29267678ebbc3572633e5073cda117 | 2,159 |
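# An illustrative call to pcoef() above for a suction surface. The parameter values
# (trailing edge at (1, 0), leading-edge radius, crest location, crest curvature and
# an angle in degrees) are made-up round numbers for demonstration, not a reference
# airfoil.
coef_suction = pcoef(xte=1.0, yte=0.0, rle=0.01,
                     x_cre=0.3, y_cre=0.06, d2ydx2_cre=-0.45,
                     th_cre=-10.0, surface='suction')
# coef_suction is a length-6 array of PARSEC polynomial coefficients.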
import psutil
def cpu_usage(self, max_cpu_percentage=80):
"""Limit max cpu usage
"""
if psutil.cpu_percent() < max_cpu_percentage:
hevlog.logging.debug('[cpu usage] {}%'.format(psutil.cpu_percent()))
return True
else:
hevlog.logging.debug('[cpu usage] {}%'.format(psutil.cpu_percent()))
return False | c4213ed768351a5d9e4e8a14bd951a5eb9f3b2ef | 2,160 |
import datetime
import time
import math
import os
import pandas as pd
import numpy as np
import nltk
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def search_fromCSV(Data,holy_array, bookmark = 0):
"""
holy array: array of words that is going to be compared with Twitter's text data
"""
print("Initializing crawler")
WINDOW_SIZE = "1920,1080"
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=%s" % WINDOW_SIZE)
chrome_options.add_argument('--no-sandbox')
driver = webdriver.Chrome(chrome_options=chrome_options)
try:
for l in range(bookmark,len(Data)):
# Here we loop the automated query cycle
term = Data["Company"].iloc[l]
foundation = datetime.datetime.strptime(Data["FundDate"].iloc[l],"%Y-%m-%d").date() - datetime.timedelta(17+30*(month_before_funding-1))
bracket = foundation - datetime.timedelta(30)
bracko = bracket.strftime("%Y-%m-%d")
# Replacing special characters that will be in query
if "&" in term:
term = term.replace("&","%26")
if "#" in term:
term = term.replace("#","%23")
if "\\" in term:
term = term.replace("\\","/")
driver.get(
'https://twitter.com/search?q=\"' +
term+'\"' + '%20until%3A{}%20since%3A{}'.format(foundation,bracko) +'&src=typed_query&f=live'
)
# There must be a second between queries, at least.
try:
WebDriverWait(driver, 2.5).until(
trinity_condition()
)
# What's this, it just continues??
except TimeoutException:
#Add case when no internet
driver.quit()
return False
#Tie to database to mark a problem
try:
ak = time.perf_counter()
WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH,"//div[@data-testid='emptyState']"))
)
u = driver.find_element_by_xpath("//div[@data-testid='emptyState']")
um = pd.Series(Data["tweets"])
um[l] = 0
Data["tweets"] = um
links = {("EMPTY_RESULT",Data["Company"].iloc[l],None,None,None,bracket)}
print("ID: {}".format(Data["ID"].iloc[l]))
print(links)
except TimeoutException:
# Put except
links=set()
u = True
# This will need tweaking too
count_scrap = time.perf_counter()
while u == True:
# Reference /html/body/div/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/div/section/div/div/div[x]/div/div/article
time.sleep(0.2)
if math.trunc(driver.execute_script("return document.body.scrollHeight;") - driver.execute_script("return document.documentElement.scrollTop;") - driver.execute_script("return window.innerHeight;")) == 0:
try:
WebDriverWait(driver, 1.5).until(
EC.presence_of_element_located((By.XPATH,"//div[@role='progressbar']"))
)
# Add here what happens when Twitter loads eternally
except TimeoutException:
u = False
try:
WebDriverWait(driver, 5).until_not(
EC.presence_of_element_located((By.XPATH,"//div[@role='progressbar']"))
)
except TimeoutException:
um = pd.Series(Data["tweets"])
um[l] = 0
Data["tweets"] = um
u = False
links = links.union(extract(driver))
# u=False or break??
# Why is this here? Relocate to extract
if 1756 + driver.execute_script("return document.documentElement.scrollTop;") >= driver.execute_script("return document.body.scrollHeight;"):
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
else:
driver.execute_script("window.scrollTo(0, document.documentElement.scrollTop + 1756*2);")
try:
u = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/main/div/div/div/div/div/div[2]/div/div/div/div[2]/div/span/span")
driver.quit()
return False
except:
print("{} links found, word: {} (ID: {}) in {} seconds".format(len(links),term,Data["ID"].iloc[l],time.perf_counter()-count_scrap))
print(links)
uk = time.perf_counter()
print("Search of {} items takes {} seconds".format(len(links),uk-ak))
lengths = pd.Series(Data["tweets"])
lengths[l] = len(links)
Data["tweets"] = lengths
print(Data[["Company","tweets"]])
acceptable = [t>14 for t in Data["tweets"]]
ouaga = acceptable.count(True)
if l +1 - bookmark== 0: perc = 0
else: perc = ouaga*100/(l+1- bookmark)
print("Valid companies: {} out of {}. ({}%), with an average amount of Tweets of {}".format(ouaga, l+1-bookmark, perc, np.mean(Data["tweets"].iloc[acceptable])))
# Don't connect to database if no tweets gathered
stops = nltk.corpus.stopwords.words("english")
def clean(tokens,stops):
tokens = pd.Series(x for x in tokens if not x in stops)
l = nltk.wordnet.WordNetLemmatizer()
lemmatized = []
for word, tag in nltk.pos_tag(tokens):
if tag.startswith('NN'):
pos = 'n'
elif tag.startswith('VB'):
pos = 'v'
else:
pos = 'a'
lemmatized.append(l.lemmatize(word, pos))
return pd.Series(lemmatized)
def select(lista, holy_array):
if (list(lista.values) == list(clean(nltk.tokenize.word_tokenize("TweetsNotFound", "english"),stops).values) or list(lista.values) == list(clean(nltk.tokenize.word_tokenize("EMPTY_RESULT", "english"),stops).values)):
return [j for j in lista]
else:
return [j for j in lista if j in holy_array.values]
storage.add_tweets([(str(" ".join(select(clean(nltk.tokenize.word_tokenize(z[0]),stops),holy_array))),fiki.polarity_scores(z[0])["compound"],fiki.polarity_scores(z[0])["pos"],fiki.polarity_scores(z[0])["neg"],fiki.polarity_scores(z[0])["neu"], Data["Company"].iloc[l], z[3], z[2], z[1],bracket) for z in links])
reportime = (datetime.timedelta(hours=2) + datetime.datetime.now()).strftime("%H:%M:%S-%Y/%m/%d")
k = open("data/errorLog.txt", "a")
k.write("Checkpoint saved {} at time {} at company {} and company ID {} with {} tweets".format(bracko, reportime, Data["Company"].iloc[l], Data["ID"].iloc[l], len(links) ) + os.linesep)
k.close()
return True
finally:
pass
print("Final Dataset")
print(Data[["Company","tweets"]])
driver.quit() | 01b98fd2650a4ebbdb7ce1e9a995483e8b40357f | 2,161 |
def iscircular(linked_list):
"""
Determine whether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
"""
slow_runner = linked_list.head
fast_runner = linked_list.head
    while fast_runner is not None and fast_runner.next is not None:
slow_runner = slow_runner.next
fast_runner = fast_runner.next.next
if slow_runner == fast_runner:
return True
return False | 04f86497dae2a2ee77afd37f13bdba8e18ae52b9 | 2,162 |
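# A small usage sketch for iscircular() above, using hypothetical minimal Node and
# LinkedList stand-ins (the real classes in this codebase may differ).
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None

ll = LinkedList()
ll.head = Node(1)
ll.head.next = Node(2)
ll.head.next.next = Node(3)
print(iscircular(ll))               # False
ll.head.next.next.next = ll.head    # close the loop
print(iscircular(ll))               # True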
def shape_extent_to_header(shape, extent, nan_value=-9999):
""" Create a header dict with shape and extent of an array
"""
ncols = shape[1]
nrows = shape[0]
xllcorner = extent[0]
yllcorner = extent[2]
cellsize_x = (extent[1]-extent[0])/ncols
cellsize_y = (extent[3]-extent[2])/nrows
if cellsize_x != cellsize_y:
raise ValueError('extent produces different cellsize in x and y')
cellsize = cellsize_x
header = {'ncols':ncols, 'nrows':nrows,
'xllcorner':xllcorner, 'yllcorner':yllcorner,
'cellsize':cellsize, 'NODATA_value':nan_value}
return header | 957b59e7f464901a5430fd20ab52f28507b55887 | 2,163 |
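# A worked example for shape_extent_to_header() above: a 100 x 200 array covering
# x in [0, 2000] and y in [0, 1000] gives a 10-unit cell size in both directions.
hdr = shape_extent_to_header((100, 200), (0, 2000, 0, 1000))
# hdr == {'ncols': 200, 'nrows': 100, 'xllcorner': 0, 'yllcorner': 0,
#         'cellsize': 10.0, 'NODATA_value': -9999}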
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.rnn_size,
opt.heads, opt.transformer_ff,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.wals_model, opt.rnn_type, opt.brnn, opt.enc_layers,
opt.rnn_size, opt.wals_size, opt.dropout, embeddings,
opt.bridge) | 73b379545aeeb3226ea019cad0a692b00cd7630b | 2,164 |
import glob
import os
import numpy as np
import imageio
def load_rendered_images_object_type(resources_path, n_channels, mode="render"):
"""
Import images from the resources dir with certain number of channels
    :param resources_path: Dir path from where images are fetched
    :param n_channels: Number of color channels to keep for each image
    :param mode: One of "render", "angles" or "circle"; controls how the labels are encoded
    :return: Tuple of (images array, labels array)
"""
path_list = list(glob.glob(resources_path + '/*.png'))
file_list = [os.path.basename(x) for x in path_list]
object_list = []
render_numbers = np.array([int(x.split("_")[-2]) for x in file_list])
x_train = np.array([imageio.imread(x)[:, :, :n_channels] for x in path_list])
if mode == "angles":
labels = 2 * np.pi * render_numbers / np.amax(render_numbers)
elif mode == "circle":
angles = 2 * np.pi * render_numbers / np.amax(render_numbers)
labels = np.zeros((len(angles), 2))
labels[:, 0] = np.cos(angles)
labels[:, 1] = np.sin(angles)
else:
labels = render_numbers
return x_train, labels | b3aca205c0b5c07a115504bf233dafc12b41ca5e | 2,165 |
def efficientnet_b3b(in_size=(300, 300), **kwargs):
"""
EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b3b",
**kwargs) | 1d7e0bffe67f9d2f340563b21e1f995201877165 | 2,166 |
import logging
def logged(class_):
"""Class-level decorator to insert logging.
This assures that a class has a ``.log`` member.
::
@logged
class Something:
def __init__(self, args):
self.log(f"init with {args}")
"""
    class_.log = logging.getLogger(class_.__qualname__)
return class_ | cd58e355151ab99aa1694cbd9fb6b710970dfa19 | 2,167 |
def TableInFirstNSStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder) | 5ea3cf66842eaf026a36bb241c277076cc8650b8 | 2,168 |
def dicom_to_nifti(dicom_input, output_file=None):
"""
    This is the main dicom to nifti conversion function for Siemens images.
    As input Siemens images are required. It will then determine the type of images and do the correct conversion
:param output_file: filepath to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_siemens(dicom_input)
# remove duplicate slices based on position and data
dicom_input = convert_generic.remove_duplicate_slices(dicom_input)
# remove localizers based on image type
dicom_input = convert_generic.remove_localizers_by_imagetype(dicom_input)
# remove_localizers based on image orientation (only valid if slicecount is validated)
dicom_input = convert_generic.remove_localizers_by_orientation(dicom_input)
if _is_4d(dicom_input):
logger.info('Found sequence type: MOSAIC 4D')
return _mosaic_4d_to_nifti(dicom_input, output_file)
grouped_dicoms = _classic_get_grouped_dicoms(dicom_input)
if _is_classic_4d(grouped_dicoms):
logger.info('Found sequence type: CLASSIC 4D')
return _classic_4d_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | 0b77d190c2379e9b2ad5fbf9217e1604a7df8bc9 | 2,169 |
import numpy as np
def lot_vectors_dense_internal(
sample_vectors,
sample_distributions,
reference_vectors,
reference_distribution,
metric=cosine,
max_distribution_size=256,
chunk_size=256,
spherical_vectors=True,
):
"""Efficiently compute linear optimal transport vectors for
a block of data provided as a list of distributions and a
corresponding list of arrays of vectors.
Parameters
----------
sample_vectors: numba.typed.List of ndarrays
A set of vectors for each distribution.
sample_distributions: numba.typed.List of ndarrays
A set of distributions (1d arrays that sum to one). The ith element of a given
distribution is the probability mass on the ith row of the corresponding entry
in the ``sample_vectors`` list.
reference_vectors: ndarray
The reference vector set for LOT
reference_distribution: ndarray
The reference distribution over the set of reference vectors
metric: function(ndarray, ndarray) -> float
The distance function to use for distance computation
max_distribution_size: int (optional, default=256)
The maximum size of a distribution to consider; larger
distributions over more vectors will be truncated back
to this value for faster performance.
chunk_size: int (optional, default=256)
Operations will be parallelised over chunks of the input.
This specifies the chunk size.
spherical_vectors: bool (optional, default=True)
Whether the vectors live on an n-sphere instead of euclidean space
and thus require some degree of spherical correction.
Returns
-------
lot_vectors: ndarray
        The raw linear optimal transport vectors corresponding to the input.
"""
n_rows = len(sample_vectors)
result = np.zeros((n_rows, reference_vectors.size), dtype=np.float64)
n_chunks = (n_rows // chunk_size) + 1
for n in range(n_chunks):
chunk_start = n * chunk_size
chunk_end = min(chunk_start + chunk_size, n_rows)
for i in range(chunk_start, chunk_end):
row_vectors = sample_vectors[i].astype(np.float64)
row_distribution = sample_distributions[i]
if row_vectors.shape[0] > max_distribution_size:
best_indices = np.argsort(-row_distribution)[:max_distribution_size]
row_vectors = row_vectors[best_indices]
row_distribution = row_distribution[best_indices]
row_sum = row_distribution.sum()
if row_sum > 0.0:
row_distribution /= row_sum
if row_vectors.shape[0] > reference_vectors.shape[0]:
cost = chunked_pairwise_distance(
row_vectors, reference_vectors, dist=metric
)
else:
cost = chunked_pairwise_distance(
reference_vectors, row_vectors, dist=metric
).T
current_transport_plan = transport_plan(
row_distribution, reference_distribution, cost
)
transport_images = (
current_transport_plan * (1.0 / reference_distribution)
).T @ row_vectors
if spherical_vectors:
l2_normalize(transport_images)
transport_vectors = transport_images - reference_vectors
if spherical_vectors:
tangent_vectors = project_to_sphere_tangent_space(
transport_vectors, reference_vectors
)
l2_normalize(tangent_vectors)
scaling = tangent_vectors_scales(
transport_images, reference_vectors
)
transport_vectors = tangent_vectors * scaling
result[i] = transport_vectors.flatten()
# Help the SVD preserve spherical data by sqrt entries
if spherical_vectors:
for i in range(result.shape[0]):
for j in range(result.shape[1]):
result[i, j] = np.sign(result[i, j]) * np.sqrt(np.abs(result[i, j]))
return result | d7f9eaad6b7292f2c28621f361094a88e7deb8a6 | 2,170 |
def load(
filename,
rsc_file=None,
rows=None,
cols=None,
band=1,
**kwargs,
):
"""Load a file, either using numpy or rasterio"""
if rsc_file:
rsc_data = load_rsc(rsc_file)
return load_stacked_img(filename, rsc_data=rsc_data, rows=rows, cols=cols)
else:
        try:
            import rasterio as rio
        except ImportError:
            raise ValueError("Need to `conda install rasterio` to load gdal-readable")
with rio.open(filename) as src:
return src.read(band) | 873933b80b7e87f64b10ad74cc8ed25238a93fb3 | 2,171 |
def simple_scan_network():
"""
Do a simple network scan, which only works if your network configuration
is 192.168.1.x
"""
base_ip = "192.168.1."
addresses = ['127.0.0.1']
for index in range(1, 255):
addresses.extend([base_ip + str(index)])
return addresses | b0f19ae1c98678e87d270b308b5359df9a6a4d30 | 2,172 |
import os
import argparse
def check_valid_file_or_folder(value):
"""verifies filename exists and isn't a link"""
if value is not None:
if not os.path.isfile(value) and not os.path.isdir(value):
raise argparse.ArgumentTypeError("{} does not exist or is not a file/folder.".
format(value))
check_for_link(value)
return value | bd6a2149f1092c28d634caf8eb6110b32fc2b5a8 | 2,173 |
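# A usage sketch for check_valid_file_or_folder() above, assuming the module's own
# check_for_link helper is available: validators that raise argparse.ArgumentTypeError
# are meant to be wired in as an argparse `type=` callable, so a bad path is reported
# as a normal argument error.
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=check_valid_file_or_folder,
                    help="existing file or folder (must not be a symlink)")
args = parser.parse_args(["--input", "."])   # "." exists, so validation passes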
import ROOT
def channel_lvlv_2jet():
""" Mostly based on table 8 of the combination paper for the uncertainties and
table 9 for the event counts. """
channel = ROOT.RooStats.HistFactory.Channel( "HWWlvlv2Jet" )
container.append(channel)
channel.SetData(55)
background = ROOT.RooStats.HistFactory.Sample("background")
background.SetValue(36*1.1)
# background.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036)
# background.AddOverallSys("JES", 0.93, 1.07)
channel.AddSample(background)
container.append(background)
signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH")
signalGGFttH.SetValue(10.9*1.00*0.19) # increase by a factor for better agreement with ATLAS contour
signalGGFttH.AddNormFactor("mu", 1, 0, 6)
signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10)
signalGGFttH.AddNormFactor("muT_lvlv", 1, -5, 10)
signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036)
signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13)
signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 0.96, 1.04)
signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH3in", 0.96, 1.04)
signalGGFttH.AddOverallSys("QCDscale_Higgs_acceptance_2jet", 0.97, 1.03)
signalGGFttH.AddOverallSys("UE_2jet", 0.95, 1.05)
signalGGFttH.AddOverallSys("JES", 0.94, 1.06)
channel.AddSample(signalGGFttH)
container.append(signalGGFttH)
signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH")
signalVBFVH.SetValue(10.9*1.000*0.81) # increase by a factor for better agreement with ATLAS contour
signalVBFVH.AddNormFactor("mu", 1, 0, 6)
signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10)
signalVBFVH.AddNormFactor("muW_lvlv", 1, -5, 10)
signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036)
signalVBFVH.AddOverallSys("UE_2jet", 0.95, 1.05)
signalVBFVH.AddOverallSys("JES", 0.94, 1.06)
channel.AddSample(signalVBFVH)
container.append(signalVBFVH)
return channel | f60609a0bf6f22dc850fcb52c4a19b6bae737abc | 2,174 |
import vtk
def vtkVariantStrictEquality(s1, s2):
"""
Check two variants for strict equality of type and value.
"""
s1 = vtk.vtkVariant(s1)
s2 = vtk.vtkVariant(s2)
t1 = s1.GetType()
t2 = s2.GetType()
# check based on type
if t1 != t2:
return False
v1 = s1.IsValid()
v2 = s2.IsValid()
# check based on validity
if (not v1) and (not v2):
return True
elif v1 != v2:
return False
# extract and compare the values
r1 = getattr(s1, _variant_method_map[t1])()
r2 = getattr(s2, _variant_method_map[t2])()
return (r1 == r2) | cb529c35f6dfc7e20fcff79d5c38b41bd43f1292 | 2,175 |
def is_network_failure(error):
"""Returns True when error is a network failure."""
return ((isinstance(error, RETRY_URLLIB_EXCEPTIONS)
and error.code in RETRY_HTTP_CODES) or
isinstance(error, RETRY_HTTPLIB_EXCEPTIONS) or
isinstance(error, RETRY_SOCKET_EXCEPTIONS) or
isinstance(error, RETRY_REQUESTS_EXCEPTIONS) or
is_retriable_requests_httperror(error)) | 647d10b257b1cb7f78243629edd2b425104f1787 | 2,176 |
import torch
def predict(model, X, threshold=0.5):
"""Generate NumPy output predictions on a dataset using a given model.
Args:
        model (torch model): A PyTorch model
        X (dataframe): A dataframe-based gene dataset to predict on
        threshold (float): Decision threshold applied to the model output
"""
X_tensor, _ = convert_dataframe_to_tensor(X, [])
model.eval()
with torch.no_grad():
y_pred = (model(X_tensor) >= threshold).int().numpy()
return y_pred | 57b6137cc8f7e0753e6438432f56b471717a5d88 | 2,177 |
import cv2
import numpy as np
def color_image(
img: np.ndarray, unique_colors=True, threshold=100, approximation_accuracy=150
) -> np.ndarray:
"""
This function detects simple shapes in the image and colors them.
Detected figures will be also subscribed in the final image. The function
can detect triangles, quadrilateral, and circles; any other figure will be
marked "UNEXPECTED".
The algorithm uses OpenCV to find contours on a grayscale version of
the image. Then it uses a polygon approximation algorithm to reduce the
number of vertices in contours. The resulted polygons are used to identify
and color figures in the image.
parameters:
img - image with figures to color
        unique_colors - flag to color all figures in unique colors,
            independent of the number of vertices. The default behavior is
            coloring all figures of the same type in one color
        threshold - background threshold for the grayscale image, used to
            separate figures from the background
approximation_accuracy - accuracy of polygon approximation for
detected contours
output:
the image with colored and subscribed figures
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply threshold
thresholded_im = np.zeros(img.shape[:2], dtype=np.uint8)
thresholded_im[gray > threshold] = 255
contours, _ = cv2.findContours(
thresholded_im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
if unique_colors:
colors = gen_colors(len(contours))
for i, contour in enumerate(contours):
# find positions of vertices to count them
# we need some value to estimate approximation accuracy - let it be perimeter
object_perimeter = cv2.arcLength(contour, closed=True)
approx = cv2.approxPolyDP(
contour, epsilon=object_perimeter / approximation_accuracy, closed=True
)
n_vertices = len(approx)
# find object centers
# M = cv2.moments(contour)
x, y = approx.squeeze().mean(axis=0).astype(int)
# offset to the left for x
x = (x + 2 * approx[:, 0, 0].min()) // 3
# COLORING PART
# highlight contours
cv2.drawContours(img, [contour], 0, (255, 255, 255), 4)
# fill the object
if unique_colors:
color = colors[i].tolist()
else:
color = get_color_for_figure(n_vertices)
cv2.fillPoly(img, pts=[contour], color=color)
# subscribe the figure
print_figure_name(img, n_vertices, (x, y))
return img | 5637febc69dcc2b3e641f0d79f2e21c6dc7d04ec | 2,178 |
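# A minimal usage sketch for color_image() above; "shapes.png" is a placeholder
# path for any image with light figures on a dark background.
img = cv2.imread("shapes.png")
colored = color_image(img, unique_colors=False, threshold=100)
cv2.imwrite("shapes_colored.png", colored)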
from pathlib import Path
from tensorflow.contrib import predictor  # TF 1.x contrib predictor API
def restore_model(pb_path):
"""Restore the latest model from the given path."""
subdirs = [x for x in Path(pb_path).iterdir()
if x.is_dir() and 'temp' not in str(x)]
latest_model = str(sorted(subdirs)[-1])
predict_fn = predictor.from_saved_model(latest_model)
return predict_fn | bded95b196081e19ca1c70127871abb99d3526d0 | 2,179 |
import math
def _generate_resolution_shells(low, high):
"""Generate 9 evenly spaced in reciprocal space resolution
shells from low to high resolution, e.g. in 1/d^2."""
dmin = (1.0 / high) * (1.0 / high)
dmax = (1.0 / low) * (1.0 / low)
diff = (dmin - dmax) / 8.0
shells = [1.0 / math.sqrt(dmax)]
for j in range(8):
shells.append(1.0 / math.sqrt(dmax + diff * (j + 1)))
return shells | 52fa4309f2f34a39a07d8524dd7f226e3d1bae6a | 2,180 |
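# Example for _generate_resolution_shells() above: the shells run from the low- to
# the high-resolution limit, evenly spaced in 1/d^2, with 9 entries in total.
shells = _generate_resolution_shells(50.0, 1.5)
# len(shells) == 9; shells[0] == 50.0 and shells[-1] == 1.5 (up to rounding)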
from typing import Optional
from typing import Tuple
def add_ports_from_markers_square(
component: Component,
pin_layer: Layer = (69, 0),
port_layer: Optional[Layer] = None,
orientation: Optional[int] = 90,
min_pin_area_um2: float = 0,
max_pin_area_um2: float = 150 * 150,
pin_extra_width: float = 0.0,
port_names: Optional[Tuple[str, ...]] = None,
port_name_prefix: str = "o",
) -> Component:
"""add ports from markers center in port_layer
squared
Args:
component: to read polygons from and to write ports to
pin_layer: for port markers
port_layer: for the new created port
orientation: in degrees 90: north, 0: east, 180: west, 270: south
min_pin_area_um2: ignores pins with area smaller than min_pin_area_um2
max_pin_area_um2: ignore pins for area above certain size
pin_extra_width: 2*offset from pin to straight
port_names: names of the ports (defaults to {i})
"""
port_markers = read_port_markers(component, [pin_layer])
port_names = port_names or [
f"{port_name_prefix}{i+1}" for i in range(len(port_markers.polygons))
]
layer = port_layer or pin_layer
for port_name, p in zip(port_names, port_markers.polygons):
dy = snap_to_grid(p.ymax - p.ymin)
dx = snap_to_grid(p.xmax - p.xmin)
x = p.x
y = p.y
if dx == dy and max_pin_area_um2 > dx * dy > min_pin_area_um2:
component.add_port(
port_name,
midpoint=(x, y),
width=dx - pin_extra_width,
orientation=orientation,
layer=layer,
)
return component | 68858a17b5187e064232f0c101ddf9c4e812c233 | 2,181 |
def P(Document, *fields, **kw):
"""Generate a MongoDB projection dictionary using the Django ORM style."""
__always__ = kw.pop('__always__', set())
projected = set()
omitted = set()
for field in fields:
if field[0] in ('-', '!'):
omitted.add(field[1:])
elif field[0] == '+':
projected.add(field[1:])
else:
projected.add(field)
if not projected: # We only have exclusions from the default projection.
names = set(getattr(Document, '__projection__', Document.__fields__) or Document.__fields__)
projected = {name for name in (names - omitted)}
projected |= __always__
if not projected:
projected = {'_id'}
return {unicode(traverse(Document, name, name)): True for name in projected} | d88a428f5eae1e57bd3b5ddf0d31e6e7c122c27d | 2,182 |
def get_page_url(skin_name, page_mappings, page_id):
""" Returns the page_url for the given page_id and skin_name """
fallback = '/'
if page_id is not None:
return page_mappings[page_id].get('path', '/')
return fallback | 6ead4824833f1a7a002f54f83606542645f53dd6 | 2,183 |
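# A small usage sketch for get_page_url() above; the skin name is not used by the
# lookup itself, and a missing 'path' entry or a None page_id falls back to "/".
page_mappings = {"home": {"path": "/home/"}, "about": {}}
get_page_url("default-skin", page_mappings, "home")   # -> "/home/"
get_page_url("default-skin", page_mappings, "about")  # -> "/" (no 'path' key)
get_page_url("default-skin", page_mappings, None)     # -> "/" (fallback)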
import textwrap
def get_paragraph_head(source, maxlength, bullet_num=-1, bullet=False):
"""Return the paragraph text of specific length, optionally prefix a bullet.
Args:
source(str, PreProcessed, etree._Element)
maxlength(int)
Kwargs:
bullet(bool): False by default, otherwise prefix paragraph text with
either '* )' or '##)' where # corresponds to a zero padded
integer.
bullet_num(int): By default, the bullet is un-numerated, otherwise it
will take the bullet number.
"""
if bullet_num > -1:
bullet = True
if not bullet:
bullet_s = ""
else:
if bullet_num < 0:
bullet_s = "* ) "
else:
bullet_s = f"{bullet_num:02d}) "
if isinstance(source, PreProcessed):
string = str(source.pre_italic)
elif isinstance(source, etree._Element):
string = source.xpath("string()")
# TODO PostProcessed condition
else:
string = str(source)
string = f"{bullet_s}{string}"
if maxlength != 30:
print(f"*** maxlength: {maxlength}")
short = textwrap.shorten(string, width=maxlength, placeholder=" ...")
return short | ef809d355b8f3495b1ad337e399ad7e243784049 | 2,184 |
def create_form(erroneous_form=None):
"""Show a form to create a guest server."""
party_id = _get_current_party_id_or_404()
setting = guest_server_service.get_setting_for_party(party_id)
form = erroneous_form if erroneous_form else CreateForm()
return {
'form': form,
'domain': setting.domain,
} | 2d8e9cd6597e4ccb1b9f39d77cca45b354d99371 | 2,185 |
def apply(task, args, kwargs, **options):
"""Apply the task locally.
This will block until the task completes, and returns a
:class:`celery.result.EagerResult` instance.
"""
args = args or []
kwargs = kwargs or {}
task_id = options.get("task_id", gen_unique_id())
retries = options.get("retries", 0)
task = tasks[task.name] # Make sure we get the instance, not class.
default_kwargs = {"task_name": task.name,
"task_id": task_id,
"task_retries": retries,
"task_is_eager": True,
"logfile": None,
"delivery_info": {"is_eager": True},
"loglevel": 0}
supported_keys = fun_takes_kwargs(task.run, default_kwargs)
extend_with = dict((key, val) for key, val in default_kwargs.items()
if key in supported_keys)
kwargs.update(extend_with)
trace = TaskTrace(task.name, task_id, args, kwargs, task=task)
retval = trace.execute()
return EagerResult(task_id, retval, trace.status, traceback=trace.strtb) | 600bc142ca8d96bd020db5cb82103169d255d970 | 2,186 |
from typing import Optional
from typing import Callable
def exp_post_expansion_function(expansion: Expansion) -> Optional[Callable]:
"""Return the specified post-expansion function, or None if unspecified"""
return exp_opt(expansion, 'post') | 6d49f5e40b7c900470a5c84b37d9da1666b217c2 | 2,187 |
def return_(x):
"""Implement `return_`."""
return x | 6557a37db2020bdbb0f9dcf587f2bd42509ff937 | 2,188 |
import os
from os.path import islink, isabs, dirname, join, normpath
def _resolve_link(path):
"""Internal helper function. Takes a path and follows symlinks
until we either arrive at something that isn't a symlink, or
encounter a path we've seen before (meaning that there's a loop).
"""
paths_seen = []
while islink(path):
if path in paths_seen:
# Already seen this path, so we must have a symlink loop
return None
paths_seen.append(path)
# Resolve where the link points to
resolved = os.readlink(path)
if not isabs(resolved):
dir = dirname(path)
path = normpath(join(dir, resolved))
else:
path = normpath(resolved)
return path | 16e0912628d0170fb510ebe6655c55211633c160 | 2,189 |
def create(platformDetails):
"""
This function creates a new platform in the platform list
based on the passed in platform data
    :param platformDetails: platform to create in platform structure
:return: 201 on success, 406 on platform exists
"""
# Remove id as it's created automatically
if "id" in platformDetails:
del platformDetails["id"]
# Does the platform exist already?
existing_platform = (
db.session.query(Platform)
.filter(Platform.value == platformDetails["value"])
.one_or_none()
)
if existing_platform is None:
schema = PlatformSchema()
new_platform = schema.load(platformDetails, session=db.session)
db.session.add(new_platform)
db.session.commit()
# Serialize and return the newly created deployment
# in the response
data = schema.dump(new_platform)
return data, 201
# Otherwise, it already exists, that's an error
else:
abort(406, "Platform already exists") | a6b27d6b530ccc11134a001ac3b49c6cb89475a3 | 2,190 |
def _execute_cell(cell, shell, iopub, timeout=300):
"""
Execute an IPython Notebook Cell and return the cell output.
Parameters
----------
cell : IPython.nbformat.current.NotebookNode
The IPython Notebook cell to execute.
shell : IPython.kernel.blocking.channels.BlockingShellChannel
The shell channel which the cell is submitted to for execution.
iopub : IPython.kernel.blocking.channels.BlockingIOPubChannel
The iopub channel used to retrieve the result of the execution.
timeout : int
The number of seconds to wait for the execution to finish before giving
up.
Returns
-------
cell_outputs : list
The list of NotebookNodes holding the result of the execution.
"""
# Execute input
shell.execute(cell.input)
exe_result = shell.get_shell_msg(timeout=timeout)
if exe_result['content']['status'] == 'error':
raise RuntimeError('Failed to execute cell due to error: {!r}'.format(
str(exe_result['content']['evalue'])))
cell_outputs = list()
# Poll for iopub messages until no more messages are available
while True:
try:
msg = iopub.get_iopub_msg(timeout=0.5)
except Empty:
break
msg_type = msg['msg_type']
if msg_type in ('status', 'pyin', 'execute_input', 'execute_result'):
continue
content = msg['content']
node = NotebookNode(output_type=msg_type)
if msg_type == 'stream':
node.stream = content['name']
if 'text' in content:
# v4 notebook format
node.text = content['text']
else:
# v3 notebook format
node.text = content['data']
bug_text = 'Using Anaconda Cloud api site https://api.anaconda.org'
if bug_text in node.text:
# Ignore conda (spam) messages/warnings
continue
elif msg_type in ('display_data', 'pyout'):
node['metadata'] = content['metadata']
for mime, data in content['data'].items():
attr = mime.split('/')[-1].lower()
attr = attr.replace('+xml', '').replace('plain', 'text')
setattr(node, attr, data)
if msg_type == 'pyout':
node.prompt_number = content['execution_count']
elif msg_type == 'pyerr':
node.ename = content['ename']
node.evalue = content['evalue']
node.traceback = content['traceback']
else:
raise RuntimeError('Unhandled iopub message of type: {}'.format(
msg_type))
cell_outputs.append(node)
return cell_outputs | 0893611a9693ffd62bcedd5a718bc4cab144357d | 2,191 |
import itertools as it
import numpy as np
import pandas as pd
from pandas import Categorical
def VD_A_DF(data, val_col: str = None, group_col: str = None, sort=True):
"""
:param data: pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
:param val_col: str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
:param group_col: str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
:param sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
:return: stats : pandas DataFrame of effect sizes
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'estimate' : effect sizes
'magnitude' : magnitude
"""
x = data.copy()
if sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
groups = x[group_col].unique()
# Pairwise combinations
g1, g2 = np.array(list(it.combinations(np.arange(groups.size), 2))).T
# Compute effect size for each combination
ef = np.array([VD_A(list(x[val_col][x[group_col] == groups[i]].values),
list(x[val_col][x[group_col] == groups[j]].values)) for i, j in zip(g1, g2)])
return pd.DataFrame({
'A': np.unique(data[group_col])[g1],
'B': np.unique(data[group_col])[g2],
'estimate': ef[:, 0],
'magnitude': ef[:, 1]
}) | ee4b94c9a47d8e15e182c010ffdb954f2ccec4bb | 2,192 |
def getR2(y, y_fitted, chi=None):
"""
calculates the coefficient of determination R^2 for `y_fitted` as prediction for `y` over a region marked by chi>0 defined by
R^2=1 - S_res/S_tot
with S_res=int(chi*(y-y_fitted*1)**2, S_tot=int(chi*(y-m(y)*1)**2), m(y)=int(chi*y)/int(chi)
    If R^2=1 then `y_fitted` predicts `y` exactly. If R^2=0 then `y_fitted` does not make a better prediction than the mean.
:param y: target distribution
:type y: `esys.escript.Scalar`
:param y_fitted: fitted distribution
:type y_fitted: `esys.escript.Scalar`
:param chi: marker/weighting for region of interest
:type chi: `esys.escript.Scalar` or None
:rtype: `float`
"""
if chi is None:
chi=Scalar(1., Function(y_fitted.getFunctionSpace().getDomain()))
ybar=integrate(chi*y)/integrate(chi)
S_res=integrate(chi*(y-y_fitted)**2)
S_tot=integrate(chi*(y-ybar)**2)
if S_tot > 0:
R2=1-S_res/S_tot
else:
if S_res > 0:
R2=0.
else:
R2=1.
return R2 | 8ec0837d2d8443279af4142c8b8407b0b03af06a | 2,193 |
import numpy as np
import sympy as sym
def basis(d, point_distribution='uniform', symbolic=True):
"""
Return all local basis function phi as functions of the
local point X in a 1D element with d+1 nodes.
If symbolic=True, return symbolic expressions, else
return Python functions of X.
point_distribution can be 'uniform' or 'Chebyshev'.
"""
X = sym.symbols('X')
if d == 0:
phi_sym = [1]
else:
if point_distribution == 'uniform':
if symbolic:
h = sym.Rational(1, d) # node spacing
nodes = [2*i*h - 1 for i in range(d+1)]
else:
nodes = np.linspace(-1, 1, d+1)
elif point_distribution == 'Chebyshev':
# Just numeric nodes
nodes = Chebyshev_nodes(-1, 1, d)
phi_sym = [Lagrange_polynomial(X, r, nodes)
for r in range(d+1)]
# Transform to Python functions
phi_num = [sym.lambdify([X], phi_sym[r], modules='numpy')
for r in range(d+1)]
return phi_sym if symbolic else phi_num | 0f369ab22a12588e10826e894142a1dd115a5aa9 | 2,194 |
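# A usage sketch for basis() above: for d=1 on uniform nodes the two hat functions
# are (1-X)/2 and (1+X)/2, so each equals one at its own node and 0.5 at the midpoint.
phi = basis(1, symbolic=True)
# phi[0].subs('X', -1) == 1 and phi[1].subs('X', -1) == 0
phi_num = basis(1, symbolic=False)
# phi_num[0](0.0) == 0.5 and phi_num[1](0.0) == 0.5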
def provide_batch_fn():
""" The provide_batch function to use. """
return dataset_factory.provide_batch | 9ec34fb430dab0a17461f3002f1acbbd94b6e637 | 2,195 |
def mergeSort(li):
"""Sorts a list by splitting it to smaller and smaller pieces (until they
only have one or less elements) and then merges it back using the function
``merge()``.
>>> mergeSort([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> mergeSort([5, 4, 3, 2, 1])
[1, 2, 3, 4, 5]
>>> mergeSort([3, 2, 6, 1, 4, 2, 3, 1, 1, 5, 6, -2, 2.3])
[-2, 1, 1, 1, 2, 2, 2.3, 3, 3, 4, 5, 6, 6]
"""
n = len(li)
if n < 2:
return li
return merge(mergeSort(li[:n//2]), mergeSort(li[n//2:])) | c0f38ff6779bb24ebb081b5b76661189fa2767bc | 2,196 |
import sys
import random
def mcplayout(pos, amaf_map, disp=False):
""" Start a Monte Carlo playout from a given position,
return score for to-play player at the starting position;
amaf_map is board-sized scratchpad recording who played at a given
position first """
if disp: print('** SIMULATION **', file=sys.stderr)
start_n = pos.n
passes = 0
while passes < 2 and pos.n < MAX_GAME_LEN:
if disp: print_pos(pos)
pos2 = None
# We simply try the moves our heuristics generate, in a particular
# order, but not with 100% probability; this is on the border between
# "rule-based playouts" and "probability distribution playouts".
for c, kind in gen_playout_moves(pos, pos.last_moves_neighbors(), conf['PROB_HEURISTIC']):
if disp and kind != 'random':
print('move suggestion', str_coord(c), kind, file=sys.stderr)
pos2 = pos.move(c)
if pos2 is None:
continue
# check if the suggested move did not turn out to be a self-atari
if random.random() <= (conf['PROB_RSAREJECT'] if kind == 'random' else conf['PROB_SSAREJECT']):
in_atari, ds = fix_atari(pos2, c, singlept_ok=True, twolib_edgeonly=True)
if ds:
if disp: print('rejecting self-atari move', str_coord(c), file=sys.stderr)
pos2 = None
continue
if amaf_map[c] == 0: # Mark the coordinate with 1 for black
amaf_map[c] = 1 if pos.n % 2 == 0 else -1
break
if pos2 is None: # no valid moves, pass
pos = pos.pass_move()
passes += 1
continue
passes = 0
pos = pos2
owner_map = W*W*[0]
score = pos.score(owner_map)
if disp: print('** SCORE B%+.1f **' % (score if pos.n % 2 == 0 else -score), file=sys.stderr)
if start_n % 2 != pos.n % 2:
score = -score
return score, amaf_map, owner_map | d451c195f6596c1325487e277b052b83cfae85dc | 2,197 |
import os
def test_colour_ranges(fake_readme, monkeypatch):
"""
Whatever number we provide as coverage should produce the appropriate colour
"""
readme_file = "README"
def fake_readme_location(*args, **kwargs):
return os.path.join(TESTS_DIR, readme_file)
monkeypatch.setattr(__main__, "readme_location", fake_readme_location)
for total, colour in (
("97", "brightgreen"),
("93", "green"),
("80", "yellowgreen"),
("65", "yellow"),
("45", "orange"),
("15", "red"),
("n/a", "lightgrey"),
):
__main__.get_total = lambda: total
__main__.main([])
assert __main__.get_colour(total) == colour | 0614cfa9d33e1d5f3112a79198c7fd2e762f4e3d | 2,198 |
def remove_partitions(
cube, store, conditions=None, ktk_cube_dataset_ids=None, metadata=None
):
"""
Remove given partition range from cube using a transaction.
Remove the partitions selected by ``conditions``. If no ``conditions`` are given,
remove all partitions. For each considered dataset, only the subset of
``conditions`` that refers to the partition columns of the respective dataset
is used. In particular, a dataset that is not partitioned at all is always considered
selected by ``conditions``.
Parameters
----------
cube: kartothek.core.cube.cube.Cube
Cube spec.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Store.
conditions: Union[None, Condition, Iterable[Condition], Conjunction]
Select the partitions to be removed. Must be a condition only on partition columns.
ktk_cube_dataset_ids: Optional[Union[Iterable[Union[Str, Bytes]], Union[Str, Bytes]]]
Ktk_cube dataset IDs to apply the remove action to, optional. Default to "all".
metadata: Optional[Dict[str, Dict[str, Any]]]
Metadata for every the datasets, optional. Only given keys are updated/replaced. Deletion of
metadata keys is not possible.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Datasets, updated.
"""
if callable(store):
store_instance = store()
store_factory = store
else:
store_instance = store
def store_factory():
return store
existing_datasets = discover_datasets(cube, store)
for (
ktk_cube_dataset_id,
(ds, mp, delete_scope),
) in prepare_metapartitions_for_removal_action(
cube=cube,
store=store_instance,
conditions=conditions,
ktk_cube_dataset_ids=ktk_cube_dataset_ids,
existing_datasets=existing_datasets,
).items():
mp = mp.store_dataframes(
store=store_instance,
dataset_uuid=ds.uuid,
df_serializer=KTK_CUBE_DF_SERIALIZER,
)
ds_factory = metadata_factory_from_dataset(
ds, with_schema=True, store=store_factory
)
existing_datasets[ktk_cube_dataset_id] = update_dataset_from_partitions(
mp,
store_factory=store_factory,
dataset_uuid=ds.uuid,
ds_factory=ds_factory,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
metadata_merger=None,
delete_scope=delete_scope,
)
return existing_datasets | 0bede6d99e34edce32f42d9f78104ee3fdc45456 | 2,199 |